@@ -42,6 +42,8 @@ class Base(ABC):
     def chat(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         try:
             response = self.client.chat.completions.create(
                 model=self.model_name,
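
Note: these two `+` lines are the whole change for the OpenAI-compatible Base class: a caller-supplied max_tokens is discarded rather than remapped, so the provider's own default cap applies. A minimal sketch of the effect (values invented):

    gen_conf = {"temperature": 0.7, "max_tokens": 512}
    if "max_tokens" in gen_conf:
        del gen_conf["max_tokens"]
    assert gen_conf == {"temperature": 0.7}  # the cap is gone; nothing replaces it
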
@@ -62,6 +64,8 @@ class Base(ABC):
     def chat_streamly(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         ans = ""
         total_tokens = 0
         try:
@@ -187,6 +191,8 @@ class BaiChuanChat(Base):
     def chat(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         try:
             response = self.client.chat.completions.create(
                 model=self.model_name,
@@ -214,6 +220,8 @@ class BaiChuanChat(Base):
     def chat_streamly(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         ans = ""
         total_tokens = 0
         try:
@@ -264,6 +272,8 @@ class QWenChat(Base):
         super().__init__(key, model_name, "https://dashscope.aliyuncs.com/compatible-mode/v1")
 
     def chat(self, system, history, gen_conf):
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         if self.model_name.lower().find("deepseek") >= 0:
             return super().chat(system, history, gen_conf)
 
@@ -305,6 +315,8 @@ class QWenChat(Base):
         from http import HTTPStatus
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         ans = ""
         tk_count = 0
         try:
@@ -334,6 +346,8 @@ class QWenChat(Base):
         yield tk_count
 
     def chat_streamly(self, system, history, gen_conf):
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         if self.model_name.lower().find("deepseek") >= 0:
             return super().chat_streamly(system, history, gen_conf)
 
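
Note: QWenChat strips max_tokens before its deepseek dispatch, so both the delegated Base path and the native DashScope path see the cleaned gen_conf. The substring test via str.find works, but the `in` operator is the more idiomatic spelling of the same check (value invented):

    name = "deepseek-v3"
    assert name.lower().find("deepseek") >= 0
    assert "deepseek" in name.lower()  # equivalent, reads better
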
@@ -348,6 +362,8 @@ class ZhipuChat(Base):
     def chat(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         try:
             if "presence_penalty" in gen_conf:
                 del gen_conf["presence_penalty"]
@@ -371,6 +387,8 @@ class ZhipuChat(Base):
     def chat_streamly(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         if "presence_penalty" in gen_conf:
             del gen_conf["presence_penalty"]
         if "frequency_penalty" in gen_conf:
@@ -412,6 +430,8 @@ class OllamaChat(Base):
     def chat(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         try:
             options = {}
             if "temperature" in gen_conf:
@@ -438,6 +458,8 @@ class OllamaChat(Base):
     def chat_streamly(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         options = {}
         if "temperature" in gen_conf:
             options["temperature"] = gen_conf["temperature"]
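
Note: OllamaChat forwards sampling knobs through an options mapping rather than top-level kwargs; the call that consumes it is outside the hunk, presumably something like `self.client.chat(..., options=options)`. A compact equivalent of the key-by-key copies, as a sketch (the allowed-key set beyond temperature is an assumption):

    gen_conf = {"temperature": 0.1}
    allowed = ("temperature", "top_p")  # assumption: only temperature appears in the hunk
    options = {k: gen_conf[k] for k in allowed if k in gen_conf}
    assert options == {"temperature": 0.1}
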
@@ -515,8 +537,6 @@ class LocalLLM(Base):
         from rag.svr.jina_server import Prompt
         if system:
             history.insert(0, {"role": "system", "content": system})
-        if "max_tokens" in gen_conf:
-            gen_conf["max_new_tokens"] = gen_conf.pop("max_tokens")
         return Prompt(message=history, gen_conf=gen_conf)
 
     def _stream_response(self, endpoint, prompt):
@@ -538,6 +558,8 @@ class LocalLLM(Base):
         yield num_tokens_from_string(answer)
 
     def chat(self, system, history, gen_conf):
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         prompt = self._prepare_prompt(system, history, gen_conf)
         chat_gen = self._stream_response("/chat", prompt)
         ans = next(chat_gen)
@@ -545,6 +567,8 @@ class LocalLLM(Base):
         return ans, total_tokens
 
     def chat_streamly(self, system, history, gen_conf):
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         prompt = self._prepare_prompt(system, history, gen_conf)
         return self._stream_response("/stream", prompt)
 
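
Note: with the rename gone from _prepare_prompt, both LocalLLM paths now drop max_tokens before building the Prompt. chat() consumes the same generator that chat_streamly() returns; a hedged sketch of that contract with a stand-in generator (names and values invented):

    def fake_stream():          # mirrors _stream_response: answer first, then a token count
        yield "hello world"
        yield 2
    gen = fake_stream()
    ans = next(gen)             # the answer text
    total_tokens = next(gen)    # presumably consumed the same way inside chat()
    assert (ans, total_tokens) == ("hello world", 2)
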
@@ -606,6 +630,9 @@ class MiniMaxChat(Base):
     def chat_streamly(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        for k in list(gen_conf.keys()):
+            if k not in ["temperature", "top_p", "max_tokens"]:
+                del gen_conf[k]
         ans = ""
         total_tokens = 0
         try:
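
Note: the added whitelist mutates gen_conf while looping, which is why it iterates a snapshot: deleting from a dict during direct iteration raises RuntimeError. Sketch (values invented):

    gen_conf = {"temperature": 0.2, "seed": 42, "max_tokens": 128}
    for k in list(gen_conf.keys()):                      # snapshot of the keys
        if k not in ["temperature", "top_p", "max_tokens"]:
            del gen_conf[k]                              # safe: not iterating gen_conf itself
    assert gen_conf == {"temperature": 0.2, "max_tokens": 128}
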
@@ -724,14 +751,8 @@ class BedrockChat(Base):
     def chat(self, system, history, gen_conf):
         from botocore.exceptions import ClientError
         for k in list(gen_conf.keys()):
-            if k not in ["temperature", "top_p", "max_tokens"]:
+            if k not in ["top_p", "max_tokens"]:
                 del gen_conf[k]
-        if "max_tokens" in gen_conf:
-            gen_conf["maxTokens"] = gen_conf["max_tokens"]
-            _ = gen_conf.pop("max_tokens")
-        if "top_p" in gen_conf:
-            gen_conf["topP"] = gen_conf["top_p"]
-            _ = gen_conf.pop("top_p")
         for item in history:
             if not isinstance(item["content"], list) and not isinstance(item["content"], tuple):
                 item["content"] = [{"text": item["content"]}]
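
Note: the deleted block was the snake_case-to-camelCase bridge (max_tokens to maxTokens, top_p to topP) that Bedrock's Converse inferenceConfig expects; after this change the keys pass through unrenamed. The surviving history normalization wraps bare strings in Converse-style content blocks; isinstance accepts a tuple of types, so one call covers both list and tuple:

    item = {"role": "user", "content": "hi"}
    if not isinstance(item["content"], (list, tuple)):
        item["content"] = [{"text": item["content"]}]
    assert item["content"] == [{"text": "hi"}]
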
@@ -755,14 +776,8 @@ class BedrockChat(Base):
     def chat_streamly(self, system, history, gen_conf):
         from botocore.exceptions import ClientError
         for k in list(gen_conf.keys()):
-            if k not in ["temperature", "top_p", "max_tokens"]:
+            if k not in ["top_p", "max_tokens"]:
                 del gen_conf[k]
-        if "max_tokens" in gen_conf:
-            gen_conf["maxTokens"] = gen_conf["max_tokens"]
-            _ = gen_conf.pop("max_tokens")
-        if "top_p" in gen_conf:
-            gen_conf["topP"] = gen_conf["top_p"]
-            _ = gen_conf.pop("top_p")
         for item in history:
             if not isinstance(item["content"], list) and not isinstance(item["content"], tuple):
                 item["content"] = [{"text": item["content"]}]
@@ -819,11 +834,8 @@ class GeminiChat(Base):
 
         if system:
             self.model._system_instruction = content_types.to_content(system)
-
-        if 'max_tokens' in gen_conf:
-            gen_conf['max_output_tokens'] = gen_conf['max_tokens']
         for k in list(gen_conf.keys()):
-            if k not in ["temperature", "top_p", "max_output_tokens"]:
+            if k not in ["temperature", "top_p", "max_tokens"]:
                 del gen_conf[k]
         for item in history:
             if 'role' in item and item['role'] == 'assistant':
@@ -847,10 +859,8 @@ class GeminiChat(Base):
 
         if system:
             self.model._system_instruction = content_types.to_content(system)
-        if 'max_tokens' in gen_conf:
-            gen_conf['max_output_tokens'] = gen_conf['max_tokens']
         for k in list(gen_conf.keys()):
-            if k not in ["temperature", "top_p", "max_output_tokens"]:
+            if k not in ["temperature", "top_p", "max_tokens"]:
                 del gen_conf[k]
         for item in history:
             if 'role' in item and item['role'] == 'assistant':
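
Note: the removed Gemini lines copied max_tokens into max_output_tokens (the name Gemini's GenerationConfig uses), after which the old whitelist dropped the original key; the new whitelist lets max_tokens through untouched. The old behavior, reconstructed from the `-` lines above (values invented):

    gen_conf = {"max_tokens": 256, "top_p": 0.9}
    gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
    for k in list(gen_conf.keys()):
        if k not in ["temperature", "top_p", "max_output_tokens"]:
            del gen_conf[k]
    assert gen_conf == {"top_p": 0.9, "max_output_tokens": 256}
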
@@ -992,6 +1002,8 @@ class CoHereChat(Base):
     def chat(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         if "top_p" in gen_conf:
             gen_conf["p"] = gen_conf.pop("top_p")
         if "frequency_penalty" in gen_conf and "presence_penalty" in gen_conf:
@@ -1026,6 +1038,8 @@ class CoHereChat(Base):
     def chat_streamly(self, system, history, gen_conf):
         if system:
             history.insert(0, {"role": "system", "content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         if "top_p" in gen_conf:
             gen_conf["p"] = gen_conf.pop("top_p")
         if "frequency_penalty" in gen_conf and "presence_penalty" in gen_conf:
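
Note: Cohere names the nucleus-sampling parameter p, hence the pop-rename; dict.pop reads and removes in one statement:

    gen_conf = {"top_p": 0.95}
    if "top_p" in gen_conf:
        gen_conf["p"] = gen_conf.pop("top_p")
    assert gen_conf == {"p": 0.95}
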
@@ -1122,7 +1136,7 @@ class ReplicateChat(Base):
 
     def chat(self, system, history, gen_conf):
         if "max_tokens" in gen_conf:
-            gen_conf["max_new_tokens"] = gen_conf.pop("max_tokens")
+            del gen_conf["max_tokens"]
         if system:
             self.system = system
         prompt = "\n".join(
@@ -1141,7 +1155,7 @@ class ReplicateChat(Base):
 
     def chat_streamly(self, system, history, gen_conf):
         if "max_tokens" in gen_conf:
-            gen_conf["max_new_tokens"] = gen_conf.pop("max_tokens")
+            del gen_conf["max_tokens"]
        if system:
             self.system = system
         prompt = "\n".join(
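
Note: Replicate text models commonly take max_new_tokens as the input name, which is what the removed rename produced; post-change the cap is simply discarded, consistent with the rest of the patch (values invented):

    gen_conf = {"max_tokens": 64}
    gen_conf["max_new_tokens"] = gen_conf.pop("max_tokens")  # removed behavior
    assert gen_conf == {"max_new_tokens": 64}
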
@@ -1185,6 +1199,8 @@ class HunyuanChat(Base):
         _history = [{k.capitalize(): v for k, v in item.items()} for item in history]
         if system:
             _history.insert(0, {"Role": "system", "Content": system})
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         if "temperature" in gen_conf:
             _gen_conf["Temperature"] = gen_conf["temperature"]
         if "top_p" in gen_conf:
@@ -1211,7 +1227,8 @@ class HunyuanChat(Base):
         _history = [{k.capitalize(): v for k, v in item.items()} for item in history]
         if system:
             _history.insert(0, {"Role": "system", "Content": system})
-
+        if "max_tokens" in gen_conf:
+            del gen_conf["max_tokens"]
         if "temperature" in gen_conf:
             _gen_conf["Temperature"] = gen_conf["temperature"]
         if "top_p" in gen_conf:
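
Note: the Hunyuan SDK wants TitleCase keys, so the history is re-keyed with str.capitalize before the system turn is prepended:

    history = [{"role": "user", "content": "hi"}]
    _history = [{k.capitalize(): v for k, v in item.items()} for item in history]
    assert _history == [{"Role": "user", "Content": "hi"}]
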
@@ -1284,7 +1301,7 @@ class BaiduYiyanChat(Base):
                 0)) / 2
         ) + 1
         if "max_tokens" in gen_conf:
-            gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
+            del gen_conf["max_tokens"]
         ans = ""
 
         try:
@@ -1308,7 +1325,7 @@ class BaiduYiyanChat(Base):
                 0)) / 2
         ) + 1
         if "max_tokens" in gen_conf:
-            gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
+            del gen_conf["max_tokens"]
         ans = ""
         total_tokens = 0
 
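
Note: the `0)) / 2` and `) + 1` fragments above are the tail of a wrapped expression that starts before the hunk; from their shape, ERNIE's penalty_score appears to be derived by averaging the two OpenAI-style penalties and shifting by one (a reconstruction, not fully visible here; values invented):

    presence, frequency = 1.0, 0.5
    penalty_score = ((presence + frequency) / 2) + 1
    assert penalty_score == 1.75
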
@@ -1344,8 +1361,6 @@ class AnthropicChat(Base):
     def chat(self, system, history, gen_conf):
         if system:
             self.system = system
-        if "max_tokens" not in gen_conf:
-            gen_conf["max_tokens"] = 4096
         if "presence_penalty" in gen_conf:
             del gen_conf["presence_penalty"]
         if "frequency_penalty" in gen_conf:
@@ -1377,8 +1392,6 @@ class AnthropicChat(Base):
     def chat_streamly(self, system, history, gen_conf):
         if system:
             self.system = system
-        if "max_tokens" not in gen_conf:
-            gen_conf["max_tokens"] = 4096
         if "presence_penalty" in gen_conf:
             del gen_conf["presence_penalty"]
         if "frequency_penalty" in gen_conf:
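
Note: dropping the 4096 fallback means gen_conf must now carry max_tokens from the caller, since Anthropic's Messages API requires it; the penalty strips stay. A slightly tighter equivalent of the guarded deletes, should a refactor ever be wanted (values invented):

    gen_conf = {"temperature": 0.3, "presence_penalty": 0.1}
    for k in ("presence_penalty", "frequency_penalty"):
        gen_conf.pop(k, None)   # the default suppresses KeyError when the key is absent
    assert gen_conf == {"temperature": 0.3}
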
@@ -1458,8 +1471,8 @@ class GoogleChat(Base):
             self.system = system
 
         if "claude" in self.model_name:
-            if "max_tokens" not in gen_conf:
-                gen_conf["max_tokens"] = 4096
+            if "max_tokens" in gen_conf:
+                del gen_conf["max_tokens"]
             try:
                 response = self.client.messages.create(
                     model=self.model_name,
@@ -1508,8 +1521,8 @@ class GoogleChat(Base):
             self.system = system
 
         if "claude" in self.model_name:
-            if "max_tokens" not in gen_conf:
-                gen_conf["max_tokens"] = 4096
+            if "max_tokens" in gen_conf:
+                del gen_conf["max_tokens"]
             ans = ""
             total_tokens = 0
             try:
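
Note: GoogleChat multiplexes Anthropic-on-Vertex ("claude" models, via client.messages.create) and Gemini; both claude branches swap the old 4096 default for the same delete-the-cap rule used elsewhere in the patch. The streaming hunk below also reads the token count from response._chunks, a private attribute that may break across SDK releases; worth a follow-up if the SDK exposes a public usage field.
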
@@ -1556,6 +1569,7 @@ class GoogleChat(Base):
 
             yield response._chunks[-1].usage_metadata.total_token_count
 
+
 class GPUStackChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url: