
chat_model.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from openai.lib.azure import AzureOpenAI
from zhipuai import ZhipuAI
from dashscope import Generation
from abc import ABC
from openai import OpenAI
import openai
from ollama import Client
from rag.nlp import is_chinese, is_english
from rag.utils import num_tokens_from_string
import os
import json
import requests
import asyncio

LENGTH_NOTIFICATION_CN = "······\n由于长度的原因,回答被截断了,要继续吗?"
LENGTH_NOTIFICATION_EN = "...\nFor the content length reason, it stopped, continue?"


class Base(ABC):
    def __init__(self, key, model_name, base_url):
        timeout = int(os.environ.get('LM_TIMEOUT_SECONDS', 600))
        self.client = OpenAI(api_key=key, base_url=base_url, timeout=timeout)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf)
            if not response.choices:
                return "", 0
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                if is_chinese(ans):
                    ans += LENGTH_NOTIFICATION_CN
                else:
                    ans += LENGTH_NOTIFICATION_EN
            return ans, self.total_token_count(response)
        except openai.APIError as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf)
            for resp in response:
                if not resp.choices:
                    continue
                if not resp.choices[0].delta.content:
                    resp.choices[0].delta.content = ""
                if hasattr(resp.choices[0].delta, "reasoning_content") and resp.choices[0].delta.reasoning_content:
                    if ans.find("<think>") < 0:
                        ans += "<think>"
                    ans = ans.replace("</think>", "")
                    ans += resp.choices[0].delta.reasoning_content + "</think>"
                else:
                    ans += resp.choices[0].delta.content
                tol = self.total_token_count(resp)
                if not tol:
                    total_tokens += num_tokens_from_string(resp.choices[0].delta.content)
                else:
                    total_tokens = tol
                if resp.choices[0].finish_reason == "length":
                    if is_chinese(ans):
                        ans += LENGTH_NOTIFICATION_CN
                    else:
                        ans += LENGTH_NOTIFICATION_EN
                yield ans
        except openai.APIError as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens

    def total_token_count(self, resp):
        try:
            return resp.usage.total_tokens
        except Exception:
            pass
        try:
            return resp["usage"]["total_tokens"]
        except Exception:
            pass
        return 0
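
# Illustrative usage sketch (not part of the module; the key, model, and
# prompts are placeholders). Any OpenAI-compatible subclass below works the
# same way. Note that chat() mutates `history` by inserting the system turn.
#
#   model = GptTurbo("sk-...", "gpt-3.5-turbo")
#   ans, tokens = model.chat(
#       "You are a helpful assistant.",
#       [{"role": "user", "content": "Hello"}],
#       {"temperature": 0.7},
#   )
#   # chat_streamly() yields the growing answer string after each chunk and,
#   # as its final item, the total token count (an int).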


class GptTurbo(Base):
    def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
        if not base_url:
            base_url = "https://api.openai.com/v1"
        super().__init__(key, model_name, base_url)


class MoonshotChat(Base):
    def __init__(self, key, model_name="moonshot-v1-8k", base_url="https://api.moonshot.cn/v1"):
        if not base_url:
            base_url = "https://api.moonshot.cn/v1"
        super().__init__(key, model_name, base_url)


class XinferenceChat(Base):
    def __init__(self, key=None, model_name="", base_url=""):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        if base_url.split("/")[-1] != "v1":
            base_url = os.path.join(base_url, "v1")
        super().__init__(key, model_name, base_url)


class HuggingFaceChat(Base):
    def __init__(self, key=None, model_name="", base_url=""):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        if base_url.split("/")[-1] != "v1":
            base_url = os.path.join(base_url, "v1")
        super().__init__(key, model_name.split("___")[0], base_url)


class ModelScopeChat(Base):
    def __init__(self, key=None, model_name="", base_url=""):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        base_url = base_url.rstrip('/')
        if base_url.split("/")[-1] != "v1":
            base_url = os.path.join(base_url, "v1")
        super().__init__(key, model_name.split("___")[0], base_url)


class DeepSeekChat(Base):
    def __init__(self, key, model_name="deepseek-chat", base_url="https://api.deepseek.com/v1"):
        if not base_url:
            base_url = "https://api.deepseek.com/v1"
        super().__init__(key, model_name, base_url)


class AzureChat(Base):
    def __init__(self, key, model_name, **kwargs):
        api_key = json.loads(key).get('api_key', '')
        api_version = json.loads(key).get('api_version', '2024-02-01')
        self.client = AzureOpenAI(api_key=api_key, azure_endpoint=kwargs["base_url"], api_version=api_version)
        self.model_name = model_name
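
# Note: judging from the parsing above, AzureChat expects `key` to be a JSON
# string bundling the credential and API version, e.g. (placeholder values):
#
#   key = json.dumps({"api_key": "<azure-key>", "api_version": "2024-02-01"})
#   model = AzureChat(key, "<deployment-name>", base_url="https://<resource>.openai.azure.com")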


class BaiChuanChat(Base):
    def __init__(self, key, model_name="Baichuan3-Turbo", base_url="https://api.baichuan-ai.com/v1"):
        if not base_url:
            base_url = "https://api.baichuan-ai.com/v1"
        super().__init__(key, model_name, base_url)

    @staticmethod
    def _format_params(params):
        return {
            "temperature": params.get("temperature", 0.3),
            "max_tokens": params.get("max_tokens", 2048),
            "top_p": params.get("top_p", 0.85),
        }

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                extra_body={
                    "tools": [{
                        "type": "web_search",
                        "web_search": {
                            "enable": True,
                            "search_mode": "performance_first"
                        }
                    }]
                },
                **self._format_params(gen_conf))
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                if is_chinese([ans]):
                    ans += LENGTH_NOTIFICATION_CN
                else:
                    ans += LENGTH_NOTIFICATION_EN
            return ans, self.total_token_count(response)
        except openai.APIError as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                extra_body={
                    "tools": [{
                        "type": "web_search",
                        "web_search": {
                            "enable": True,
                            "search_mode": "performance_first"
                        }
                    }]
                },
                stream=True,
                **self._format_params(gen_conf))
            for resp in response:
                if not resp.choices:
                    continue
                if not resp.choices[0].delta.content:
                    resp.choices[0].delta.content = ""
                ans += resp.choices[0].delta.content
                tol = self.total_token_count(resp)
                if not tol:
                    total_tokens += num_tokens_from_string(resp.choices[0].delta.content)
                else:
                    total_tokens = tol
                if resp.choices[0].finish_reason == "length":
                    if is_chinese([ans]):
                        ans += LENGTH_NOTIFICATION_CN
                    else:
                        ans += LENGTH_NOTIFICATION_EN
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
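
# Note: unlike Base, BaiChuanChat always sends temperature/max_tokens/top_p,
# filling in the defaults from _format_params. For example,
# _format_params({"temperature": 0.1}) returns
# {"temperature": 0.1, "max_tokens": 2048, "top_p": 0.85}.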


class QWenChat(Base):
    def __init__(self, key, model_name=Generation.Models.qwen_turbo, **kwargs):
        import dashscope
        dashscope.api_key = key
        self.model_name = model_name
        if model_name.lower().find("deepseek") >= 0:
            super().__init__(key, model_name, "https://dashscope.aliyuncs.com/compatible-mode/v1")

    def chat(self, system, history, gen_conf):
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if self.model_name.lower().find("deepseek") >= 0:
            return super().chat(system, history, gen_conf)

        stream_flag = str(os.environ.get('QWEN_CHAT_BY_STREAM', 'true')).lower() == 'true'
        if not stream_flag:
            from http import HTTPStatus
            if system:
                history.insert(0, {"role": "system", "content": system})
            response = Generation.call(
                self.model_name,
                messages=history,
                result_format='message',
                **gen_conf
            )
            ans = ""
            tk_count = 0
            if response.status_code == HTTPStatus.OK:
                ans += response.output.choices[0]['message']['content']
                tk_count += self.total_token_count(response)
                if response.output.choices[0].get("finish_reason", "") == "length":
                    if is_chinese([ans]):
                        ans += LENGTH_NOTIFICATION_CN
                    else:
                        ans += LENGTH_NOTIFICATION_EN
                return ans, tk_count
            return "**ERROR**: " + response.message, tk_count
        else:
            g = self._chat_streamly(system, history, gen_conf, incremental_output=True)
            result_list = list(g)
            error_msg_list = [item for item in result_list if str(item).find("**ERROR**") >= 0]
            if len(error_msg_list) > 0:
                return "**ERROR**: " + "".join(error_msg_list), 0
            else:
                return "".join(result_list[:-1]), result_list[-1]

    def _chat_streamly(self, system, history, gen_conf, incremental_output=False):
        from http import HTTPStatus
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        ans = ""
        tk_count = 0
        try:
            response = Generation.call(
                self.model_name,
                messages=history,
                result_format='message',
                stream=True,
                incremental_output=incremental_output,
                **gen_conf
            )
            for resp in response:
                if resp.status_code == HTTPStatus.OK:
                    ans = resp.output.choices[0]['message']['content']
                    tk_count = self.total_token_count(resp)
                    if resp.output.choices[0].get("finish_reason", "") == "length":
                        if is_chinese(ans):
                            ans += LENGTH_NOTIFICATION_CN
                        else:
                            ans += LENGTH_NOTIFICATION_EN
                    yield ans
                else:
                    yield ans + "\n**ERROR**: " + resp.message if not re.search(r" (key|quota)", str(resp.message).lower()) else "Out of credit. Please set the API key in **settings > Model providers.**"
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count

    def chat_streamly(self, system, history, gen_conf):
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if self.model_name.lower().find("deepseek") >= 0:
            return super().chat_streamly(system, history, gen_conf)

        return self._chat_streamly(system, history, gen_conf)
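
# Note on the collection pattern above: _chat_streamly() yields the answer
# text after each chunk and yields the token count as its last item, so the
# non-streaming path joins result_list[:-1] for the text and reads
# result_list[-1] as the count. incremental_output=True makes each yield a
# delta rather than the full answer, which is what makes the join correct.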


class ZhipuChat(Base):
    def __init__(self, key, model_name="glm-3-turbo", **kwargs):
        self.client = ZhipuAI(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        try:
            if "presence_penalty" in gen_conf:
                del gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                del gen_conf["frequency_penalty"]
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf
            )
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                if is_chinese(ans):
                    ans += LENGTH_NOTIFICATION_CN
                else:
                    ans += LENGTH_NOTIFICATION_EN
            return ans, self.total_token_count(response)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        ans = ""
        tk_count = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf
            )
            for resp in response:
                if not resp.choices[0].delta.content:
                    continue
                delta = resp.choices[0].delta.content
                ans += delta
                if resp.choices[0].finish_reason == "length":
                    if is_chinese(ans):
                        ans += LENGTH_NOTIFICATION_CN
                    else:
                        ans += LENGTH_NOTIFICATION_EN
                    tk_count = self.total_token_count(resp)
                if resp.choices[0].finish_reason == "stop":
                    tk_count = self.total_token_count(resp)
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count


class OllamaChat(Base):
    def __init__(self, key, model_name, **kwargs):
        self.client = Client(host=kwargs["base_url"])
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        try:
            options = {}
            if "temperature" in gen_conf:
                options["temperature"] = gen_conf["temperature"]
            if "max_tokens" in gen_conf:
                options["num_predict"] = gen_conf["max_tokens"]
            if "top_p" in gen_conf:
                options["top_p"] = gen_conf["top_p"]
            if "presence_penalty" in gen_conf:
                options["presence_penalty"] = gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                options["frequency_penalty"] = gen_conf["frequency_penalty"]
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                options=options,
                keep_alive=-1
            )
            ans = response["message"]["content"].strip()
            return ans, response.get("eval_count", 0) + response.get("prompt_eval_count", 0)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        options = {}
        if "temperature" in gen_conf:
            options["temperature"] = gen_conf["temperature"]
        if "max_tokens" in gen_conf:
            options["num_predict"] = gen_conf["max_tokens"]
        if "top_p" in gen_conf:
            options["top_p"] = gen_conf["top_p"]
        if "presence_penalty" in gen_conf:
            options["presence_penalty"] = gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            options["frequency_penalty"] = gen_conf["frequency_penalty"]
        ans = ""
        try:
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                stream=True,
                options=options,
                keep_alive=-1
            )
            for resp in response:
                if resp["done"]:
                    yield resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
                ans += resp["message"]["content"]
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)
        yield 0
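
# Note: gen_conf keys are copied into Ollama's options dict above; the only
# rename is max_tokens -> num_predict. Since max_tokens is deleted from
# gen_conf before the copy, that branch never fires as written.
# keep_alive=-1 asks Ollama to keep the model loaded between calls.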


class LocalAIChat(Base):
    def __init__(self, key, model_name, base_url):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        if base_url.split("/")[-1] != "v1":
            base_url = os.path.join(base_url, "v1")
        self.client = OpenAI(api_key="empty", base_url=base_url)
        self.model_name = model_name.split("___")[0]


class LocalLLM(Base):
    class RPCProxy:
        def __init__(self, host, port):
            self.host = host
            self.port = int(port)
            self.__conn()

        def __conn(self):
            from multiprocessing.connection import Client
            self._connection = Client(
                (self.host, self.port), authkey=b"infiniflow-token4kevinhu"
            )

        def __getattr__(self, name):
            import pickle

            def do_rpc(*args, **kwargs):
                for _ in range(3):
                    try:
                        self._connection.send(pickle.dumps((name, args, kwargs)))
                        return pickle.loads(self._connection.recv())
                    except Exception:
                        self.__conn()
                raise Exception("RPC connection lost!")

            return do_rpc

    def __init__(self, key, model_name):
        from jina import Client
        self.client = Client(port=12345, protocol="grpc", asyncio=True)

    def _prepare_prompt(self, system, history, gen_conf):
        from rag.svr.jina_server import Prompt
        if system:
            history.insert(0, {"role": "system", "content": system})
        return Prompt(message=history, gen_conf=gen_conf)

    def _stream_response(self, endpoint, prompt):
        from rag.svr.jina_server import Generation
        answer = ""
        try:
            res = self.client.stream_doc(
                on=endpoint, inputs=prompt, return_type=Generation
            )
            loop = asyncio.get_event_loop()
            try:
                while True:
                    answer = loop.run_until_complete(res.__anext__()).text
                    yield answer
            except StopAsyncIteration:
                pass
        except Exception as e:
            yield answer + "\n**ERROR**: " + str(e)
        yield num_tokens_from_string(answer)

    def chat(self, system, history, gen_conf):
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        prompt = self._prepare_prompt(system, history, gen_conf)
        chat_gen = self._stream_response("/chat", prompt)
        ans = next(chat_gen)
        total_tokens = next(chat_gen)
        return ans, total_tokens

    def chat_streamly(self, system, history, gen_conf):
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        prompt = self._prepare_prompt(system, history, gen_conf)
        return self._stream_response("/stream", prompt)


class VolcEngineChat(Base):
    def __init__(self, key, model_name, base_url='https://ark.cn-beijing.volces.com/api/v3'):
        """
        Since we do not want to modify the original database fields, and the
        VolcEngine authentication method is quite special, we bundle
        ark_api_key and ep_id into `key` as a JSON dictionary and parse it
        here for use. model_name is for display only.
        """
        base_url = base_url if base_url else 'https://ark.cn-beijing.volces.com/api/v3'
        ark_api_key = json.loads(key).get('ark_api_key', '')
        model_name = json.loads(key).get('ep_id', '') + json.loads(key).get('endpoint_id', '')
        super().__init__(ark_api_key, model_name, base_url)
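
# Expected `key` payload for VolcEngineChat, inferred from the parsing above
# (values are placeholders):
#
#   key = json.dumps({"ark_api_key": "<ark-key>", "ep_id": "ep-20240101-xxxx"})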


class MiniMaxChat(Base):
    def __init__(
        self,
        key,
        model_name,
        base_url="https://api.minimax.chat/v1/text/chatcompletion_v2",
    ):
        if not base_url:
            base_url = "https://api.minimax.chat/v1/text/chatcompletion_v2"
        self.base_url = base_url
        self.model_name = model_name
        self.api_key = key

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        payload = json.dumps(
            {"model": self.model_name, "messages": history, **gen_conf}
        )
        try:
            response = requests.request(
                "POST", url=self.base_url, headers=headers, data=payload
            )
            response = response.json()
            ans = response["choices"][0]["message"]["content"].strip()
            if response["choices"][0]["finish_reason"] == "length":
                if is_chinese(ans):
                    ans += LENGTH_NOTIFICATION_CN
                else:
                    ans += LENGTH_NOTIFICATION_EN
            return ans, self.total_token_count(response)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        ans = ""
        total_tokens = 0
        try:
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            }
            payload = json.dumps(
                {
                    "model": self.model_name,
                    "messages": history,
                    "stream": True,
                    **gen_conf,
                }
            )
            response = requests.request(
                "POST",
                url=self.base_url,
                headers=headers,
                data=payload,
            )
            for resp in response.text.split("\n\n")[:-1]:
                resp = json.loads(resp[6:])
                text = ""
                if "choices" in resp and "delta" in resp["choices"][0]:
                    text = resp["choices"][0]["delta"]["content"]
                ans += text
                tol = self.total_token_count(resp)
                if not tol:
                    total_tokens += num_tokens_from_string(text)
                else:
                    total_tokens = tol
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
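
# Note: the streaming parser above assumes a server-sent-events body, i.e.
# "data: {...}" blocks separated by blank lines; resp[6:] strips the
# "data: " prefix before json.loads. A well-formed chunk would look like:
#
#   data: {"choices": [{"delta": {"content": "Hel"}}], "usage": {"total_tokens": 7}}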


class MistralChat(Base):
    def __init__(self, key, model_name, base_url=None):
        from mistralai.client import MistralClient
        self.client = MistralClient(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        try:
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                **gen_conf)
            ans = response.choices[0].message.content
            if response.choices[0].finish_reason == "length":
                if is_chinese(ans):
                    ans += LENGTH_NOTIFICATION_CN
                else:
                    ans += LENGTH_NOTIFICATION_EN
            return ans, self.total_token_count(response)
        except openai.APIError as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat_stream(
                model=self.model_name,
                messages=history,
                **gen_conf)
            for resp in response:
                if not resp.choices or not resp.choices[0].delta.content:
                    continue
                ans += resp.choices[0].delta.content
                total_tokens += 1
                if resp.choices[0].finish_reason == "length":
                    if is_chinese(ans):
                        ans += LENGTH_NOTIFICATION_CN
                    else:
                        ans += LENGTH_NOTIFICATION_EN
                yield ans
        except openai.APIError as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens


class BedrockChat(Base):
    def __init__(self, key, model_name, **kwargs):
        import boto3
        self.bedrock_ak = json.loads(key).get('bedrock_ak', '')
        self.bedrock_sk = json.loads(key).get('bedrock_sk', '')
        self.bedrock_region = json.loads(key).get('bedrock_region', '')
        self.model_name = model_name

        if self.bedrock_ak == '' or self.bedrock_sk == '' or self.bedrock_region == '':
            # Try to create a client using the default credentials (AWS_PROFILE, AWS_DEFAULT_REGION, etc.)
            self.client = boto3.client('bedrock-runtime')
        else:
            self.client = boto3.client(service_name='bedrock-runtime', region_name=self.bedrock_region,
                                       aws_access_key_id=self.bedrock_ak, aws_secret_access_key=self.bedrock_sk)

    def chat(self, system, history, gen_conf):
        from botocore.exceptions import ClientError
        for k in list(gen_conf.keys()):
            if k not in ["top_p", "max_tokens"]:
                del gen_conf[k]
        for item in history:
            if not isinstance(item["content"], list) and not isinstance(item["content"], tuple):
                item["content"] = [{"text": item["content"]}]
        try:
            # Send the message to the model, using a basic inference configuration.
            response = self.client.converse(
                modelId=self.model_name,
                messages=history,
                inferenceConfig=gen_conf,
                system=[{"text": (system if system else "Answer the user's message.")}],
            )
            # Extract and return the response text.
            ans = response["output"]["message"]["content"][0]["text"]
            return ans, num_tokens_from_string(ans)
        except (ClientError, Exception) as e:
            return f"ERROR: Can't invoke '{self.model_name}'. Reason: {e}", 0

    def chat_streamly(self, system, history, gen_conf):
        from botocore.exceptions import ClientError
        for k in list(gen_conf.keys()):
            if k not in ["top_p", "max_tokens"]:
                del gen_conf[k]
        for item in history:
            if not isinstance(item["content"], list) and not isinstance(item["content"], tuple):
                item["content"] = [{"text": item["content"]}]
        if self.model_name.split('.')[0] == 'ai21':
            try:
                response = self.client.converse(
                    modelId=self.model_name,
                    messages=history,
                    inferenceConfig=gen_conf,
                    system=[{"text": (system if system else "Answer the user's message.")}]
                )
                ans = response["output"]["message"]["content"][0]["text"]
                return ans, num_tokens_from_string(ans)
            except (ClientError, Exception) as e:
                return f"ERROR: Can't invoke '{self.model_name}'. Reason: {e}", 0

        ans = ""
        try:
            # Send the message to the model, using a basic inference configuration.
            streaming_response = self.client.converse_stream(
                modelId=self.model_name,
                messages=history,
                inferenceConfig=gen_conf,
                system=[{"text": (system if system else "Answer the user's message.")}]
            )
            # Extract and yield the streamed response text in real time.
            for resp in streaming_response["stream"]:
                if "contentBlockDelta" in resp:
                    ans += resp["contentBlockDelta"]["delta"]["text"]
                    yield ans
        except (ClientError, Exception) as e:
            yield ans + f"ERROR: Can't invoke '{self.model_name}'. Reason: {e}"

        yield num_tokens_from_string(ans)
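
# Note: the converse API expects message content as a list of blocks, which is
# why plain strings are wrapped above, e.g.:
#
#   {"role": "user", "content": "Hi"}  ->  {"role": "user", "content": [{"text": "Hi"}]}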


class GeminiChat(Base):
    def __init__(self, key, model_name, base_url=None):
        from google.generativeai import client, GenerativeModel
        client.configure(api_key=key)
        _client = client.get_default_generative_client()
        self.model_name = 'models/' + model_name
        self.model = GenerativeModel(model_name=self.model_name)
        self.model._client = _client

    def chat(self, system, history, gen_conf):
        from google.generativeai.types import content_types
        if system:
            self.model._system_instruction = content_types.to_content(system)
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        for item in history:
            if 'role' in item and item['role'] == 'assistant':
                item['role'] = 'model'
            if 'role' in item and item['role'] == 'system':
                item['role'] = 'user'
            if 'content' in item:
                item['parts'] = item.pop('content')
        try:
            response = self.model.generate_content(
                history,
                generation_config=gen_conf)
            ans = response.text
            return ans, response.usage_metadata.total_token_count
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        from google.generativeai.types import content_types
        if system:
            self.model._system_instruction = content_types.to_content(system)
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        for item in history:
            if 'role' in item and item['role'] == 'assistant':
                item['role'] = 'model'
            if 'content' in item:
                item['parts'] = item.pop('content')
        ans = ""
        try:
            response = self.model.generate_content(
                history,
                generation_config=gen_conf, stream=True)
            for resp in response:
                ans += resp.text
                yield ans
            yield response._chunks[-1].usage_metadata.total_token_count
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)
            yield 0


class GroqChat(Base):
    def __init__(self, key, model_name, base_url=''):
        from groq import Groq
        self.client = Groq(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        ans = ""
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf
            )
            ans = response.choices[0].message.content
            if response.choices[0].finish_reason == "length":
                if is_chinese(ans):
                    ans += LENGTH_NOTIFICATION_CN
                else:
                    ans += LENGTH_NOTIFICATION_EN
            return ans, self.total_token_count(response)
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf
            )
            for resp in response:
                if not resp.choices or not resp.choices[0].delta.content:
                    continue
                ans += resp.choices[0].delta.content
                total_tokens += 1
                if resp.choices[0].finish_reason == "length":
                    if is_chinese(ans):
                        ans += LENGTH_NOTIFICATION_CN
                    else:
                        ans += LENGTH_NOTIFICATION_EN
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens


## openrouter
class OpenRouterChat(Base):
    def __init__(self, key, model_name, base_url="https://openrouter.ai/api/v1"):
        if not base_url:
            base_url = "https://openrouter.ai/api/v1"
        super().__init__(key, model_name, base_url)


class StepFunChat(Base):
    def __init__(self, key, model_name, base_url="https://api.stepfun.com/v1"):
        if not base_url:
            base_url = "https://api.stepfun.com/v1"
        super().__init__(key, model_name, base_url)


class NvidiaChat(Base):
    def __init__(self, key, model_name, base_url="https://integrate.api.nvidia.com/v1"):
        if not base_url:
            base_url = "https://integrate.api.nvidia.com/v1"
        super().__init__(key, model_name, base_url)


class LmStudioChat(Base):
    def __init__(self, key, model_name, base_url):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        if base_url.split("/")[-1] != "v1":
            base_url = os.path.join(base_url, "v1")
        self.client = OpenAI(api_key="lm-studio", base_url=base_url)
        self.model_name = model_name


class OpenAI_APIChat(Base):
    def __init__(self, key, model_name, base_url):
        if not base_url:
            raise ValueError("url cannot be None")
        model_name = model_name.split("___")[0]
        super().__init__(key, model_name, base_url)


class PPIOChat(Base):
    def __init__(self, key, model_name, base_url="https://api.ppinfra.com/v3/openai"):
        if not base_url:
            base_url = "https://api.ppinfra.com/v3/openai"
        super().__init__(key, model_name, base_url)


class CoHereChat(Base):
    def __init__(self, key, model_name, base_url=""):
        from cohere import Client
        self.client = Client(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if "top_p" in gen_conf:
            gen_conf["p"] = gen_conf.pop("top_p")
        if "frequency_penalty" in gen_conf and "presence_penalty" in gen_conf:
            gen_conf.pop("presence_penalty")
        for item in history:
            if "role" in item and item["role"] == "user":
                item["role"] = "USER"
            if "role" in item and item["role"] == "assistant":
                item["role"] = "CHATBOT"
            if "content" in item:
                item["message"] = item.pop("content")
        mes = history.pop()["message"]
        ans = ""
        try:
            response = self.client.chat(
                model=self.model_name, chat_history=history, message=mes, **gen_conf
            )
            ans = response.text
            if response.finish_reason == "MAX_TOKENS":
                ans += (
                    "...\nFor the content length reason, it stopped, continue?"
                    if is_english([ans])
                    else "······\n由于长度的原因,回答被截断了,要继续吗?"
                )
            return (
                ans,
                response.meta.tokens.input_tokens + response.meta.tokens.output_tokens,
            )
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if "top_p" in gen_conf:
            gen_conf["p"] = gen_conf.pop("top_p")
        if "frequency_penalty" in gen_conf and "presence_penalty" in gen_conf:
            gen_conf.pop("presence_penalty")
        for item in history:
            if "role" in item and item["role"] == "user":
                item["role"] = "USER"
            if "role" in item and item["role"] == "assistant":
                item["role"] = "CHATBOT"
            if "content" in item:
                item["message"] = item.pop("content")
        mes = history.pop()["message"]
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat_stream(
                model=self.model_name, chat_history=history, message=mes, **gen_conf
            )
            for resp in response:
                if resp.event_type == "text-generation":
                    ans += resp.text
                    total_tokens += num_tokens_from_string(resp.text)
                elif resp.event_type == "stream-end":
                    if resp.finish_reason == "MAX_TOKENS":
                        ans += (
                            "...\nFor the content length reason, it stopped, continue?"
                            if is_english([ans])
                            else "······\n由于长度的原因,回答被截断了,要继续吗?"
                        )
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
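
# Note: Cohere's chat API takes the latest user turn as `message` and the
# preceding turns as `chat_history` with USER/CHATBOT roles; that is what the
# role remapping and the final history.pop() above implement.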


class LeptonAIChat(Base):
    def __init__(self, key, model_name, base_url=None):
        if not base_url:
            base_url = os.path.join("https://" + model_name + ".lepton.run", "api", "v1")
        super().__init__(key, model_name, base_url)


class TogetherAIChat(Base):
    def __init__(self, key, model_name, base_url="https://api.together.xyz/v1"):
        if not base_url:
            base_url = "https://api.together.xyz/v1"
        super().__init__(key, model_name, base_url)


class PerfXCloudChat(Base):
    def __init__(self, key, model_name, base_url="https://cloud.perfxlab.cn/v1"):
        if not base_url:
            base_url = "https://cloud.perfxlab.cn/v1"
        super().__init__(key, model_name, base_url)


class UpstageChat(Base):
    def __init__(self, key, model_name, base_url="https://api.upstage.ai/v1/solar"):
        if not base_url:
            base_url = "https://api.upstage.ai/v1/solar"
        super().__init__(key, model_name, base_url)


class NovitaAIChat(Base):
    def __init__(self, key, model_name, base_url="https://api.novita.ai/v3/openai"):
        if not base_url:
            base_url = "https://api.novita.ai/v3/openai"
        super().__init__(key, model_name, base_url)


class SILICONFLOWChat(Base):
    def __init__(self, key, model_name, base_url="https://api.siliconflow.cn/v1"):
        if not base_url:
            base_url = "https://api.siliconflow.cn/v1"
        super().__init__(key, model_name, base_url)


class YiChat(Base):
    def __init__(self, key, model_name, base_url="https://api.lingyiwanwu.com/v1"):
        if not base_url:
            base_url = "https://api.lingyiwanwu.com/v1"
        super().__init__(key, model_name, base_url)


class ReplicateChat(Base):
    def __init__(self, key, model_name, base_url=None):
        from replicate.client import Client
        self.model_name = model_name
        self.client = Client(api_token=key)
        self.system = ""

    def chat(self, system, history, gen_conf):
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if system:
            self.system = system
        prompt = "\n".join(
            [item["role"] + ":" + item["content"] for item in history[-5:]]
        )
        ans = ""
        try:
            response = self.client.run(
                self.model_name,
                input={"system_prompt": self.system, "prompt": prompt, **gen_conf},
            )
            ans = "".join(response)
            return ans, num_tokens_from_string(ans)
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if system:
            self.system = system
        prompt = "\n".join(
            [item["role"] + ":" + item["content"] for item in history[-5:]]
        )
        ans = ""
        try:
            response = self.client.run(
                self.model_name,
                input={"system_prompt": self.system, "prompt": prompt, **gen_conf},
            )
            for resp in response:
                ans += resp
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield num_tokens_from_string(ans)


class HunyuanChat(Base):
    def __init__(self, key, model_name, base_url=None):
        from tencentcloud.common import credential
        from tencentcloud.hunyuan.v20230901 import hunyuan_client

        key = json.loads(key)
        sid = key.get("hunyuan_sid", "")
        sk = key.get("hunyuan_sk", "")
        cred = credential.Credential(sid, sk)
        self.model_name = model_name
        self.client = hunyuan_client.HunyuanClient(cred, "")

    def chat(self, system, history, gen_conf):
        from tencentcloud.hunyuan.v20230901 import models
        from tencentcloud.common.exception.tencent_cloud_sdk_exception import (
            TencentCloudSDKException,
        )

        _gen_conf = {}
        _history = [{k.capitalize(): v for k, v in item.items()} for item in history]
        if system:
            _history.insert(0, {"Role": "system", "Content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if "temperature" in gen_conf:
            _gen_conf["Temperature"] = gen_conf["temperature"]
        if "top_p" in gen_conf:
            _gen_conf["TopP"] = gen_conf["top_p"]
        req = models.ChatCompletionsRequest()
        params = {"Model": self.model_name, "Messages": _history, **_gen_conf}
        req.from_json_string(json.dumps(params))
        ans = ""
        try:
            response = self.client.ChatCompletions(req)
            ans = response.Choices[0].Message.Content
            return ans, response.Usage.TotalTokens
        except TencentCloudSDKException as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        from tencentcloud.hunyuan.v20230901 import models
        from tencentcloud.common.exception.tencent_cloud_sdk_exception import (
            TencentCloudSDKException,
        )

        _gen_conf = {}
        _history = [{k.capitalize(): v for k, v in item.items()} for item in history]
        if system:
            _history.insert(0, {"Role": "system", "Content": system})
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        if "temperature" in gen_conf:
            _gen_conf["Temperature"] = gen_conf["temperature"]
        if "top_p" in gen_conf:
            _gen_conf["TopP"] = gen_conf["top_p"]
        req = models.ChatCompletionsRequest()
        params = {
            "Model": self.model_name,
            "Messages": _history,
            "Stream": True,
            **_gen_conf,
        }
        req.from_json_string(json.dumps(params))
        ans = ""
        total_tokens = 0
        try:
            response = self.client.ChatCompletions(req)
            for resp in response:
                resp = json.loads(resp["data"])
                if not resp["Choices"] or not resp["Choices"][0]["Delta"]["Content"]:
                    continue
                ans += resp["Choices"][0]["Delta"]["Content"]
                total_tokens += 1
                yield ans
        except TencentCloudSDKException as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
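
# Note: the Tencent SDK expects PascalCase message fields, hence the
# k.capitalize() remapping above, e.g.:
#
#   {"role": "user", "content": "Hi"}  ->  {"Role": "user", "Content": "Hi"}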


class SparkChat(Base):
    def __init__(
        self, key, model_name, base_url="https://spark-api-open.xf-yun.com/v1"
    ):
        if not base_url:
            base_url = "https://spark-api-open.xf-yun.com/v1"
        model2version = {
            "Spark-Max": "generalv3.5",
            "Spark-Lite": "general",
            "Spark-Pro": "generalv3",
            "Spark-Pro-128K": "pro-128k",
            "Spark-4.0-Ultra": "4.0Ultra",
        }
        version2model = {v: k for k, v in model2version.items()}
        assert model_name in model2version or model_name in version2model, f"The given model name is not supported yet. Support: {list(model2version.keys())}"
        if model_name in model2version:
            model_version = model2version[model_name]
        else:
            model_version = model_name
        super().__init__(key, model_version, base_url)
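
# Note: SparkChat accepts either a display name or a raw version string:
# "Spark-Max" and "generalv3.5" both resolve to the version "generalv3.5"
# that the xf-yun endpoint expects.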


class BaiduYiyanChat(Base):
    def __init__(self, key, model_name, base_url=None):
        import qianfan

        key = json.loads(key)
        ak = key.get("yiyan_ak", "")
        sk = key.get("yiyan_sk", "")
        self.client = qianfan.ChatCompletion(ak=ak, sk=sk)
        self.model_name = model_name.lower()
        self.system = ""

    def chat(self, system, history, gen_conf):
        if system:
            self.system = system
        gen_conf["penalty_score"] = (
            (gen_conf.get("presence_penalty", 0) + gen_conf.get("frequency_penalty", 0)) / 2
        ) + 1
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        ans = ""
        try:
            response = self.client.do(
                model=self.model_name,
                messages=history,
                system=self.system,
                **gen_conf
            ).body
            ans = response['result']
            return ans, self.total_token_count(response)
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            self.system = system
        gen_conf["penalty_score"] = (
            (gen_conf.get("presence_penalty", 0) + gen_conf.get("frequency_penalty", 0)) / 2
        ) + 1
        if "max_tokens" in gen_conf:
            del gen_conf["max_tokens"]
        ans = ""
        total_tokens = 0
        try:
            response = self.client.do(
                model=self.model_name,
                messages=history,
                system=self.system,
                stream=True,
                **gen_conf
            )
            for resp in response:
                resp = resp.body
                ans += resp['result']
                total_tokens = self.total_token_count(resp)
                yield ans
        except Exception as e:
            # This is a generator, so report errors by yielding the message
            # rather than returning a (text, count) tuple as the non-streaming
            # path does.
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
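
# Note: qianfan takes a single penalty_score whose neutral value is 1, so the
# two OpenAI-style penalties are averaged and shifted, e.g.
# presence_penalty=0.4 and frequency_penalty=0.2 give
# penalty_score = (0.4 + 0.2) / 2 + 1 = 1.3.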


class AnthropicChat(Base):
    def __init__(self, key, model_name, base_url=None):
        import anthropic

        self.client = anthropic.Anthropic(api_key=key)
        self.model_name = model_name
        self.system = ""

    def chat(self, system, history, gen_conf):
        if system:
            self.system = system
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        ans = ""
        try:
            response = self.client.messages.create(
                model=self.model_name,
                messages=history,
                system=self.system,
                stream=False,
                **gen_conf,
            ).to_dict()
            ans = response["content"][0]["text"]
            if response["stop_reason"] == "max_tokens":
                ans += (
                    "...\nFor the content length reason, it stopped, continue?"
                    if is_english([ans])
                    else "······\n由于长度的原因,回答被截断了,要继续吗?"
                )
            return (
                ans,
                response["usage"]["input_tokens"] + response["usage"]["output_tokens"],
            )
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            self.system = system
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        ans = ""
        total_tokens = 0
        try:
            response = self.client.messages.create(
                model=self.model_name,
                messages=history,
                system=self.system,
                stream=True,
                **gen_conf,
            )
            for res in response:
                if res.type == 'content_block_delta':
                    text = res.delta.text
                    ans += text
                    total_tokens += num_tokens_from_string(text)
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens


class GoogleChat(Base):
    def __init__(self, key, model_name, base_url=None):
        from google.oauth2 import service_account
        import base64

        key = json.loads(key)
        access_token = json.loads(
            base64.b64decode(key.get("google_service_account_key", ""))
        )
        project_id = key.get("google_project_id", "")
        region = key.get("google_region", "")
        scopes = ["https://www.googleapis.com/auth/cloud-platform"]
        self.model_name = model_name
        self.system = ""

        if "claude" in self.model_name:
            from anthropic import AnthropicVertex
            from google.auth.transport.requests import Request

            if access_token:
                credits = service_account.Credentials.from_service_account_info(
                    access_token, scopes=scopes
                )
                request = Request()
                credits.refresh(request)
                token = credits.token
                self.client = AnthropicVertex(
                    region=region, project_id=project_id, access_token=token
                )
            else:
                self.client = AnthropicVertex(region=region, project_id=project_id)
        else:
            from google.cloud import aiplatform
            import vertexai.generative_models as glm

            if access_token:
                credits = service_account.Credentials.from_service_account_info(
                    access_token
                )
                aiplatform.init(
                    credentials=credits, project=project_id, location=region
                )
            else:
                aiplatform.init(project=project_id, location=region)
            self.client = glm.GenerativeModel(model_name=self.model_name)

    def chat(self, system, history, gen_conf):
        if system:
            self.system = system

        if "claude" in self.model_name:
            if "max_tokens" in gen_conf:
                del gen_conf["max_tokens"]
            try:
                response = self.client.messages.create(
                    model=self.model_name,
                    messages=history,
                    system=self.system,
                    stream=False,
                    **gen_conf,
                ).json()
                ans = response["content"][0]["text"]
                if response["stop_reason"] == "max_tokens":
                    ans += (
                        "...\nFor the content length reason, it stopped, continue?"
                        if is_english([ans])
                        else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    )
                return (
                    ans,
                    response["usage"]["input_tokens"]
                    + response["usage"]["output_tokens"],
                )
            except Exception as e:
                return "\n**ERROR**: " + str(e), 0
        else:
            self.client._system_instruction = self.system
            if "max_tokens" in gen_conf:
                gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
            for k in list(gen_conf.keys()):
                if k not in ["temperature", "top_p", "max_output_tokens"]:
                    del gen_conf[k]
            for item in history:
                if "role" in item and item["role"] == "assistant":
                    item["role"] = "model"
                if "content" in item:
                    item["parts"] = item.pop("content")
            try:
                response = self.client.generate_content(
                    history, generation_config=gen_conf
                )
                ans = response.text
                return ans, response.usage_metadata.total_token_count
            except Exception as e:
                return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            self.system = system

        if "claude" in self.model_name:
            if "max_tokens" in gen_conf:
                del gen_conf["max_tokens"]
            ans = ""
            total_tokens = 0
            try:
                response = self.client.messages.create(
                    model=self.model_name,
                    messages=history,
                    system=self.system,
                    stream=True,
                    **gen_conf,
                )
                for res in response.iter_lines():
                    res = res.decode("utf-8")
                    if "content_block_delta" in res and "data" in res:
                        text = json.loads(res[6:])["delta"]["text"]
                        ans += text
                        total_tokens += num_tokens_from_string(text)
                        yield ans
            except Exception as e:
                yield ans + "\n**ERROR**: " + str(e)

            yield total_tokens
        else:
            self.client._system_instruction = self.system
            if "max_tokens" in gen_conf:
                gen_conf["max_output_tokens"] = gen_conf["max_tokens"]
            for k in list(gen_conf.keys()):
                if k not in ["temperature", "top_p", "max_output_tokens"]:
                    del gen_conf[k]
            for item in history:
                if "role" in item and item["role"] == "assistant":
                    item["role"] = "model"
                if "content" in item:
                    item["parts"] = item.pop("content")
            ans = ""
            try:
                # In this branch self.client is the GenerativeModel instance
                # created in __init__.
                response = self.client.generate_content(
                    history, generation_config=gen_conf, stream=True
                )
                for resp in response:
                    ans += resp.text
                    yield ans
                yield response._chunks[-1].usage_metadata.total_token_count
            except Exception as e:
                yield ans + "\n**ERROR**: " + str(e)
                yield 0


class GPUStackChat(Base):
    def __init__(self, key=None, model_name="", base_url=""):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        if base_url.split("/")[-1] != "v1-openai":
            base_url = os.path.join(base_url, "v1-openai")
        super().__init__(key, model_name, base_url)
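
# Minimal smoke-test sketch (illustrative only; host, key, and model are
# placeholders, and this block is not part of the original module):
#
#   if __name__ == "__main__":
#       llm = OllamaChat(None, "llama3", base_url="http://localhost:11434")
#       print(llm.chat("You are terse.", [{"role": "user", "content": "ping"}], {}))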