
chat_model.py

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from abc import ABC

import openai
from openai import OpenAI
from zhipuai import ZhipuAI
from dashscope import Generation
from ollama import Client
from volcengine.maas.v2 import MaasService

from rag.nlp import is_english
from rag.utils import num_tokens_from_string
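

# Every chat-model wrapper below exposes the same interface:
#   chat(system, history, gen_conf)          -> (answer: str, token_count: int)
#   chat_streamly(system, history, gen_conf) -> yields the accumulated answer after
#                                               each chunk, then the token count as
#                                               the final item.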
class Base(ABC):
    def __init__(self, key, model_name, base_url):
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf)
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except openai.APIError as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf)
            for resp in response:
                if not resp.choices or not resp.choices[0].delta.content:
                    continue
                ans += resp.choices[0].delta.content
                # Rough token estimate: one per streamed chunk.
                total_tokens += 1
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                yield ans
        except openai.APIError as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens


class GptTurbo(Base):
    def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
        if not base_url:
            base_url = "https://api.openai.com/v1"
        super().__init__(key, model_name, base_url)


class MoonshotChat(Base):
    def __init__(self, key, model_name="moonshot-v1-8k", base_url="https://api.moonshot.cn/v1"):
        if not base_url:
            base_url = "https://api.moonshot.cn/v1"
        super().__init__(key, model_name, base_url)


class XinferenceChat(Base):
    def __init__(self, key=None, model_name="", base_url=""):
        # The key is not used by Xinference; a placeholder keeps the OpenAI client happy.
        key = "xxx"
        super().__init__(key, model_name, base_url)


class DeepSeekChat(Base):
    def __init__(self, key, model_name="deepseek-chat", base_url="https://api.deepseek.com/v1"):
        if not base_url:
            base_url = "https://api.deepseek.com/v1"
        super().__init__(key, model_name, base_url)


class BaiChuanChat(Base):
    def __init__(self, key, model_name="Baichuan3-Turbo", base_url="https://api.baichuan-ai.com/v1"):
        if not base_url:
            base_url = "https://api.baichuan-ai.com/v1"
        super().__init__(key, model_name, base_url)

    @staticmethod
    def _format_params(params):
        return {
            "temperature": params.get("temperature", 0.3),
            "max_tokens": params.get("max_tokens", 2048),
            "top_p": params.get("top_p", 0.85),
        }

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                extra_body={
                    "tools": [{
                        "type": "web_search",
                        "web_search": {
                            "enable": True,
                            "search_mode": "performance_first"
                        }
                    }]
                },
                **self._format_params(gen_conf))
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except openai.APIError as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                extra_body={
                    "tools": [{
                        "type": "web_search",
                        "web_search": {
                            "enable": True,
                            "search_mode": "performance_first"
                        }
                    }]
                },
                stream=True,
                **self._format_params(gen_conf))
            for resp in response:
                if resp.choices[0].finish_reason == "stop":
                    if not resp.choices[0].delta.content:
                        continue
                    total_tokens = resp.usage.get('total_tokens', 0)
                if not resp.choices[0].delta.content:
                    continue
                ans += resp.choices[0].delta.content
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens


class QWenChat(Base):
    def __init__(self, key, model_name=Generation.Models.qwen_turbo, **kwargs):
        import dashscope
        dashscope.api_key = key
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        from http import HTTPStatus
        if system:
            history.insert(0, {"role": "system", "content": system})
        response = Generation.call(
            self.model_name,
            messages=history,
            result_format='message',
            **gen_conf
        )
        ans = ""
        tk_count = 0
        if response.status_code == HTTPStatus.OK:
            ans += response.output.choices[0]['message']['content']
            tk_count += response.usage.total_tokens
            if response.output.choices[0].get("finish_reason", "") == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, tk_count

        return "**ERROR**: " + response.message, tk_count

    def chat_streamly(self, system, history, gen_conf):
        from http import HTTPStatus
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        tk_count = 0
        try:
            response = Generation.call(
                self.model_name,
                messages=history,
                result_format='message',
                stream=True,
                **gen_conf
            )
            for resp in response:
                if resp.status_code == HTTPStatus.OK:
                    ans = resp.output.choices[0]['message']['content']
                    tk_count = resp.usage.total_tokens
                    if resp.output.choices[0].get("finish_reason", "") == "length":
                        ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                            [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    yield ans
                else:
                    yield ans + "\n**ERROR**: " + resp.message if str(resp.message).find("Access") < 0 else "Out of credit. Please set the API key in **settings > Model providers.**"
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count


class ZhipuChat(Base):
    def __init__(self, key, model_name="glm-3-turbo", **kwargs):
        self.client = ZhipuAI(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            # Drop OpenAI-style penalty parameters that the ZhipuAI API does not accept.
            if "presence_penalty" in gen_conf:
                del gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                del gen_conf["frequency_penalty"]
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf
            )
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        ans = ""
        tk_count = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf
            )
            for resp in response:
                if not resp.choices[0].delta.content:
                    continue
                delta = resp.choices[0].delta.content
                ans += delta
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    tk_count = resp.usage.total_tokens
                if resp.choices[0].finish_reason == "stop":
                    tk_count = resp.usage.total_tokens
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count


class OllamaChat(Base):
    def __init__(self, key, model_name, **kwargs):
        self.client = Client(host=kwargs["base_url"])
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            # Map the generic gen_conf keys onto Ollama's option names.
            options = {}
            if "temperature" in gen_conf:
                options["temperature"] = gen_conf["temperature"]
            if "max_tokens" in gen_conf:
                options["num_predict"] = gen_conf["max_tokens"]
            if "top_p" in gen_conf:
                options["top_p"] = gen_conf["top_p"]
            if "presence_penalty" in gen_conf:
                options["presence_penalty"] = gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                options["frequency_penalty"] = gen_conf["frequency_penalty"]
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                options=options,
                keep_alive=-1
            )
            ans = response["message"]["content"].strip()
            return ans, response["eval_count"] + response.get("prompt_eval_count", 0)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        options = {}
        if "temperature" in gen_conf:
            options["temperature"] = gen_conf["temperature"]
        if "max_tokens" in gen_conf:
            options["num_predict"] = gen_conf["max_tokens"]
        if "top_p" in gen_conf:
            options["top_p"] = gen_conf["top_p"]
        if "presence_penalty" in gen_conf:
            options["presence_penalty"] = gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            options["frequency_penalty"] = gen_conf["frequency_penalty"]
        ans = ""
        try:
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                stream=True,
                options=options,
                keep_alive=-1
            )
            for resp in response:
                if resp["done"]:
                    yield resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
                ans += resp["message"]["content"]
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield 0


class LocalLLM(Base):
    class RPCProxy:
        def __init__(self, host, port):
            self.host = host
            self.port = int(port)
            self.__conn()

        def __conn(self):
            from multiprocessing.connection import Client
            self._connection = Client(
                (self.host, self.port), authkey=b'infiniflow-token4kevinhu')

        def __getattr__(self, name):
            import pickle

            def do_rpc(*args, **kwargs):
                # Send the call as a pickled (method name, args, kwargs) tuple and
                # reconnect on failure, retrying a few times before giving up.
                for _ in range(3):
                    try:
                        self._connection.send(
                            pickle.dumps((name, args, kwargs)))
                        return pickle.loads(self._connection.recv())
                    except Exception:
                        self.__conn()
                raise Exception("RPC connection lost!")

            return do_rpc

    def __init__(self, key, model_name="glm-3-turbo"):
        self.client = LocalLLM.RPCProxy("127.0.0.1", 7860)

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            ans = self.client.chat(
                history,
                gen_conf
            )
            return ans, num_tokens_from_string(ans)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        token_count = 0
        answer = ""
        try:
            for ans in self.client.chat_streamly(history, gen_conf):
                answer += ans
                token_count += 1
                yield answer
        except Exception as e:
            yield answer + "\n**ERROR**: " + str(e)

        yield token_count


class VolcEngineChat(Base):
    def __init__(self, key, model_name, base_url):
        """
        VolcEngine authentication is unusual, and the original database fields are
        left untouched: ak, sk and ep_id are assembled into the api_key field as a
        dictionary-literal string and parsed here. `model_name` is for display only.
        """
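        # Illustrative only: `key` is expected to look like
        # "{'volc_ak': '<ACCESS_KEY>', 'volc_sk': '<SECRET_KEY>', 'ep_id': '<ENDPOINT_ID>'}"
        # (placeholder values, not real credentials).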
        self.client = MaasService('maas-api.ml-platform-cn-beijing.volces.com', 'cn-beijing')
        self.volc_ak = eval(key).get('volc_ak', '')
        self.volc_sk = eval(key).get('volc_sk', '')
        self.client.set_ak(self.volc_ak)
        self.client.set_sk(self.volc_sk)
        self.model_name = eval(key).get('ep_id', '')

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            req = {
                "parameters": {
                    "min_new_tokens": gen_conf.get("min_new_tokens", 1),
                    "top_k": gen_conf.get("top_k", 0),
                    "max_prompt_tokens": gen_conf.get("max_prompt_tokens", 30000),
                    "temperature": gen_conf.get("temperature", 0.1),
                    "max_new_tokens": gen_conf.get("max_tokens", 1000),
                    "top_p": gen_conf.get("top_p", 0.3),
                },
                "messages": history
            }
            response = self.client.chat(self.model_name, req)
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0
    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        tk_count = 0
        try:
            req = {
                "parameters": {
                    "min_new_tokens": gen_conf.get("min_new_tokens", 1),
                    "top_k": gen_conf.get("top_k", 0),
                    "max_prompt_tokens": gen_conf.get("max_prompt_tokens", 30000),
                    "temperature": gen_conf.get("temperature", 0.1),
                    "max_new_tokens": gen_conf.get("max_tokens", 1000),
                    "top_p": gen_conf.get("top_p", 0.3),
                },
                "messages": history
            }
            stream = self.client.stream_chat(self.model_name, req)
            for resp in stream:
                if not resp.choices[0].message.content:
                    continue
                ans += resp.choices[0].message.content
                if resp.choices[0].finish_reason == "stop":
                    tk_count = resp.usage.total_tokens
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count
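

# A minimal usage sketch (illustrative only; the API key and gen_conf values
# below are placeholders, not part of this module):
#
#     model = GptTurbo("sk-...", model_name="gpt-3.5-turbo")
#     answer, tokens = model.chat(
#         "You are a helpful assistant.",
#         [{"role": "user", "content": "Hello"}],
#         {"temperature": 0.7, "max_tokens": 512},
#     )
#     for item in model.chat_streamly(None, [{"role": "user", "content": "Hi"}],
#                                     {"temperature": 0.7}):
#         ...  # each item is the accumulated answer; the final item is the token count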