
chat_model.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zhipuai import ZhipuAI
from dashscope import Generation
from abc import ABC
from openai import OpenAI
import openai
from ollama import Client
from volcengine.maas.v2 import MaasService
from rag.nlp import is_english
from rag.utils import num_tokens_from_string


class Base(ABC):
    # Thin wrapper around an OpenAI-compatible chat-completions endpoint.
    def __init__(self, key, model_name, base_url):
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf)
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except openai.APIError as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf)
            for resp in response:
                if not resp.choices or not resp.choices[0].delta.content:
                    continue
                ans += resp.choices[0].delta.content
                # Streamed responses carry no usage info, so approximate the
                # token count by counting chunks.
                total_tokens += 1
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                yield ans
        except openai.APIError as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens


class GptTurbo(Base):
    def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
        if not base_url:
            base_url = "https://api.openai.com/v1"
        super().__init__(key, model_name, base_url)


class MoonshotChat(Base):
    def __init__(self, key, model_name="moonshot-v1-8k", base_url="https://api.moonshot.cn/v1"):
        if not base_url:
            base_url = "https://api.moonshot.cn/v1"
        super().__init__(key, model_name, base_url)


class XinferenceChat(Base):
    def __init__(self, key=None, model_name="", base_url=""):
        # A local Xinference server does not need a real API key, but the
        # OpenAI client requires a non-empty one, so a placeholder is used.
        key = "xxx"
        super().__init__(key, model_name, base_url)


class DeepSeekChat(Base):
    def __init__(self, key, model_name="deepseek-chat", base_url="https://api.deepseek.com/v1"):
        if not base_url:
            base_url = "https://api.deepseek.com/v1"
        super().__init__(key, model_name, base_url)


class QWenChat(Base):
    def __init__(self, key, model_name=Generation.Models.qwen_turbo, **kwargs):
        import dashscope
        dashscope.api_key = key
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        from http import HTTPStatus
        if system:
            history.insert(0, {"role": "system", "content": system})
        response = Generation.call(
            self.model_name,
            messages=history,
            result_format='message',
            **gen_conf
        )
        ans = ""
        tk_count = 0
        if response.status_code == HTTPStatus.OK:
            ans += response.output.choices[0]['message']['content']
            tk_count += response.usage.total_tokens
            if response.output.choices[0].get("finish_reason", "") == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, tk_count

        return "**ERROR**: " + response.message, tk_count

    def chat_streamly(self, system, history, gen_conf):
        from http import HTTPStatus
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        tk_count = 0  # initialized up front so the final yield cannot fail
        try:
            response = Generation.call(
                self.model_name,
                messages=history,
                result_format='message',
                stream=True,
                **gen_conf
            )
            for resp in response:
                if resp.status_code == HTTPStatus.OK:
                    ans = resp.output.choices[0]['message']['content']
                    tk_count = resp.usage.total_tokens
                    if resp.output.choices[0].get("finish_reason", "") == "length":
                        ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                            [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    yield ans
                else:
                    yield ans + "\n**ERROR**: " + resp.message if str(resp.message).find("Access") < 0 else "Out of credit. Please set the API key in **settings > Model providers.**"
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count


class ZhipuChat(Base):
    def __init__(self, key, model_name="glm-3-turbo", **kwargs):
        self.client = ZhipuAI(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            # The ZhipuAI API does not accept these OpenAI-style penalty
            # parameters, so drop them.
            if "presence_penalty" in gen_conf:
                del gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                del gen_conf["frequency_penalty"]
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf
            )
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        ans = ""
        tk_count = 0  # initialized up front so the final yield cannot fail
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf
            )
            for resp in response:
                if not resp.choices[0].delta.content:
                    continue
                delta = resp.choices[0].delta.content
                ans += delta
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    tk_count = resp.usage.total_tokens
                if resp.choices[0].finish_reason == "stop":
                    tk_count = resp.usage.total_tokens
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count


class OllamaChat(Base):
    def __init__(self, key, model_name, **kwargs):
        # Ollama runs locally; no API key is needed, only the host URL.
        self.client = Client(host=kwargs["base_url"])
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            # Map the generic generation config onto Ollama's option names.
            options = {}
            if "temperature" in gen_conf:
                options["temperature"] = gen_conf["temperature"]
            if "max_tokens" in gen_conf:
                options["num_predict"] = gen_conf["max_tokens"]
            if "top_p" in gen_conf:
                options["top_p"] = gen_conf["top_p"]
            if "presence_penalty" in gen_conf:
                options["presence_penalty"] = gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                options["frequency_penalty"] = gen_conf["frequency_penalty"]
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                options=options
            )
            ans = response["message"]["content"].strip()
            return ans, response["eval_count"] + response.get("prompt_eval_count", 0)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        options = {}
        if "temperature" in gen_conf:
            options["temperature"] = gen_conf["temperature"]
        if "max_tokens" in gen_conf:
            options["num_predict"] = gen_conf["max_tokens"]
        if "top_p" in gen_conf:
            options["top_p"] = gen_conf["top_p"]
        if "presence_penalty" in gen_conf:
            options["presence_penalty"] = gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            options["frequency_penalty"] = gen_conf["frequency_penalty"]
        ans = ""
        try:
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                stream=True,
                options=options
            )
            for resp in response:
                if resp["done"]:
                    # The final chunk carries the token usage counts.
                    yield resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
                ans += resp["message"]["content"]
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield 0


class LocalLLM(Base):
    class RPCProxy:
        # Minimal RPC client that forwards method calls to a locally running
        # model server over a multiprocessing connection.
        def __init__(self, host, port):
            self.host = host
            self.port = int(port)
            self.__conn()

        def __conn(self):
            from multiprocessing.connection import Client
            self._connection = Client(
                (self.host, self.port), authkey=b'infiniflow-token4kevinhu')

        def __getattr__(self, name):
            import pickle

            def do_rpc(*args, **kwargs):
                # Retry up to three times, reconnecting after each failure.
                for _ in range(3):
                    try:
                        self._connection.send(
                            pickle.dumps((name, args, kwargs)))
                        return pickle.loads(self._connection.recv())
                    except Exception:
                        self.__conn()
                raise Exception("RPC connection lost!")

            return do_rpc

    def __init__(self, key, model_name="glm-3-turbo"):
        self.client = LocalLLM.RPCProxy("127.0.0.1", 7860)

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            ans = self.client.chat(
                history,
                gen_conf
            )
            return ans, num_tokens_from_string(ans)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        token_count = 0
        answer = ""
        try:
            for ans in self.client.chat_streamly(history, gen_conf):
                answer += ans
                token_count += 1
                yield answer
        except Exception as e:
            yield answer + "\n**ERROR**: " + str(e)

        yield token_count


class VolcEngineChat(Base):
    def __init__(self, key, model_name, base_url):
        """
        Because the original database fields should stay unchanged and VolcEngine's
        authentication scheme is unusual, the ak, sk and ep_id are assembled into the
        api_key field as a dictionary string and parsed back out here.
        model_name is for display only.
        """
        self.client = MaasService('maas-api.ml-platform-cn-beijing.volces.com', 'cn-beijing')
        self.volc_ak = eval(key).get('volc_ak', '')
        self.volc_sk = eval(key).get('volc_sk', '')
        self.client.set_ak(self.volc_ak)
        self.client.set_sk(self.volc_sk)
        self.model_name = eval(key).get('ep_id', '')

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            req = {
                "parameters": {
                    "min_new_tokens": gen_conf.get("min_new_tokens", 1),
                    "top_k": gen_conf.get("top_k", 0),
                    "max_prompt_tokens": gen_conf.get("max_prompt_tokens", 30000),
                    "temperature": gen_conf.get("temperature", 0.1),
                    "max_new_tokens": gen_conf.get("max_tokens", 1000),
                    "top_p": gen_conf.get("top_p", 0.3),
                },
                "messages": history
            }
            response = self.client.chat(self.model_name, req)
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        tk_count = 0  # initialized up front so the final yield cannot fail
        try:
            req = {
                "parameters": {
                    "min_new_tokens": gen_conf.get("min_new_tokens", 1),
                    "top_k": gen_conf.get("top_k", 0),
                    "max_prompt_tokens": gen_conf.get("max_prompt_tokens", 30000),
                    "temperature": gen_conf.get("temperature", 0.1),
                    "max_new_tokens": gen_conf.get("max_tokens", 1000),
                    "top_p": gen_conf.get("top_p", 0.3),
                },
                "messages": history
            }
            stream = self.client.stream_chat(self.model_name, req)
            for resp in stream:
                if not resp.choices[0].message.content:
                    continue
                ans += resp.choices[0].message.content
                if resp.choices[0].finish_reason == "stop":
                    tk_count = resp.usage.total_tokens
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count
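

# ---------------------------------------------------------------------------
# Hypothetical usage sketch (illustration only, not part of the original
# module). Every provider class above exposes the same interface: chat()
# returns (answer, token_count), and chat_streamly() yields growing partial
# answers followed by a final token count. The API key, model name, prompts
# and generation settings below are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    mdl = GptTurbo(key="<your-api-key>", model_name="gpt-3.5-turbo")
    gen_conf = {"temperature": 0.7, "max_tokens": 128}

    # Blocking call.
    answer, tokens = mdl.chat(
        "You are a helpful assistant.",
        [{"role": "user", "content": "Say hello in one sentence."}],
        gen_conf)
    print(answer, tokens)

    # Streaming call: the last yielded value is the (approximate) token count.
    for chunk in mdl.chat_streamly(
            "You are a helpful assistant.",
            [{"role": "user", "content": "Say hello in one sentence."}],
            gen_conf):
        print(chunk)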