
chat_model.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from abc import ABC

import openai
from dashscope import Generation
from ollama import Client
from openai import OpenAI
from zhipuai import ZhipuAI

from rag.nlp import is_english
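
# Thin chat-completion wrappers: a common OpenAI-compatible `Base` plus
# provider-specific subclasses (Moonshot, Xinference, DeepSeek, Qwen via
# dashscope, ZhipuAI GLM, and Ollama).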


class Base(ABC):
    def __init__(self, key, model_name, base_url):
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf)
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except openai.APIError as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf)
            for resp in response:
                if not resp.choices[0].delta.content:
                    continue
                ans += resp.choices[0].delta.content
                # One delta per chunk, so this is an approximate token count.
                total_tokens += 1
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                yield ans
        except openai.APIError as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
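

# A minimal sketch (not part of the upstream file) of how the two entry
# points above are meant to be consumed: chat() returns an
# (answer, total_tokens) tuple, while chat_streamly() yields the growing
# answer string after each chunk and finally yields an int token count.
# The helper name `_example_drive_chat` is illustrative only.
def _example_drive_chat(model: "Base", question: str):
    history = [{"role": "user", "content": question}]
    # Blocking call: one answer plus the total token usage.
    answer, used = model.chat(
        "You are a helpful assistant.", list(history), {"temperature": 0.7})
    # Streaming call: every yield but the last is a partial answer.
    final = ""
    for chunk in model.chat_streamly("You are a helpful assistant.", list(history), {}):
        if isinstance(chunk, int):  # the trailing yield is the token count
            used = chunk
        else:
            final = chunk
    return answer, final, used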


class GptTurbo(Base):
    def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
        if not base_url:
            base_url = "https://api.openai.com/v1"
        super().__init__(key, model_name, base_url)


class MoonshotChat(Base):
    def __init__(self, key, model_name="moonshot-v1-8k", base_url="https://api.moonshot.cn/v1"):
        if not base_url:
            base_url = "https://api.moonshot.cn/v1"
        super().__init__(key, model_name, base_url)


class XinferenceChat(Base):
    def __init__(self, key=None, model_name="", base_url=""):
        # Xinference does not validate credentials, but the OpenAI client
        # requires a non-empty key, so a placeholder is used.
        key = "xxx"
        super().__init__(key, model_name, base_url)


class DeepSeekChat(Base):
    def __init__(self, key, model_name="deepseek-chat", base_url="https://api.deepseek.com/v1"):
        if not base_url:
            base_url = "https://api.deepseek.com/v1"
        super().__init__(key, model_name, base_url)
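
# The four wrappers above only pin a default model name and base_url; each
# of these providers exposes an OpenAI-compatible chat API, so all request
# handling lives in `Base`.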


class QWenChat(Base):
    def __init__(self, key, model_name=Generation.Models.qwen_turbo, **kwargs):
        import dashscope
        dashscope.api_key = key
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        from http import HTTPStatus
        if system:
            history.insert(0, {"role": "system", "content": system})
        response = Generation.call(
            self.model_name,
            messages=history,
            result_format='message',
            **gen_conf
        )
        ans = ""
        tk_count = 0
        if response.status_code == HTTPStatus.OK:
            ans += response.output.choices[0]['message']['content']
            tk_count += response.usage.total_tokens
            if response.output.choices[0].get("finish_reason", "") == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, tk_count
        return "**ERROR**: " + response.message, tk_count

    def chat_streamly(self, system, history, gen_conf):
        from http import HTTPStatus
        if system:
            history.insert(0, {"role": "system", "content": system})
        ans = ""
        # Initialized before the try block so the final yield cannot raise
        # NameError when Generation.call itself fails.
        tk_count = 0
        try:
            response = Generation.call(
                self.model_name,
                messages=history,
                result_format='message',
                stream=True,
                **gen_conf
            )
            for resp in response:
                if resp.status_code == HTTPStatus.OK:
                    ans = resp.output.choices[0]['message']['content']
                    tk_count = resp.usage.total_tokens
                    if resp.output.choices[0].get("finish_reason", "") == "length":
                        ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                            [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    yield ans
                elif str(resp.message).find("Access") < 0:
                    yield ans + "\n**ERROR**: " + resp.message
                else:
                    yield "Out of credit. Please set the API key in **settings > Model providers.**"
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count
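
# Note: dashscope appears to stream the *cumulative* answer in each chunk
# (incremental_output defaults to False), which is why chat_streamly above
# assigns `ans = ...` rather than appending deltas as Base does.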


class ZhipuChat(Base):
    def __init__(self, key, model_name="glm-3-turbo", **kwargs):
        self.client = ZhipuAI(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            # ZhipuAI rejects the OpenAI-style penalty parameters.
            if "presence_penalty" in gen_conf:
                del gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                del gen_conf["frequency_penalty"]
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf
            )
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        ans = ""
        # Initialized before the try block so the final yield cannot raise
        # NameError if the request fails.
        tk_count = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf
            )
            for resp in response:
                if not resp.choices[0].delta.content:
                    continue
                delta = resp.choices[0].delta.content
                ans += delta
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    tk_count = resp.usage.total_tokens
                if resp.choices[0].finish_reason == "stop":
                    tk_count = resp.usage.total_tokens
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count


class OllamaChat(Base):
    def __init__(self, key, model_name, **kwargs):
        self.client = Client(host=kwargs["base_url"])
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            # Map the OpenAI-style generation config onto Ollama's options.
            options = {}
            if "temperature" in gen_conf:
                options["temperature"] = gen_conf["temperature"]
            if "max_tokens" in gen_conf:
                options["num_predict"] = gen_conf["max_tokens"]
            if "top_p" in gen_conf:
                options["top_p"] = gen_conf["top_p"]
            if "presence_penalty" in gen_conf:
                options["presence_penalty"] = gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                options["frequency_penalty"] = gen_conf["frequency_penalty"]
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                options=options
            )
            ans = response["message"]["content"].strip()
            return ans, response.get("eval_count", 0) + response.get("prompt_eval_count", 0)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        options = {}
        if "temperature" in gen_conf:
            options["temperature"] = gen_conf["temperature"]
        if "max_tokens" in gen_conf:
            options["num_predict"] = gen_conf["max_tokens"]
        if "top_p" in gen_conf:
            options["top_p"] = gen_conf["top_p"]
        if "presence_penalty" in gen_conf:
            options["presence_penalty"] = gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            options["frequency_penalty"] = gen_conf["frequency_penalty"]
        ans = ""
        try:
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                stream=True,
                options=options
            )
            for resp in response:
                if resp["done"]:
                    # The final chunk carries the token counts; yield them last.
                    yield resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
                    return
                ans += resp["message"]["content"]
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield 0
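

# A minimal smoke test, assuming a local Ollama server with a pulled
# "llama2" model; the host, model name, and prompt below are illustrative
# placeholders, not values from the upstream project.
if __name__ == "__main__":
    mdl = OllamaChat(None, "llama2", base_url="http://localhost:11434")
    reply, tokens = mdl.chat(
        "You are a terse assistant.",
        [{"role": "user", "content": "Say hello in one word."}],
        {"temperature": 0.2, "max_tokens": 16})
    print(reply, "| tokens:", tokens)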