
chat_model.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zhipuai import ZhipuAI
from dashscope import Generation
from abc import ABC
from openai import OpenAI
import openai
from ollama import Client
from rag.nlp import is_english
from rag.utils import num_tokens_from_string


class Base(ABC):
    """Shared wrapper around an OpenAI-compatible chat-completions endpoint."""

    def __init__(self, key, model_name, base_url):
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf)
            ans = response.choices[0].message.content.strip()
            # Flag truncated answers so the caller can offer to continue.
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except openai.APIError as e:
            return "**ERROR**: " + str(e), 0


class GptTurbo(Base):
    def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
        if not base_url:
            base_url = "https://api.openai.com/v1"
        super().__init__(key, model_name, base_url)


class MoonshotChat(Base):
    def __init__(self, key, model_name="moonshot-v1-8k", base_url="https://api.moonshot.cn/v1"):
        if not base_url:
            base_url = "https://api.moonshot.cn/v1"
        super().__init__(key, model_name, base_url)


class XinferenceChat(Base):
    def __init__(self, key=None, model_name="", base_url=""):
        # A local Xinference deployment does not validate the API key, but the
        # OpenAI client requires a non-empty value, so a placeholder is used.
        key = "xxx"
        super().__init__(key, model_name, base_url)


class DeepSeekChat(Base):
    def __init__(self, key, model_name="deepseek-chat", base_url="https://api.deepseek.com/v1"):
        if not base_url:
            base_url = "https://api.deepseek.com/v1"
        super().__init__(key, model_name, base_url)


class QWenChat(Base):
    def __init__(self, key, model_name=Generation.Models.qwen_turbo, **kwargs):
        # DashScope uses a module-level API key rather than a per-client one,
        # so Base.__init__ is intentionally not called here.
        import dashscope
        dashscope.api_key = key
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        from http import HTTPStatus
        if system:
            history.insert(0, {"role": "system", "content": system})
        response = Generation.call(
            self.model_name,
            messages=history,
            result_format='message',
            **gen_conf
        )
        ans = ""
        tk_count = 0
        if response.status_code == HTTPStatus.OK:
            ans += response.output.choices[0]['message']['content']
            tk_count += response.usage.total_tokens
            if response.output.choices[0].get("finish_reason", "") == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, tk_count
        return "**ERROR**: " + response.message, tk_count


class ZhipuChat(Base):
    def __init__(self, key, model_name="glm-3-turbo", **kwargs):
        self.client = ZhipuAI(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            # The ZhipuAI API does not accept OpenAI-style penalty parameters.
            if "presence_penalty" in gen_conf:
                del gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                del gen_conf["frequency_penalty"]
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf
            )
            ans = response.choices[0].message.content.strip()
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0


class OllamaChat(Base):
    def __init__(self, key, model_name, **kwargs):
        self.client = Client(host=kwargs["base_url"])
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        try:
            # Map OpenAI-style generation settings onto Ollama option names.
            options = {}
            if "temperature" in gen_conf:
                options["temperature"] = gen_conf["temperature"]
            if "max_tokens" in gen_conf:
                options["num_predict"] = gen_conf["max_tokens"]
            if "top_p" in gen_conf:
                options["top_p"] = gen_conf["top_p"]
            if "presence_penalty" in gen_conf:
                options["presence_penalty"] = gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                options["frequency_penalty"] = gen_conf["frequency_penalty"]
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                options=options
            )
            ans = response["message"]["content"].strip()
            return ans, response["eval_count"] + response.get("prompt_eval_count", 0)
        except Exception as e:
            return "**ERROR**: " + str(e), 0
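
A minimal usage sketch follows; it is not part of chat_model.py. It assumes a local Ollama server listening on the default port with a pulled model; the model name, prompt, and generation settings below are illustrative.

# Usage sketch (illustrative values; assumes a local Ollama server on port
# 11434 and that the named model has already been pulled).
if __name__ == "__main__":
    mdl = OllamaChat(key="", model_name="llama2", base_url="http://localhost:11434")
    answer, token_count = mdl.chat(
        system="You are a helpful assistant.",
        history=[{"role": "user", "content": "Hello!"}],
        gen_conf={"temperature": 0.7, "max_tokens": 256},
    )
    print(answer)
    print("tokens used:", token_count)

Any other subclass of Base can be swapped in the same way, since they all expose the same chat(system, history, gen_conf) interface and return an (answer, token_count) pair.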