#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import io
import json
import os
from abc import ABC
from io import BytesIO
from urllib.parse import urljoin

import requests
from ollama import Client
from openai import OpenAI
from openai.lib.azure import AzureOpenAI
from PIL import Image
from zhipuai import ZhipuAI

from api.utils import get_uuid
from api.utils.file_utils import get_project_base_directory
from rag.nlp import is_english
from rag.prompts import vision_llm_describe_prompt
from rag.utils import num_tokens_from_string

class Base(ABC):
    def __init__(self, key, model_name):
        pass

    def describe(self, image):
        raise NotImplementedError("Please implement describe method!")

    def describe_with_prompt(self, image, prompt=None):
        raise NotImplementedError("Please implement describe_with_prompt method!")

    def chat(self, system, history, gen_conf, image=""):
        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        try:
            for his in history:
                if his["role"] == "user":
                    his["content"] = self.chat_prompt(his["content"], image)

            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                temperature=gen_conf.get("temperature", 0.3),
                top_p=gen_conf.get("top_p", 0.7),
            )
            return response.choices[0].message.content.strip(), response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, image=""):
        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        ans = ""
        tk_count = 0
        try:
            for his in history:
                if his["role"] == "user":
                    his["content"] = self.chat_prompt(his["content"], image)

            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                temperature=gen_conf.get("temperature", 0.3),
                top_p=gen_conf.get("top_p", 0.7),
                stream=True,
            )
            for resp in response:
                if not resp.choices[0].delta.content:
                    continue
                delta = resp.choices[0].delta.content
                ans += delta
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english([ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    tk_count = resp.usage.total_tokens
                if resp.choices[0].finish_reason == "stop":
                    tk_count = resp.usage.total_tokens
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count

    def image2base64(self, image):
        if isinstance(image, bytes):
            return base64.b64encode(image).decode("utf-8")
        if isinstance(image, BytesIO):
            return base64.b64encode(image.getvalue()).decode("utf-8")
        buffered = BytesIO()
        try:
            image.save(buffered, format="JPEG")
        except Exception:
            image.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")

    def prompt(self, b64):
        return [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
                    },
                    {
                        "text": "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
                        if self.lang.lower() == "chinese"
                        else "Please describe the content of this picture, like where, when, who, what happened. If it contains numerical data, please extract it.",
                    },
                ],
            }
        ]

    def vision_llm_prompt(self, b64, prompt=None):
        return [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
                    },
                    {
                        "type": "text",
                        "text": prompt if prompt else vision_llm_describe_prompt(),
                    },
                ],
            }
        ]

    def chat_prompt(self, text, b64):
        return [
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{b64}",
                },
            },
            {"type": "text", "text": text},
        ]
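
# A quick illustration (not part of the runtime path) of the message shape that
# Base.prompt() above returns: a single user turn whose content mixes an
# "image_url" part carrying a base64 data URI with a plain-text instruction.
# Most OpenAI-compatible subclasses below reuse this structure verbatim:
#
#   [{"role": "user",
#     "content": [
#         {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}},
#         {"type": "text", "text": "Please describe the content of this picture..."},
#     ]}]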

class GptV4(Base):
    _FACTORY_NAME = "OpenAI"

    def __init__(self, key, model_name="gpt-4-vision-preview", lang="Chinese", base_url="https://api.openai.com/v1"):
        if not base_url:
            base_url = "https://api.openai.com/v1"
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang

    def describe(self, image):
        b64 = self.image2base64(image)
        prompt = self.prompt(b64)
        for msg in prompt:
            for c in msg["content"]:
                if "text" in c:
                    c["type"] = "text"
        res = self.client.chat.completions.create(
            model=self.model_name,
            messages=prompt,
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        vision_prompt = self.vision_llm_prompt(b64, prompt) if prompt else self.vision_llm_prompt(b64)
        res = self.client.chat.completions.create(
            model=self.model_name,
            messages=vision_prompt,
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens
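
# Minimal usage sketch for the OpenAI-compatible vision classes. Nothing calls
# this function; the key, model name and file path are placeholders, not values
# shipped with this module.
def _example_describe_with_openai():
    with open("sample.jpg", "rb") as f:  # hypothetical local image
        cv = GptV4(key="sk-...", model_name="gpt-4o-mini", lang="English")
        caption, tokens = cv.describe(f.read())  # -> (description text, total token count)
        print(caption, tokens)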

class AzureGptV4(Base):
    _FACTORY_NAME = "Azure-OpenAI"

    def __init__(self, key, model_name, lang="Chinese", **kwargs):
        key = json.loads(key)
        api_key = key.get("api_key", "")
        api_version = key.get("api_version", "2024-02-01")
        self.client = AzureOpenAI(api_key=api_key, azure_endpoint=kwargs["base_url"], api_version=api_version)
        self.model_name = model_name
        self.lang = lang

    def describe(self, image):
        b64 = self.image2base64(image)
        prompt = self.prompt(b64)
        for msg in prompt:
            for c in msg["content"]:
                if "text" in c:
                    c["type"] = "text"
        res = self.client.chat.completions.create(model=self.model_name, messages=prompt)
        return res.choices[0].message.content.strip(), res.usage.total_tokens

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        vision_prompt = self.vision_llm_prompt(b64, prompt) if prompt else self.vision_llm_prompt(b64)
        res = self.client.chat.completions.create(
            model=self.model_name,
            messages=vision_prompt,
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens

class xAICV(GptV4):
    _FACTORY_NAME = "xAI"

    def __init__(self, key, model_name="grok-3", base_url=None, **kwargs):
        if not base_url:
            base_url = "https://api.x.ai/v1"
        super().__init__(key, model_name, base_url=base_url, **kwargs)

class QWenCV(Base):
    _FACTORY_NAME = "Tongyi-Qianwen"

    def __init__(self, key, model_name="qwen-vl-chat-v1", lang="Chinese", **kwargs):
        import dashscope

        dashscope.api_key = key
        self.model_name = model_name
        self.lang = lang

    def prompt(self, binary):
        # DashScope's MultiModalConversation only accepts local file paths for
        # images, so the bytes have to be dumped to a temporary file first.
        tmp_dir = get_project_base_directory("tmp")
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir, exist_ok=True)
        path = os.path.join(tmp_dir, "%s.jpg" % get_uuid())
        Image.open(io.BytesIO(binary)).save(path)
        return [
            {
                "role": "user",
                "content": [
                    {"image": f"file://{path}"},
                    {
                        "text": "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
                        if self.lang.lower() == "chinese"
                        else "Please describe the content of this picture, like where, when, who, what happened. If it contains numerical data, please extract it.",
                    },
                ],
            }
        ]

    def vision_llm_prompt(self, binary, prompt=None):
        # Same temporary-file workaround as prompt() above.
        tmp_dir = get_project_base_directory("tmp")
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir, exist_ok=True)
        path = os.path.join(tmp_dir, "%s.jpg" % get_uuid())
        Image.open(io.BytesIO(binary)).save(path)
        return [
            {
                "role": "user",
                "content": [
                    {"image": f"file://{path}"},
                    {
                        "text": prompt if prompt else vision_llm_describe_prompt(),
                    },
                ],
            }
        ]

    def chat_prompt(self, text, b64):
        return [
            {"image": f"{b64}"},
            {"text": text},
        ]

    def describe(self, image):
        from http import HTTPStatus

        from dashscope import MultiModalConversation

        response = MultiModalConversation.call(model=self.model_name, messages=self.prompt(image))
        if response.status_code == HTTPStatus.OK:
            return response.output.choices[0]["message"]["content"][0]["text"], response.usage.output_tokens
        return response.message, 0

    def describe_with_prompt(self, image, prompt=None):
        from http import HTTPStatus

        from dashscope import MultiModalConversation

        vision_prompt = self.vision_llm_prompt(image, prompt) if prompt else self.vision_llm_prompt(image)
        response = MultiModalConversation.call(model=self.model_name, messages=vision_prompt)
        if response.status_code == HTTPStatus.OK:
            return response.output.choices[0]["message"]["content"][0]["text"], response.usage.output_tokens
        return response.message, 0

    def chat(self, system, history, gen_conf, image=""):
        from http import HTTPStatus

        from dashscope import MultiModalConversation

        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        for his in history:
            if his["role"] == "user":
                his["content"] = self.chat_prompt(his["content"], image)
        response = MultiModalConversation.call(
            model=self.model_name,
            messages=history,
            temperature=gen_conf.get("temperature", 0.3),
            top_p=gen_conf.get("top_p", 0.7),
        )
        ans = ""
        tk_count = 0
        if response.status_code == HTTPStatus.OK:
            ans = response.output.choices[0]["message"]["content"]
            if isinstance(ans, list):
                ans = ans[0]["text"] if ans else ""
            tk_count += response.usage.total_tokens
            if response.output.choices[0].get("finish_reason", "") == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english([ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, tk_count
        return "**ERROR**: " + response.message, tk_count

    def chat_streamly(self, system, history, gen_conf, image=""):
        from http import HTTPStatus

        from dashscope import MultiModalConversation

        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        for his in history:
            if his["role"] == "user":
                his["content"] = self.chat_prompt(his["content"], image)
        ans = ""
        tk_count = 0
        try:
            response = MultiModalConversation.call(
                model=self.model_name,
                messages=history,
                temperature=gen_conf.get("temperature", 0.3),
                top_p=gen_conf.get("top_p", 0.7),
                stream=True,
            )
            for resp in response:
                if resp.status_code == HTTPStatus.OK:
                    cnt = resp.output.choices[0]["message"]["content"]
                    if isinstance(cnt, list):
                        cnt = cnt[0]["text"] if cnt else ""
                    ans += cnt
                    tk_count = resp.usage.total_tokens
                    if resp.output.choices[0].get("finish_reason", "") == "length":
                        ans += "...\nFor the content length reason, it stopped, continue?" if is_english([ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    yield ans
                else:
                    yield ans + "\n**ERROR**: " + resp.message if str(resp.message).find("Access") < 0 else "Out of credit. Please set the API key in **settings > Model providers.**"
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count

class Zhipu4V(Base):
    _FACTORY_NAME = "ZHIPU-AI"

    def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
        self.client = ZhipuAI(api_key=key)
        self.model_name = model_name
        self.lang = lang

    def describe(self, image):
        b64 = self.image2base64(image)
        prompt = self.prompt(b64)
        prompt[0]["content"][1]["type"] = "text"
        res = self.client.chat.completions.create(
            model=self.model_name,
            messages=prompt,
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        vision_prompt = self.vision_llm_prompt(b64, prompt) if prompt else self.vision_llm_prompt(b64)
        res = self.client.chat.completions.create(model=self.model_name, messages=vision_prompt)
        return res.choices[0].message.content.strip(), res.usage.total_tokens

    def chat(self, system, history, gen_conf, image=""):
        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        try:
            for his in history:
                if his["role"] == "user":
                    his["content"] = self.chat_prompt(his["content"], image)

            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                temperature=gen_conf.get("temperature", 0.3),
                top_p=gen_conf.get("top_p", 0.7),
            )
            return response.choices[0].message.content.strip(), response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, image=""):
        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        ans = ""
        tk_count = 0
        try:
            for his in history:
                if his["role"] == "user":
                    his["content"] = self.chat_prompt(his["content"], image)

            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                temperature=gen_conf.get("temperature", 0.3),
                top_p=gen_conf.get("top_p", 0.7),
                stream=True,
            )
            for resp in response:
                if not resp.choices[0].delta.content:
                    continue
                delta = resp.choices[0].delta.content
                ans += delta
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english([ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                    tk_count = resp.usage.total_tokens
                if resp.choices[0].finish_reason == "stop":
                    tk_count = resp.usage.total_tokens
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count

class OllamaCV(Base):
    _FACTORY_NAME = "Ollama"

    def __init__(self, key, model_name, lang="Chinese", **kwargs):
        self.client = Client(host=kwargs["base_url"])
        self.model_name = model_name
        self.lang = lang

    def describe(self, image):
        prompt = self.prompt("")
        try:
            response = self.client.generate(
                model=self.model_name,
                prompt=prompt[0]["content"][1]["text"],
                images=[image],
            )
            ans = response["response"].strip()
            return ans, 128
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def describe_with_prompt(self, image, prompt=None):
        vision_prompt = self.vision_llm_prompt("", prompt) if prompt else self.vision_llm_prompt("")
        try:
            response = self.client.generate(
                model=self.model_name,
                prompt=vision_prompt[0]["content"][1]["text"],
                images=[image],
            )
            ans = response["response"].strip()
            return ans, 128
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat(self, system, history, gen_conf, image=""):
        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        try:
            for his in history:
                if his["role"] == "user":
                    his["images"] = [image]
            options = {}
            if "temperature" in gen_conf:
                options["temperature"] = gen_conf["temperature"]
            if "top_p" in gen_conf:
                options["top_p"] = gen_conf["top_p"]
            if "presence_penalty" in gen_conf:
                options["presence_penalty"] = gen_conf["presence_penalty"]
            if "frequency_penalty" in gen_conf:
                options["frequency_penalty"] = gen_conf["frequency_penalty"]
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                options=options,
                keep_alive=-1,
            )
            ans = response["message"]["content"].strip()
            return ans, response["eval_count"] + response.get("prompt_eval_count", 0)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, image=""):
        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        for his in history:
            if his["role"] == "user":
                his["images"] = [image]
        options = {}
        if "temperature" in gen_conf:
            options["temperature"] = gen_conf["temperature"]
        if "top_p" in gen_conf:
            options["top_p"] = gen_conf["top_p"]
        if "presence_penalty" in gen_conf:
            options["presence_penalty"] = gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            options["frequency_penalty"] = gen_conf["frequency_penalty"]
        ans = ""
        try:
            response = self.client.chat(
                model=self.model_name,
                messages=history,
                stream=True,
                options=options,
                keep_alive=-1,
            )
            for resp in response:
                if resp["done"]:
                    yield resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
                ans += resp["message"]["content"]
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield 0
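
# Minimal usage sketch for OllamaCV against a local Ollama server. Nothing
# calls this function; the host, model tag and file path are assumptions for
# illustration only.
def _example_describe_with_ollama():
    with open("sample.jpg", "rb") as f:  # hypothetical local image
        cv = OllamaCV(key="", model_name="llava", lang="English", base_url="http://localhost:11434")
        caption, tokens = cv.describe(f.read())  # token count is a fixed placeholder (128) here
        print(caption, tokens)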

class LocalAICV(GptV4):
    _FACTORY_NAME = "LocalAI"

    def __init__(self, key, model_name, base_url, lang="Chinese"):
        if not base_url:
            raise ValueError("Local cv model url cannot be None")
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key="empty", base_url=base_url)
        self.model_name = model_name.split("___")[0]
        self.lang = lang


class XinferenceCV(Base):
    _FACTORY_NAME = "Xinference"

    def __init__(self, key, model_name="", lang="Chinese", base_url=""):
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang

    def describe(self, image):
        b64 = self.image2base64(image)
        res = self.client.chat.completions.create(model=self.model_name, messages=self.prompt(b64))
        return res.choices[0].message.content.strip(), res.usage.total_tokens

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        vision_prompt = self.vision_llm_prompt(b64, prompt) if prompt else self.vision_llm_prompt(b64)
        res = self.client.chat.completions.create(
            model=self.model_name,
            messages=vision_prompt,
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens

class GeminiCV(Base):
    _FACTORY_NAME = "Gemini"

    def __init__(self, key, model_name="gemini-1.0-pro-vision-latest", lang="Chinese", **kwargs):
        from google.generativeai import GenerativeModel, client

        client.configure(api_key=key)
        _client = client.get_default_generative_client()
        self.model_name = model_name
        self.model = GenerativeModel(model_name=self.model_name)
        self.model._client = _client
        self.lang = lang

    def describe(self, image):
        from PIL.Image import open

        prompt = (
            "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
            if self.lang.lower() == "chinese"
            else "Please describe the content of this picture, like where, when, who, what happened. If it contains numerical data, please extract it."
        )
        b64 = self.image2base64(image)
        img = open(BytesIO(base64.b64decode(b64)))
        inputs = [prompt, img]
        res = self.model.generate_content(inputs)
        return res.text, res.usage_metadata.total_token_count

    def describe_with_prompt(self, image, prompt=None):
        from PIL.Image import open

        b64 = self.image2base64(image)
        vision_prompt = self.vision_llm_prompt(b64, prompt) if prompt else self.vision_llm_prompt(b64)
        img = open(BytesIO(base64.b64decode(b64)))
        inputs = [vision_prompt, img]
        res = self.model.generate_content(inputs)
        return res.text, res.usage_metadata.total_token_count

    def chat(self, system, history, gen_conf, image=""):
        from google.generativeai.types import GenerationConfig

        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        try:
            for his in history:
                if his["role"] == "assistant":
                    his["role"] = "model"
                    his["parts"] = [his["content"]]
                    his.pop("content")
                if his["role"] == "user":
                    his["parts"] = [his["content"]]
                    his.pop("content")
            history[-1]["parts"].append("data:image/jpeg;base64," + image)

            response = self.model.generate_content(history, generation_config=GenerationConfig(temperature=gen_conf.get("temperature", 0.3), top_p=gen_conf.get("top_p", 0.7)))
            ans = response.text
            return ans, response.usage_metadata.total_token_count
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, image=""):
        from google.generativeai.types import GenerationConfig

        if system:
            history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
        ans = ""
        response = None
        try:
            for his in history:
                if his["role"] == "assistant":
                    his["role"] = "model"
                    his["parts"] = [his["content"]]
                    his.pop("content")
                if his["role"] == "user":
                    his["parts"] = [his["content"]]
                    his.pop("content")
            history[-1]["parts"].append("data:image/jpeg;base64," + image)

            response = self.model.generate_content(
                history,
                generation_config=GenerationConfig(temperature=gen_conf.get("temperature", 0.3), top_p=gen_conf.get("top_p", 0.7)),
                stream=True,
            )
            for resp in response:
                if not resp.text:
                    continue
                ans += resp.text
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield response._chunks[-1].usage_metadata.total_token_count if response else 0

class OpenRouterCV(GptV4):
    _FACTORY_NAME = "OpenRouter"

    def __init__(
        self,
        key,
        model_name,
        lang="Chinese",
        base_url="https://openrouter.ai/api/v1",
    ):
        if not base_url:
            base_url = "https://openrouter.ai/api/v1"
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang


class LocalCV(Base):
    _FACTORY_NAME = "Moonshot"

    def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
        pass

    def describe(self, image):
        return "", 0

class NvidiaCV(Base):
    _FACTORY_NAME = "NVIDIA"

    def __init__(
        self,
        key,
        model_name,
        lang="Chinese",
        base_url="https://ai.api.nvidia.com/v1/vlm",
    ):
        if not base_url:
            base_url = "https://ai.api.nvidia.com/v1/vlm"
        self.lang = lang
        factory, llm_name = model_name.split("/")
        if factory != "liuhaotian":
            self.base_url = urljoin(base_url, f"{factory}/{llm_name}")
        else:
            self.base_url = urljoin(f"{base_url}/community", llm_name.replace("-v1.6", "16"))
        self.key = key

    def describe(self, image):
        b64 = self.image2base64(image)
        response = requests.post(
            url=self.base_url,
            headers={
                "accept": "application/json",
                "content-type": "application/json",
                "Authorization": f"Bearer {self.key}",
            },
            json={"messages": self.prompt(b64)},
        )
        response = response.json()
        return (
            response["choices"][0]["message"]["content"].strip(),
            response["usage"]["total_tokens"],
        )

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        vision_prompt = self.vision_llm_prompt(b64, prompt) if prompt else self.vision_llm_prompt(b64)
        response = requests.post(
            url=self.base_url,
            headers={
                "accept": "application/json",
                "content-type": "application/json",
                "Authorization": f"Bearer {self.key}",
            },
            json={
                "messages": vision_prompt,
            },
        )
        response = response.json()
        return (
            response["choices"][0]["message"]["content"].strip(),
            response["usage"]["total_tokens"],
        )

    def prompt(self, b64):
        return [
            {
                "role": "user",
                "content": (
                    "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
                    if self.lang.lower() == "chinese"
                    else "Please describe the content of this picture, like where, when, who, what happened. If it contains numerical data, please extract it."
                )
                + f' <img src="data:image/jpeg;base64,{b64}"/>',
            }
        ]

    def vision_llm_prompt(self, b64, prompt=None):
        return [
            {
                "role": "user",
                "content": (prompt if prompt else vision_llm_describe_prompt()) + f' <img src="data:image/jpeg;base64,{b64}"/>',
            }
        ]

    def chat_prompt(self, text, b64):
        return [
            {
                "role": "user",
                "content": text + f' <img src="data:image/jpeg;base64,{b64}"/>',
            }
        ]
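
# Note: unlike the OpenAI-style classes above, the NVIDIA VLM endpoints used by
# NvidiaCV take the image inline, as an HTML <img> tag embedded in the text
# content (see prompt()/chat_prompt() above), rather than as a structured
# "image_url" part.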

class StepFunCV(GptV4):
    _FACTORY_NAME = "StepFun"

    def __init__(self, key, model_name="step-1v-8k", lang="Chinese", base_url="https://api.stepfun.com/v1"):
        if not base_url:
            base_url = "https://api.stepfun.com/v1"
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang


class LmStudioCV(GptV4):
    _FACTORY_NAME = "LM-Studio"

    def __init__(self, key, model_name, lang="Chinese", base_url=""):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key="lm-studio", base_url=base_url)
        self.model_name = model_name
        self.lang = lang


class OpenAI_APICV(GptV4):
    _FACTORY_NAME = ["VLLM", "OpenAI-API-Compatible"]

    def __init__(self, key, model_name, lang="Chinese", base_url=""):
        if not base_url:
            raise ValueError("url cannot be None")
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name.split("___")[0]
        self.lang = lang


class TogetherAICV(GptV4):
    _FACTORY_NAME = "TogetherAI"

    def __init__(self, key, model_name, lang="Chinese", base_url="https://api.together.xyz/v1"):
        if not base_url:
            base_url = "https://api.together.xyz/v1"
        super().__init__(key, model_name, lang, base_url)


class YiCV(GptV4):
    _FACTORY_NAME = "01.AI"

    def __init__(
        self,
        key,
        model_name,
        lang="Chinese",
        base_url="https://api.lingyiwanwu.com/v1",
    ):
        if not base_url:
            base_url = "https://api.lingyiwanwu.com/v1"
        super().__init__(key, model_name, lang, base_url)


class SILICONFLOWCV(GptV4):
    _FACTORY_NAME = "SILICONFLOW"

    def __init__(
        self,
        key,
        model_name,
        lang="Chinese",
        base_url="https://api.siliconflow.cn/v1",
    ):
        if not base_url:
            base_url = "https://api.siliconflow.cn/v1"
        super().__init__(key, model_name, lang, base_url)

class HunyuanCV(Base):
    _FACTORY_NAME = "Tencent Hunyuan"

    def __init__(self, key, model_name, lang="Chinese", base_url=None):
        from tencentcloud.common import credential
        from tencentcloud.hunyuan.v20230901 import hunyuan_client

        key = json.loads(key)
        sid = key.get("hunyuan_sid", "")
        sk = key.get("hunyuan_sk", "")
        cred = credential.Credential(sid, sk)
        self.model_name = model_name
        self.client = hunyuan_client.HunyuanClient(cred, "")
        self.lang = lang

    def describe(self, image):
        from tencentcloud.common.exception.tencent_cloud_sdk_exception import (
            TencentCloudSDKException,
        )
        from tencentcloud.hunyuan.v20230901 import models

        b64 = self.image2base64(image)
        req = models.ChatCompletionsRequest()
        params = {"Model": self.model_name, "Messages": self.prompt(b64)}
        req.from_json_string(json.dumps(params))
        ans = ""
        try:
            response = self.client.ChatCompletions(req)
            ans = response.Choices[0].Message.Content
            return ans, response.Usage.TotalTokens
        except TencentCloudSDKException as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def describe_with_prompt(self, image, prompt=None):
        from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
        from tencentcloud.hunyuan.v20230901 import models

        b64 = self.image2base64(image)
        vision_prompt = self.vision_llm_prompt(b64, prompt) if prompt else self.vision_llm_prompt(b64)
        req = models.ChatCompletionsRequest()
        params = {"Model": self.model_name, "Messages": vision_prompt}
        req.from_json_string(json.dumps(params))
        ans = ""
        try:
            response = self.client.ChatCompletions(req)
            ans = response.Choices[0].Message.Content
            return ans, response.Usage.TotalTokens
        except TencentCloudSDKException as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def prompt(self, b64):
        return [
            {
                "Role": "user",
                "Contents": [
                    {
                        "Type": "image_url",
                        "ImageUrl": {"Url": f"data:image/jpeg;base64,{b64}"},
                    },
                    {
                        "Type": "text",
                        "Text": "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
                        if self.lang.lower() == "chinese"
                        else "Please describe the content of this picture, like where, when, who, what happened. If it contains numerical data, please extract it.",
                    },
                ],
            }
        ]

class AnthropicCV(Base):
    _FACTORY_NAME = "Anthropic"

    def __init__(self, key, model_name, lang="Chinese", base_url=None):
        import anthropic

        self.client = anthropic.Anthropic(api_key=key)
        self.model_name = model_name
        self.lang = lang
        self.system = ""
        self.max_tokens = 8192
        if "haiku" in self.model_name or "opus" in self.model_name:
            self.max_tokens = 4096

    def prompt(self, b64, prompt):
        return [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": "image/jpeg",
                            "data": b64,
                        },
                    },
                    {"type": "text", "text": prompt},
                ],
            }
        ]

    def describe(self, image):
        b64 = self.image2base64(image)
        prompt = self.prompt(
            b64,
            "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
            if self.lang.lower() == "chinese"
            else "Please describe the content of this picture, like where, when, who, what happened. If it contains numerical data, please extract it.",
        )
        response = self.client.messages.create(model=self.model_name, max_tokens=self.max_tokens, messages=prompt).to_dict()
        return response["content"][0]["text"].strip(), response["usage"]["input_tokens"] + response["usage"]["output_tokens"]

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        prompt = self.prompt(b64, prompt if prompt else vision_llm_describe_prompt())
        response = self.client.messages.create(model=self.model_name, max_tokens=self.max_tokens, messages=prompt).to_dict()
        return response["content"][0]["text"].strip(), response["usage"]["input_tokens"] + response["usage"]["output_tokens"]

    def chat(self, system, history, gen_conf):
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        gen_conf["max_tokens"] = self.max_tokens
        ans = ""
        try:
            response = self.client.messages.create(
                model=self.model_name,
                messages=history,
                system=system,
                stream=False,
                **gen_conf,
            ).to_dict()
            ans = response["content"][0]["text"]
            if response["stop_reason"] == "max_tokens":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english([ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return (
                ans,
                response["usage"]["input_tokens"] + response["usage"]["output_tokens"],
            )
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf):
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        gen_conf["max_tokens"] = self.max_tokens
        ans = ""
        total_tokens = 0
        try:
            response = self.client.messages.create(
                model=self.model_name,
                messages=history,
                system=system,
                stream=True,
                **gen_conf,
            )
            for res in response:
                if res.type == "content_block_delta":
                    if res.delta.type == "thinking_delta" and res.delta.thinking:
                        # Keep the running answer wrapped in a single <think> block.
                        if ans.find("<think>") < 0:
                            ans += "<think>"
                        ans = ans.replace("</think>", "")
                        ans += res.delta.thinking + "</think>"
                    else:
                        text = res.delta.text
                        ans += text
                        total_tokens += num_tokens_from_string(text)
                    yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
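
# Minimal usage sketch for streaming chat with AnthropicCV. Nothing calls this
# function; the key and model name are placeholders. The generator yields
# progressively longer answer strings, then a final integer token count.
def _example_stream_with_anthropic():
    cv = AnthropicCV(key="sk-ant-...", model_name="claude-3-5-sonnet-20240620")
    history = [{"role": "user", "content": "Hello"}]
    for chunk in cv.chat_streamly("You are a helpful assistant.", history, {"temperature": 0.3}):
        if isinstance(chunk, str):
            print(chunk)  # running answer text
        else:
            print("total tokens:", chunk)  # final yield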

class GPUStackCV(GptV4):
    _FACTORY_NAME = "GPUStack"

    def __init__(self, key, model_name, lang="Chinese", base_url=""):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang

class GoogleCV(Base):
    _FACTORY_NAME = "Google Cloud"

    def __init__(self, key, model_name, lang="Chinese", base_url=None, **kwargs):
        from google.oauth2 import service_account

        key = json.loads(key)
        access_token = json.loads(base64.b64decode(key.get("google_service_account_key", "")))
        project_id = key.get("google_project_id", "")
        region = key.get("google_region", "")
        scopes = ["https://www.googleapis.com/auth/cloud-platform"]
        self.model_name = model_name
        self.lang = lang
        if "claude" in self.model_name:
            from anthropic import AnthropicVertex
            from google.auth.transport.requests import Request

            if access_token:
                creds = service_account.Credentials.from_service_account_info(access_token, scopes=scopes)
                request = Request()
                creds.refresh(request)
                token = creds.token
                self.client = AnthropicVertex(region=region, project_id=project_id, access_token=token)
            else:
                self.client = AnthropicVertex(region=region, project_id=project_id)
        else:
            import vertexai.generative_models as glm
            from google.cloud import aiplatform

            if access_token:
                creds = service_account.Credentials.from_service_account_info(access_token)
                aiplatform.init(credentials=creds, project=project_id, location=region)
            else:
                aiplatform.init(project=project_id, location=region)
            self.client = glm.GenerativeModel(model_name=self.model_name)

    def describe(self, image):
        prompt = (
            "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
            if self.lang.lower() == "chinese"
            else "Please describe the content of this picture, like where, when, who, what happened. If it contains numerical data, please extract it."
        )
        if "claude" in self.model_name:
            b64 = self.image2base64(image)
            vision_prompt = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "source": {
                                "type": "base64",
                                "media_type": "image/jpeg",
                                "data": b64,
                            },
                        },
                        {"type": "text", "text": prompt},
                    ],
                }
            ]
            response = self.client.messages.create(
                model=self.model_name,
                max_tokens=8192,
                messages=vision_prompt,
            )
            return response.content[0].text.strip(), response.usage.input_tokens + response.usage.output_tokens
        else:
            import vertexai.generative_models as glm

            b64 = self.image2base64(image)
            # Build a proper image part for Gemini from the base64 payload.
            image_part = glm.Part.from_data(data=base64.b64decode(b64), mime_type="image/jpeg")
            inputs = [prompt, image_part]
            res = self.client.generate_content(inputs)
            return res.text, res.usage_metadata.total_token_count

    def describe_with_prompt(self, image, prompt=None):
        if "claude" in self.model_name:
            b64 = self.image2base64(image)
            vision_prompt = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image",
                            "source": {
                                "type": "base64",
                                "media_type": "image/jpeg",
                                "data": b64,
                            },
                        },
                        {"type": "text", "text": prompt if prompt else vision_llm_describe_prompt()},
                    ],
                }
            ]
            response = self.client.messages.create(model=self.model_name, max_tokens=8192, messages=vision_prompt)
            return response.content[0].text.strip(), response.usage.input_tokens + response.usage.output_tokens
        else:
            import vertexai.generative_models as glm

            b64 = self.image2base64(image)
            vision_prompt = prompt if prompt else vision_llm_describe_prompt()
            # Build a proper image part for Gemini from the base64 payload.
            image_part = glm.Part.from_data(data=base64.b64decode(b64), mime_type="image/jpeg")
            inputs = [vision_prompt, image_part]
            res = self.client.generate_content(inputs)
            return res.text, res.usage_metadata.total_token_count

    def chat(self, system, history, gen_conf, image=""):
        if "claude" in self.model_name:
            if system:
                history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
            try:
                for his in history:
                    if his["role"] == "user":
                        his["content"] = [
                            {
                                "type": "image",
                                "source": {
                                    "type": "base64",
                                    "media_type": "image/jpeg",
                                    "data": image,
                                },
                            },
                            {"type": "text", "text": his["content"]},
                        ]
                response = self.client.messages.create(model=self.model_name, max_tokens=8192, messages=history, temperature=gen_conf.get("temperature", 0.3), top_p=gen_conf.get("top_p", 0.7))
                return response.content[0].text.strip(), response.usage.input_tokens + response.usage.output_tokens
            except Exception as e:
                return "**ERROR**: " + str(e), 0
        else:
            import vertexai.generative_models as glm

            if system:
                history[-1]["content"] = system + "\nuser query: " + history[-1]["content"]
            try:
                for his in history:
                    if his["role"] == "assistant":
                        his["role"] = "model"
                        his["parts"] = [his["content"]]
                        his.pop("content")
                    if his["role"] == "user":
                        his["parts"] = [his["content"]]
                        his.pop("content")
                # Build a proper image part for Gemini from the base64 payload.
                img_bytes = base64.b64decode(image)
                image_part = glm.Part.from_data(data=img_bytes, mime_type="image/jpeg")
                history[-1]["parts"].append(image_part)

                response = self.client.generate_content(history, generation_config=glm.GenerationConfig(temperature=gen_conf.get("temperature", 0.3), top_p=gen_conf.get("top_p", 0.7)))
                ans = response.text
                return ans, response.usage_metadata.total_token_count
            except Exception as e:
                return "**ERROR**: " + str(e), 0
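
# Conventions shared by the classes in this module (a summary, not new
# behavior): describe(), describe_with_prompt() and chat() return a
# (text, token_count) tuple; chat_streamly() yields progressively longer
# answer strings and finally yields an integer token count; errors are
# reported in-band as strings prefixed with "**ERROR**:".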