#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
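"""Vision model (CV) wrappers.

Each class here adapts one provider's vision-capable chat API (OpenAI, Azure
OpenAI, xAI, Tongyi-Qianwen, Hunyuan, ZHIPU-AI, StepFun, Ollama, Gemini,
NVIDIA, Anthropic, Google Cloud, and several OpenAI-compatible local servers)
to a common interface: ``describe``, ``describe_with_prompt``, ``chat`` and
``chat_streamly``.
"""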
import base64
import json
import os
from abc import ABC
from copy import deepcopy
from io import BytesIO
from urllib.parse import urljoin

import requests
from openai import OpenAI
from openai.lib.azure import AzureOpenAI
from zhipuai import ZhipuAI

from rag.nlp import is_english
from rag.prompts import vision_llm_describe_prompt
from rag.utils import num_tokens_from_string


class Base(ABC):
    def __init__(self, **kwargs):
        # Configure retry parameters
        self.max_retries = kwargs.get("max_retries", int(os.environ.get("LLM_MAX_RETRIES", 5)))
        self.base_delay = kwargs.get("retry_interval", float(os.environ.get("LLM_BASE_DELAY", 2.0)))
        self.max_rounds = kwargs.get("max_rounds", 5)
        self.is_tools = False
        self.tools = []
        self.toolcall_sessions = {}

    def describe(self, image):
        raise NotImplementedError("Please implement describe()!")

    def describe_with_prompt(self, image, prompt=None):
        raise NotImplementedError("Please implement describe_with_prompt()!")

    def _form_history(self, system, history, images=[]):
        hist = []
        if system:
            hist.append({"role": "system", "content": system})
        for h in history:
            # Attach the images to the first user turn only.
            if images and h["role"] == "user":
                h["content"] = self._image_prompt(h["content"], images)
                images = []
            hist.append(h)
        return hist

    def _image_prompt(self, text, images):
        if not images:
            return text

        if isinstance(images, str):
            images = [images]

        pmpt = [{"type": "text", "text": text}]
        for img in images:
            pmpt.append({
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{img}" if img[:4] != "data" else img
                }
            })
        return pmpt
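
    # Illustrative shape of the OpenAI-style multimodal content produced by
    # `_image_prompt` (base64 payload abbreviated):
    #
    #   _image_prompt("What is shown?", ["<b64>"]) ->
    #   [{"type": "text", "text": "What is shown?"},
    #    {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,<b64>"}}]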

    def chat(self, system, history, gen_conf, images=[], **kwargs):
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=self._form_history(system, history, images)
            )
            return response.choices[0].message.content.strip(), response.usage.total_tokens
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, images=[], **kwargs):
        ans = ""
        tk_count = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=self._form_history(system, history, images),
                stream=True
            )
            for resp in response:
                # Count tokens before the empty-delta guard: the finishing
                # chunk usually carries usage but no content.
                if resp.choices[0].finish_reason == "stop" and resp.usage:
                    tk_count += resp.usage.total_tokens
                if not resp.choices[0].delta.content:
                    continue
                delta = resp.choices[0].delta.content
                ans += delta
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english([ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield tk_count

    @staticmethod
    def image2base64(image):
        if isinstance(image, bytes):
            return base64.b64encode(image).decode("utf-8")
        if isinstance(image, BytesIO):
            return base64.b64encode(image.getvalue()).decode("utf-8")
        buffered = BytesIO()
        try:
            image.save(buffered, format="JPEG")
        except Exception:
            image.save(buffered, format="PNG")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
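
    # `image2base64` accepts raw bytes, a BytesIO buffer, or a PIL-style image
    # object (anything exposing `save`); images that cannot be re-encoded as
    # JPEG (e.g. with an alpha channel) fall back to PNG. Hypothetical usage:
    #
    #   with open("photo.jpg", "rb") as f:
    #       b64 = Base.image2base64(f.read())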

    def prompt(self, b64):
        return [
            {
                "role": "user",
                "content": self._image_prompt(
                    "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
                    if self.lang.lower() == "chinese"
                    else "Please describe the content of this picture, like where, when, who, what happened. If it has number data, please extract them out.",
                    b64
                )
            }
        ]

    def vision_llm_prompt(self, b64, prompt=None):
        return [
            {
                "role": "user",
                "content": self._image_prompt(prompt if prompt else vision_llm_describe_prompt(), b64)
            }
        ]


class GptV4(Base):
    _FACTORY_NAME = "OpenAI"

    def __init__(self, key, model_name="gpt-4-vision-preview", lang="Chinese", base_url="https://api.openai.com/v1", **kwargs):
        if not base_url:
            base_url = "https://api.openai.com/v1"
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang
        super().__init__(**kwargs)

    def describe(self, image):
        b64 = self.image2base64(image)
        res = self.client.chat.completions.create(
            model=self.model_name,
            messages=self.prompt(b64),
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        res = self.client.chat.completions.create(
            model=self.model_name,
            messages=self.vision_llm_prompt(b64, prompt),
        )
        return res.choices[0].message.content.strip(), res.usage.total_tokens
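
# Hypothetical usage sketch (the API key, model name, and file name are
# illustrative assumptions, not part of this module):
#
#   cv = GptV4(key="sk-...", model_name="gpt-4o", lang="English")
#   with open("photo.jpg", "rb") as f:
#       text, tokens = cv.describe(f.read())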


class AzureGptV4(GptV4):
    _FACTORY_NAME = "Azure-OpenAI"

    def __init__(self, key, model_name, lang="Chinese", **kwargs):
        # Parse the credential JSON once.
        config = json.loads(key)
        api_key = config.get("api_key", "")
        api_version = config.get("api_version", "2024-02-01")
        self.client = AzureOpenAI(api_key=api_key, azure_endpoint=kwargs["base_url"], api_version=api_version)
        self.model_name = model_name
        self.lang = lang
        Base.__init__(self, **kwargs)


class xAICV(GptV4):
    _FACTORY_NAME = "xAI"

    def __init__(self, key, model_name="grok-3", lang="Chinese", base_url=None, **kwargs):
        if not base_url:
            base_url = "https://api.x.ai/v1"
        super().__init__(key, model_name, lang=lang, base_url=base_url, **kwargs)


class QWenCV(GptV4):
    _FACTORY_NAME = "Tongyi-Qianwen"

    def __init__(self, key, model_name="qwen-vl-chat-v1", lang="Chinese", base_url=None, **kwargs):
        if not base_url:
            base_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
        super().__init__(key, model_name, lang=lang, base_url=base_url, **kwargs)


class HunyuanCV(GptV4):
    _FACTORY_NAME = "Tencent Hunyuan"

    def __init__(self, key, model_name, lang="Chinese", base_url=None, **kwargs):
        if not base_url:
            base_url = "https://api.hunyuan.cloud.tencent.com/v1"
        super().__init__(key, model_name, lang=lang, base_url=base_url, **kwargs)


class Zhipu4V(GptV4):
    _FACTORY_NAME = "ZHIPU-AI"

    def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
        self.client = ZhipuAI(api_key=key)
        self.model_name = model_name
        self.lang = lang
        Base.__init__(self, **kwargs)


class StepFunCV(GptV4):
    _FACTORY_NAME = "StepFun"

    def __init__(self, key, model_name="step-1v-8k", lang="Chinese", base_url="https://api.stepfun.com/v1", **kwargs):
        if not base_url:
            base_url = "https://api.stepfun.com/v1"
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang
        Base.__init__(self, **kwargs)


class LmStudioCV(GptV4):
    _FACTORY_NAME = "LM-Studio"

    def __init__(self, key, model_name, lang="Chinese", base_url="", **kwargs):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key="lm-studio", base_url=base_url)
        self.model_name = model_name
        self.lang = lang
        Base.__init__(self, **kwargs)


class OpenAI_APICV(GptV4):
    _FACTORY_NAME = ["VLLM", "OpenAI-API-Compatible"]

    def __init__(self, key, model_name, lang="Chinese", base_url="", **kwargs):
        if not base_url:
            raise ValueError("url cannot be None")
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name.split("___")[0]
        self.lang = lang
        Base.__init__(self, **kwargs)


class TogetherAICV(GptV4):
    _FACTORY_NAME = "TogetherAI"

    def __init__(self, key, model_name, lang="Chinese", base_url="https://api.together.xyz/v1", **kwargs):
        if not base_url:
            base_url = "https://api.together.xyz/v1"
        super().__init__(key, model_name, lang, base_url, **kwargs)


class YiCV(GptV4):
    _FACTORY_NAME = "01.AI"

    def __init__(self, key, model_name, lang="Chinese", base_url="https://api.lingyiwanwu.com/v1", **kwargs):
        if not base_url:
            base_url = "https://api.lingyiwanwu.com/v1"
        super().__init__(key, model_name, lang, base_url, **kwargs)


class SILICONFLOWCV(GptV4):
    _FACTORY_NAME = "SILICONFLOW"

    def __init__(self, key, model_name, lang="Chinese", base_url="https://api.siliconflow.cn/v1", **kwargs):
        if not base_url:
            base_url = "https://api.siliconflow.cn/v1"
        super().__init__(key, model_name, lang, base_url, **kwargs)


class OpenRouterCV(GptV4):
    _FACTORY_NAME = "OpenRouter"

    def __init__(self, key, model_name, lang="Chinese", base_url="https://openrouter.ai/api/v1", **kwargs):
        if not base_url:
            base_url = "https://openrouter.ai/api/v1"
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang
        Base.__init__(self, **kwargs)


class LocalAICV(GptV4):
    _FACTORY_NAME = "LocalAI"

    def __init__(self, key, model_name, base_url, lang="Chinese", **kwargs):
        if not base_url:
            raise ValueError("Local cv model url cannot be None")
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key="empty", base_url=base_url)
        self.model_name = model_name.split("___")[0]
        self.lang = lang
        Base.__init__(self, **kwargs)


class XinferenceCV(GptV4):
    _FACTORY_NAME = "Xinference"

    def __init__(self, key, model_name="", lang="Chinese", base_url="", **kwargs):
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang
        Base.__init__(self, **kwargs)


class GPUStackCV(GptV4):
    _FACTORY_NAME = "GPUStack"

    def __init__(self, key, model_name, lang="Chinese", base_url="", **kwargs):
        if not base_url:
            raise ValueError("Local llm url cannot be None")
        base_url = urljoin(base_url, "v1")
        self.client = OpenAI(api_key=key, base_url=base_url)
        self.model_name = model_name
        self.lang = lang
        Base.__init__(self, **kwargs)


class LocalCV(Base):
    _FACTORY_NAME = "Moonshot"

    def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
        # Placeholder adapter: no client is created.
        pass

    def describe(self, image):
        return "", 0


class OllamaCV(Base):
    _FACTORY_NAME = "Ollama"

    def __init__(self, key, model_name, lang="Chinese", **kwargs):
        from ollama import Client

        self.client = Client(host=kwargs["base_url"])
        self.model_name = model_name
        self.lang = lang
        self.keep_alive = kwargs.get("ollama_keep_alive", int(os.environ.get("OLLAMA_KEEP_ALIVE", -1)))
        Base.__init__(self, **kwargs)

    def _clean_conf(self, gen_conf):
        options = {}
        if "temperature" in gen_conf:
            options["temperature"] = gen_conf["temperature"]
        if "top_p" in gen_conf:
            options["top_p"] = gen_conf["top_p"]
        if "presence_penalty" in gen_conf:
            options["presence_penalty"] = gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            options["frequency_penalty"] = gen_conf["frequency_penalty"]
        return options

    def _form_history(self, system, history, images=[]):
        hist = deepcopy(history)
        if system and hist[0]["role"] == "user":
            hist.insert(0, {"role": "system", "content": system})
        if not images:
            return hist
        for his in hist:
            if his["role"] == "user":
                his["images"] = images
                break
        return hist
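
    # Ollama attaches images as a separate "images" list on the first user
    # message rather than as inline data URLs, e.g.:
    #
    #   [{"role": "system", "content": "..."},
    #    {"role": "user", "content": "What is shown?", "images": ["<b64>"]}]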

    def describe(self, image):
        prompt = self.prompt("")
        # With no image payload, `prompt()` yields plain-text content; keep a
        # guard for the multimodal list shape as well.
        content = prompt[0]["content"]
        if isinstance(content, list):
            content = content[0]["text"]
        try:
            response = self.client.generate(
                model=self.model_name,
                prompt=content,
                images=[image],
            )
            ans = response["response"].strip()
            return ans, 128
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def describe_with_prompt(self, image, prompt=None):
        vision_prompt = self.vision_llm_prompt("", prompt) if prompt else self.vision_llm_prompt("")
        content = vision_prompt[0]["content"]
        if isinstance(content, list):
            content = content[0]["text"]
        try:
            response = self.client.generate(
                model=self.model_name,
                prompt=content,
                images=[image],
            )
            ans = response["response"].strip()
            return ans, 128
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat(self, system, history, gen_conf, images=[]):
        try:
            response = self.client.chat(
                model=self.model_name,
                messages=self._form_history(system, history, images),
                options=self._clean_conf(gen_conf),
                keep_alive=self.keep_alive
            )

            ans = response["message"]["content"].strip()
            return ans, response["eval_count"] + response.get("prompt_eval_count", 0)
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, images=[]):
        ans = ""
        tk_count = 0
        try:
            response = self.client.chat(
                model=self.model_name,
                messages=self._form_history(system, history, images),
                stream=True,
                options=self._clean_conf(gen_conf),
                keep_alive=self.keep_alive
            )
            for resp in response:
                if resp["done"]:
                    # The final chunk carries token counts, not content.
                    tk_count = resp.get("prompt_eval_count", 0) + resp.get("eval_count", 0)
                    continue
                ans += resp["message"]["content"]
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)
        yield tk_count


class GeminiCV(Base):
    _FACTORY_NAME = "Gemini"

    def __init__(self, key, model_name="gemini-1.0-pro-vision-latest", lang="Chinese", **kwargs):
        from google.generativeai import GenerativeModel, client

        client.configure(api_key=key)
        _client = client.get_default_generative_client()
        self.model_name = model_name
        self.model = GenerativeModel(model_name=self.model_name)
        self.model._client = _client
        self.lang = lang
        Base.__init__(self, **kwargs)

    def _form_history(self, system, history, images=[]):
        # Gemini has no system role: fold the system prompt and any images
        # into the first user turn.
        hist = [{"role": "user", "parts": [history[0]["content"]]}]
        if system:
            hist[0]["parts"].insert(0, system)
        for img in images:
            hist[0]["parts"].append(("data:image/jpeg;base64," + img) if img[:4] != "data" else img)
        for h in history[1:]:
            hist.append({"role": "user" if h["role"] == "user" else "model", "parts": [h["content"]]})
        return hist
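
    # Resulting history shape (illustrative):
    #
    #   [{"role": "user", "parts": ["<system prompt>", "Hi", "data:image/jpeg;base64,<b64>"]},
    #    {"role": "model", "parts": ["Hello!"]}]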

    def describe(self, image):
        from PIL import Image

        prompt = (
            "请用中文详细描述一下图中的内容,比如时间,地点,人物,事情,人物心情等,如果有数据请提取出数据。"
            if self.lang.lower() == "chinese"
            else "Please describe the content of this picture, like where, when, who, what happened. If it has number data, please extract them out."
        )
        b64 = self.image2base64(image)
        img = Image.open(BytesIO(base64.b64decode(b64)))
        inputs = [prompt, img]
        res = self.model.generate_content(inputs)
        img.close()
        return res.text, res.usage_metadata.total_token_count

    def describe_with_prompt(self, image, prompt=None):
        from PIL import Image

        b64 = self.image2base64(image)
        vision_prompt = prompt if prompt else vision_llm_describe_prompt()
        img = Image.open(BytesIO(base64.b64decode(b64)))
        inputs = [vision_prompt, img]
        res = self.model.generate_content(inputs)
        img.close()
        return res.text, res.usage_metadata.total_token_count

    def chat(self, system, history, gen_conf, images=[]):
        # GenerationConfig comes from the Gemini SDK, not transformers.
        from google.generativeai.types import GenerationConfig

        try:
            response = self.model.generate_content(
                self._form_history(system, history, images),
                generation_config=GenerationConfig(temperature=gen_conf.get("temperature", 0.3), top_p=gen_conf.get("top_p", 0.7)),
            )
            ans = response.text
            return ans, response.usage_metadata.total_token_count
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, images=[]):
        from google.generativeai.types import GenerationConfig

        ans = ""
        total_count = 0
        try:
            response = self.model.generate_content(
                self._form_history(system, history, images),
                generation_config=GenerationConfig(temperature=gen_conf.get("temperature", 0.3), top_p=gen_conf.get("top_p", 0.7)),
                stream=True,
            )

            for resp in response:
                if not resp.text:
                    continue
                ans += resp.text
                yield ans
            total_count = response._chunks[-1].usage_metadata.total_token_count
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_count


class NvidiaCV(Base):
    _FACTORY_NAME = "NVIDIA"

    def __init__(self, key, model_name, lang="Chinese", base_url="https://ai.api.nvidia.com/v1/vlm", **kwargs):
        if not base_url:
            base_url = "https://ai.api.nvidia.com/v1/vlm"
        self.lang = lang
        factory, llm_name = model_name.split("/")
        if factory != "liuhaotian":
            self.base_url = urljoin(base_url, f"{factory}/{llm_name}")
        else:
            self.base_url = urljoin(f"{base_url}/community", llm_name.replace("-v1.6", "16"))
        self.key = key
        Base.__init__(self, **kwargs)

    def _image_prompt(self, text, images):
        if not images:
            return text
        if isinstance(images, str):
            images = [images]
        htmls = ""
        for img in images:
            htmls += ' <img src="{}"/>'.format(f"data:image/jpeg;base64,{img}" if img[:4] != "data" else img)
        return text + htmls
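
    # NVIDIA's VLM endpoints take images inline as HTML tags appended to the
    # text prompt, e.g.:
    #
    #   'What is shown? <img src="data:image/jpeg;base64,<b64>"/>'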

    def describe(self, image):
        b64 = self.image2base64(image)
        response = requests.post(
            url=self.base_url,
            headers={
                "accept": "application/json",
                "content-type": "application/json",
                "Authorization": f"Bearer {self.key}",
            },
            json={"messages": self.prompt(b64)},
        )
        response = response.json()
        return (
            response["choices"][0]["message"]["content"].strip(),
            response["usage"]["total_tokens"],
        )

    def _request(self, msg, gen_conf={}):
        response = requests.post(
            url=self.base_url,
            headers={
                "accept": "application/json",
                "content-type": "application/json",
                "Authorization": f"Bearer {self.key}",
            },
            json={"messages": msg, **gen_conf},
        )
        return response.json()

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        vision_prompt = self.vision_llm_prompt(b64, prompt) if prompt else self.vision_llm_prompt(b64)
        response = self._request(vision_prompt)
        return (
            response["choices"][0]["message"]["content"].strip(),
            response["usage"]["total_tokens"],
        )

    def chat(self, system, history, gen_conf, images=[], **kwargs):
        try:
            response = self._request(self._form_history(system, history, images), gen_conf)
            return (
                response["choices"][0]["message"]["content"].strip(),
                response["usage"]["total_tokens"],
            )
        except Exception as e:
            return "**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, images=[], **kwargs):
        total_tokens = 0
        try:
            response = self._request(self._form_history(system, history, images), gen_conf)
            total_tokens = response["usage"]["total_tokens"]
            # The endpoint does not stream; emit the answer in one piece.
            yield response["choices"][0]["message"]["content"]
        except Exception as e:
            yield "\n**ERROR**: " + str(e)

        yield total_tokens


class AnthropicCV(Base):
    _FACTORY_NAME = "Anthropic"

    def __init__(self, key, model_name, base_url=None, **kwargs):
        import anthropic

        self.client = anthropic.Anthropic(api_key=key)
        self.model_name = model_name
        self.system = ""
        self.max_tokens = 8192
        if "haiku" in self.model_name or "opus" in self.model_name:
            self.max_tokens = 4096
        Base.__init__(self, **kwargs)

    def _image_prompt(self, text, images):
        if not images:
            return text
        if isinstance(images, str):
            images = [images]
        pmpt = [{"type": "text", "text": text}]
        for img in images:
            pmpt.append({
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": "image/jpeg" if img[:4] != "data" else img.split(":")[1].split(";")[0],
                    "data": img if img[:4] != "data" else img.split(",")[1]
                },
            })
        return pmpt
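
    # Anthropic's image blocks carry raw base64 plus an explicit media type:
    #
    #   {"type": "image",
    #    "source": {"type": "base64", "media_type": "image/jpeg", "data": "<b64>"}}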

    def describe(self, image):
        b64 = self.image2base64(image)
        response = self.client.messages.create(model=self.model_name, max_tokens=self.max_tokens, messages=self.prompt(b64)).to_dict()
        return response["content"][0]["text"].strip(), response["usage"]["input_tokens"] + response["usage"]["output_tokens"]

    def describe_with_prompt(self, image, prompt=None):
        b64 = self.image2base64(image)
        messages = self.vision_llm_prompt(b64, prompt)

        response = self.client.messages.create(model=self.model_name, max_tokens=self.max_tokens, messages=messages).to_dict()
        return response["content"][0]["text"].strip(), response["usage"]["input_tokens"] + response["usage"]["output_tokens"]

    def _clean_conf(self, gen_conf):
        if "presence_penalty" in gen_conf:
            del gen_conf["presence_penalty"]
        if "frequency_penalty" in gen_conf:
            del gen_conf["frequency_penalty"]
        # Anthropic requires max_tokens on every request; drop the misspelled
        # key if present and always set the real one.
        gen_conf.pop("max_token", None)
        gen_conf["max_tokens"] = self.max_tokens
        return gen_conf

    def chat(self, system, history, gen_conf, images=[]):
        gen_conf = self._clean_conf(gen_conf)
        ans = ""
        try:
            response = self.client.messages.create(
                model=self.model_name,
                # Anthropic takes the system prompt as a dedicated argument,
                # so keep it out of the message list.
                messages=self._form_history("", history, images),
                system=system,
                stream=False,
                **gen_conf,
            ).to_dict()
            ans = response["content"][0]["text"]
            if response["stop_reason"] == "max_tokens":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english([ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return (
                ans,
                response["usage"]["input_tokens"] + response["usage"]["output_tokens"],
            )
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0

    def chat_streamly(self, system, history, gen_conf, images=[]):
        gen_conf = self._clean_conf(gen_conf)
        total_tokens = 0
        try:
            response = self.client.messages.create(
                model=self.model_name,
                messages=self._form_history("", history, images),
                system=system,
                stream=True,
                **gen_conf,
            )
            think = False
            for res in response:
                if res.type != "content_block_delta":
                    continue
                if res.delta.type == "thinking_delta" and res.delta.thinking:
                    if not think:
                        yield "<think>"
                        think = True
                    yield res.delta.thinking
                    total_tokens += num_tokens_from_string(res.delta.thinking)
                elif res.delta.type == "text_delta":
                    # Close the thinking span before the first answer token.
                    if think:
                        yield "</think>"
                        think = False
                    yield res.delta.text
                    total_tokens += num_tokens_from_string(res.delta.text)
        except Exception as e:
            yield "\n**ERROR**: " + str(e)

        yield total_tokens
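
    # Sketch of consuming the streaming protocol used throughout this module
    # (string yields are answer text; the final integer yield is the token
    # count). Hypothetical usage:
    #
    #   *chunks, tokens = list(cv.chat_streamly("Be terse.", [{"role": "user", "content": "Hi"}], {}))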


class GoogleCV(AnthropicCV, GeminiCV):
    _FACTORY_NAME = "Google Cloud"

    def __init__(self, key, model_name, lang="Chinese", base_url=None, **kwargs):
        from google.oauth2 import service_account

        key = json.loads(key)
        encoded = key.get("google_service_account_key", "")
        access_token = json.loads(base64.b64decode(encoded)) if encoded else None
        project_id = key.get("google_project_id", "")
        region = key.get("google_region", "")

        scopes = ["https://www.googleapis.com/auth/cloud-platform"]
        self.model_name = model_name
        self.lang = lang

        if "claude" in self.model_name:
            from anthropic import AnthropicVertex
            from google.auth.transport.requests import Request

            if access_token:
                credits = service_account.Credentials.from_service_account_info(access_token, scopes=scopes)
                request = Request()
                credits.refresh(request)
                token = credits.token
                self.client = AnthropicVertex(region=region, project_id=project_id, access_token=token)
            else:
                self.client = AnthropicVertex(region=region, project_id=project_id)
        else:
            import vertexai.generative_models as glm
            from google.cloud import aiplatform

            if access_token:
                credits = service_account.Credentials.from_service_account_info(access_token)
                aiplatform.init(credentials=credits, project=project_id, location=region)
            else:
                aiplatform.init(project=project_id, location=region)
            self.client = glm.GenerativeModel(model_name=self.model_name)
            # GeminiCV's methods look up `self.model`, so alias it.
            self.model = self.client
        Base.__init__(self, **kwargs)

    def describe(self, image):
        if "claude" in self.model_name:
            return AnthropicCV.describe(self, image)
        return GeminiCV.describe(self, image)

    def describe_with_prompt(self, image, prompt=None):
        if "claude" in self.model_name:
            return AnthropicCV.describe_with_prompt(self, image, prompt)
        return GeminiCV.describe_with_prompt(self, image, prompt)

    def chat(self, system, history, gen_conf, images=[]):
        if "claude" in self.model_name:
            return AnthropicCV.chat(self, system, history, gen_conf, images)
        return GeminiCV.chat(self, system, history, gen_conf, images)

    def chat_streamly(self, system, history, gen_conf, images=[]):
        if "claude" in self.model_name:
            yield from AnthropicCV.chat_streamly(self, system, history, gen_conf, images)
        else:
            yield from GeminiCV.chat_streamly(self, system, history, gen_conf, images)
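

if __name__ == "__main__":
    # Minimal smoke test; a sketch only. The environment variable, model name,
    # and image path are illustrative assumptions, not part of this module.
    cv = GptV4(key=os.environ.get("OPENAI_API_KEY", ""), model_name="gpt-4o", lang="English")
    with open("sample.jpg", "rb") as f:
        description, tokens = cv.describe(f.read())
    print(description, tokens)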