#1432 #1447 This PR adds support for the Groq LLM (Large Language Model). Groq is an AI solutions company delivering ultra-low-latency inference with the first-ever LPU™ Inference Engine. The Groq API enables developers to integrate state-of-the-art LLMs, such as Llama-2 and llama3-70b-8192, into low-latency applications with the request limits specified below. Learn more at [groq.com](https://groq.com/).

Supported Models

| ID                 | Requests per Minute | Requests per Day | Tokens per Minute |
|--------------------|---------------------|------------------|-------------------|
| gemma-7b-it        | 30                  | 14,400           | 15,000            |
| gemma2-9b-it       | 30                  | 14,400           | 15,000            |
| llama3-70b-8192    | 30                  | 14,400           | 6,000             |
| llama3-8b-8192     | 30                  | 14,400           | 30,000            |
| mixtral-8x7b-32768 | 30                  | 14,400           | 5,000             |

---------

Co-authored-by: paresh0628 <paresh.tuvoc@gmail.com>
Co-authored-by: Kevin Hu <kevinhu.sh@gmail.com>
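For reviewers unfamiliar with the SDK, here is a minimal sketch of a direct call against the Groq API using the `groq` package pinned below; the API key and prompt are placeholders, and the call shape matches what the new `GroqChat` class uses:

```python
from groq import Groq

# Placeholder key; in RAGFlow the key comes from the tenant's LLM config.
client = Groq(api_key="YOUR_GROQ_API_KEY")

completion = client.chat.completions.create(
    model="llama3-8b-8192",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    temperature=0.7,
    max_tokens=64,
)
print(completion.choices[0].message.content)
print(completion.usage.total_tokens)
```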
@@ -180,6 +180,12 @@ factory_infos = [{
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,IMAGE2TEXT",
    "status": "1",
},
{
    "name": "Groq",
    "logo": "",
    "tags": "LLM",
    "status": "1",
}
# {
#     "name": "文心一言",
@@ -933,6 +939,47 @@ def init_llm_factory():
    "tags": "TEXT EMBEDDING",
    "max_tokens": 2048,
    "model_type": LLMType.EMBEDDING.value
},
# ------------------------ Groq -----------------------
# Note: factory_infos[18] is a positional reference to the "Groq" factory
# entry appended above; it must be kept in sync if that list is reordered.
{
    "fid": factory_infos[18]["name"],
    "llm_name": "gemma-7b-it",
    "tags": "LLM,CHAT,15k",
    "max_tokens": 8192,
    "model_type": LLMType.CHAT.value
},
{
    "fid": factory_infos[18]["name"],
    "llm_name": "gemma2-9b-it",
    "tags": "LLM,CHAT,15k",
    "max_tokens": 8192,
    "model_type": LLMType.CHAT.value
},
{
    "fid": factory_infos[18]["name"],
    "llm_name": "llama3-70b-8192",
    "tags": "LLM,CHAT,6k",
    "max_tokens": 8192,
    "model_type": LLMType.CHAT.value
},
{
    "fid": factory_infos[18]["name"],
    "llm_name": "llama3-8b-8192",
    "tags": "LLM,CHAT,30k",
    "max_tokens": 8192,
    "model_type": LLMType.CHAT.value
},
{
    "fid": factory_infos[18]["name"],
    "llm_name": "mixtral-8x7b-32768",
    "tags": "LLM,CHAT,5k",
    "max_tokens": 32768,
    "model_type": LLMType.CHAT.value
}
]
for info in factory_infos:
@@ -72,7 +72,8 @@ ChatModel = {
    "Mistral": MistralChat,
    "Gemini": GeminiChat,
    "Bedrock": BedrockChat,
    "Groq": GroqChat
}
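`GroqChat` is a chat-completion wrapper, so it is registered under `ChatModel` rather than `EmbeddingModel`. With the mapping in place, callers can resolve the provider by name; a hedged sketch, where the `rag.llm` import path and the key/model values are assumptions:

```python
from rag.llm import ChatModel  # assumed module path for the mapping above

mdl = ChatModel["Groq"](key="YOUR_GROQ_API_KEY", model_name="llama3-70b-8192")
ans, tokens = mdl.chat(
    system="You are a concise assistant.",
    history=[{"role": "user", "content": "What is an LPU?"}],
    gen_conf={"temperature": 0.3, "max_tokens": 128},
)
```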
@@ -23,6 +23,7 @@ from ollama import Client
from volcengine.maas.v2 import MaasService
from rag.nlp import is_english
from rag.utils import num_tokens_from_string
from groq import Groq


class Base(ABC):

@@ -681,4 +682,63 @@ class GeminiChat(Base):
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield response._chunks[-1].usage_metadata.total_token_count

class GroqChat:
    def __init__(self, key, model_name, base_url=''):
        self.client = Groq(api_key=key)
        self.model_name = model_name

    def chat(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        # Groq accepts only these generation parameters; drop anything else.
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]

        ans = ""
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                **gen_conf
            )
            ans = response.choices[0].message.content
            if response.choices[0].finish_reason == "length":
                ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                    [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
            return ans, response.usage.total_tokens
        except Exception as e:
            return ans + "\n**ERROR**: " + str(e), 0
    def chat_streamly(self, system, history, gen_conf):
        if system:
            history.insert(0, {"role": "system", "content": system})
        for k in list(gen_conf.keys()):
            if k not in ["temperature", "top_p", "max_tokens"]:
                del gen_conf[k]

        ans = ""
        total_tokens = 0
        try:
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=history,
                stream=True,
                **gen_conf
            )
            for resp in response:
                if not resp.choices or not resp.choices[0].delta.content:
                    continue
                ans += resp.choices[0].delta.content
                # Rough count: one token per streamed delta chunk.
                total_tokens += 1
                if resp.choices[0].finish_reason == "length":
                    ans += "...\nFor the content length reason, it stopped, continue?" if is_english(
                        [ans]) else "······\n由于长度的原因,回答被截断了,要继续吗?"
                yield ans
        except Exception as e:
            yield ans + "\n**ERROR**: " + str(e)

        yield total_tokens
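A hedged usage sketch for the new class (key, model name, and prompt are placeholders): `chat` returns the answer plus the provider-reported token count, while `chat_streamly` yields the accumulated answer after each chunk and finally the chunk-based token count:

```python
mdl = GroqChat(key="YOUR_GROQ_API_KEY", model_name="llama3-8b-8192")
history = [{"role": "user", "content": "Summarize LPU inference in one line."}]

# Blocking call: returns (answer, total_tokens).
ans, used = mdl.chat(system="", history=history, gen_conf={"max_tokens": 128})

# Streaming call: every yield but the last is the answer so far.
for out in mdl.chat_streamly(system="", history=list(history), gen_conf={}):
    print(out)
```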
@@ -147,4 +147,5 @@ markdown==3.6
mistralai==0.4.2
boto3==1.34.140
duckduckgo_search==6.1.9
google-generativeai==0.7.2
groq==0.9.0
@@ -148,4 +148,5 @@ markdown==3.6
mistralai==0.4.2
boto3==1.34.140
duckduckgo_search==6.1.9
google-generativeai==0.7.2
groq==0.9.0
@@ -133,4 +133,5 @@ markdown==3.6
mistralai==0.4.2
boto3==1.34.140
duckduckgo_search==6.1.9
google-generativeai==0.7.2
groq==0.9.0
@@ -0,0 +1 @@
<svg aria-label="groq logo" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 833.7 304.8" class="w-[64px] sm:w-[82px] h-[40px] fill-black dark:fill-white"><g clip-path="url(#a)"><path d="M499.3.5c-57.6 0-104.4 46.7-104.4 104.4s46.7 104.4 104.4 104.4 104.4-46.7 104.4-104.4C603.6 47.3 556.9.6 499.3.5Zm0 169.6c-36 0-65.2-29.2-65.2-65.2s29.2-65.2 65.2-65.2 65.2 29.2 65.2 65.2-29.2 65.2-65.2 65.2ZM355.2.9c-3.6-.4-7.1-.6-10.7-.6-1.8 0-3.5 0-5.2.1-1.7.1-3.5.2-5.2.3-7 .5-14 1.7-20.8 3.6-13.9 3.7-27 10.3-38.3 19.2-11.6 9.2-20.8 21-27.1 34.4-3.1 6.7-5.4 13.8-6.9 21-.7 3.6-1.2 7.2-1.6 10.8-.1 1.8-.3 3.6-.3 5.4l-.1 2.7v2.5l.2 69.9.2 34.9h39.1l.2-34.9.2-69.9v-3.7c0-1.1.2-2.2.2-3.3.2-2.2.6-4.3 1-6.5.9-4.2 2.2-8.3 4-12.2 3.6-7.8 9-14.6 15.7-20 7-5.6 15.1-9.7 23.7-12 4.4-1.2 9-2 13.6-2.4 1.2-.1 2.3-.2 3.5-.2 1.2 0 2.4-.1 3.5-.1 2.2 0 4.5.1 6.7.3 8.9.9 17.5 3.6 25.4 8l19.5-33.9C383.3 7.1 369.5 2.5 355.2.9ZM105.3 0C47.7-.5.5 45.8 0 103.4-.5 161 45.8 208.2 103.4 208.7h36.2v-39.1h-34.3c-36 .4-65.6-28.4-66-64.5-.4-36.1 28.4-65.6 64.5-66h1.5c36 0 65.2 29.2 65.4 65.2v96.1c0 35.7-29.1 64.8-64.7 65.2-17.1-.1-33.4-7-45.4-19.1l-27.7 27.7c19.2 19.3 45.2 30.3 72.4 30.5h1.4c56.9-.8 102.6-47 102.9-103.9v-99.1C208.2 45.2 161.9.1 105.3 0ZM729.7.5c-57.6 0-104.4 46.7-104.3 104.4 0 57.6 46.7 104.3 104.3 104.3h35.7v-39.1h-35.7c-36 0-65.2-29.2-65.2-65.2s29.2-65.2 65.2-65.2c33.8 0 62 25.9 65 59.6h-.1v200.4h39.1V104.9C833.7 47.3 787.2.5 729.7.5Z"></path></g><defs><clipPath id="a"><path fill="#fff" d="M0 0h833.7v304.8H0z"></path></clipPath></defs></svg>
@@ -62,6 +62,7 @@ const IconMap = {
  'Azure-OpenAI': 'azure',
  Bedrock: 'bedrock',
  Gemini: 'gemini',
  Groq: 'Groq',
};

const LlmIcon = ({ name }: { name: string }) => {