### What problem does this PR solve?

Add xAI provider (experimental feature, requires user feedback).

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
@@ -141,6 +141,61 @@
            }
        ]
    },
    {
        "name": "xAI",
        "logo": "",
        "tags": "LLM",
        "status": "1",
        "llm": [
            {
                "llm_name": "grok-4",
                "tags": "LLM,CHAT,256k",
                "max_tokens": 256000,
                "model_type": "chat",
                "is_tools": true
            },
            {
                "llm_name": "grok-3",
                "tags": "LLM,CHAT,130k",
                "max_tokens": 131072,
                "model_type": "chat",
                "is_tools": true
            },
            {
                "llm_name": "grok-3-fast",
                "tags": "LLM,CHAT,130k",
                "max_tokens": 131072,
                "model_type": "chat",
                "is_tools": true
            },
            {
                "llm_name": "grok-3-mini",
                "tags": "LLM,CHAT,130k",
                "max_tokens": 131072,
                "model_type": "chat",
                "is_tools": true
            },
            {
                "llm_name": "grok-3-mini-fast",
                "tags": "LLM,CHAT,130k",
                "max_tokens": 131072,
                "model_type": "chat",
                "is_tools": true
            },
            {
                "llm_name": "grok-2-vision",
                "tags": "LLM,CHAT,IMAGE2TEXT,32k",
                "max_tokens": 32768,
                "model_type": "image2text",
                "is_tools": true
            }
        ]
    },
    {
        "name": "Tongyi-Qianwen",
        "logo": "",
@@ -58,6 +58,7 @@ A complete list of models supported by RAGFlow, which will continue to expand.
| Voyage AI    |                    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |                    |                    |
| Xinference   | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| XunFei Spark | :heavy_check_mark: |                    |                    |                    |                    | :heavy_check_mark: |
| xAI          | :heavy_check_mark: |                    |                    | :heavy_check_mark: |                    |                    |
| Youdao       |                    | :heavy_check_mark: | :heavy_check_mark: |                    |                    |                    |
| ZHIPU-AI     | :heavy_check_mark: | :heavy_check_mark: |                    | :heavy_check_mark: |                    |                    |
| 01.AI        | :heavy_check_mark: |                    |                    |                    |                    |                    |
@@ -568,6 +568,16 @@ class BaiChuanChat(Base):
        yield total_tokens


class xAIChat(Base):
    _FACTORY_NAME = "xAI"

    def __init__(self, key, model_name="grok-3", base_url=None, **kwargs):
        # xAI exposes an OpenAI-compatible API; default to its public endpoint.
        if not base_url:
            base_url = "https://api.x.ai/v1"
        super().__init__(key, model_name, base_url=base_url, **kwargs)
        return


class QWenChat(Base):
    _FACTORY_NAME = "Tongyi-Qianwen"
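A hypothetical usage sketch for the new wrapper, assuming `xAIChat` inherits the `chat(system, history, gen_conf)` interface shared by the other OpenAI-compatible classes in `chat_model.py` and returns the answer together with the token count; the key, prompt, and generation settings are placeholders:

```python
# Placeholder key and prompts; the chat() signature is assumed from the
# surrounding Base class, not defined by this PR.
from rag.llm.chat_model import xAIChat  # import path assumed

mdl = xAIChat(key="<XAI_API_KEY>", model_name="grok-3")

answer, total_tokens = mdl.chat(
    "You are a concise assistant.",                                 # system
    [{"role": "user", "content": "Explain RAG in one sentence."}],  # history
    {"temperature": 0.3, "max_tokens": 128},                        # gen_conf
)
print(answer, total_tokens)
```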
@@ -223,6 +223,16 @@ class AzureGptV4(Base):
        return res.choices[0].message.content.strip(), res.usage.total_tokens


class xAICV(Base):
    _FACTORY_NAME = "xAI"

    def __init__(self, key, model_name="grok-3", base_url=None, **kwargs):
        # Same OpenAI-compatible endpoint as the chat wrapper.
        if not base_url:
            base_url = "https://api.x.ai/v1"
        super().__init__(key, model_name, base_url=base_url, **kwargs)
        return


class QWenCV(Base):
    _FACTORY_NAME = "Tongyi-Qianwen"
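Because `xAICV` points at the same OpenAI-compatible endpoint, a `grok-2-vision` request can be exercised with a standard multimodal chat message. This standalone sketch uses the `openai` client directly rather than RAGFlow's `Base` class, and the image path is a placeholder:

```python
# Standalone vision check against xAI's OpenAI-compatible endpoint
# (same base_url that xAICV defaults to). File name is illustrative.
import base64
import os

from openai import OpenAI

client = OpenAI(api_key=os.environ["XAI_API_KEY"], base_url="https://api.x.ai/v1")

with open("sample.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

resp = client.chat.completions.create(
    model="grok-2-vision",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image in one sentence."},
            {"type": "image_url",
             "image_url": {"url": f"data:image/png;base64,{image_b64}"}},
        ],
    }],
    max_tokens=128,
)
print(resp.choices[0].message.content)
```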