### What problem does this PR solve?

#2794

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
```diff
@@ -107,6 +107,7 @@ ChatModel = {
     "BaiduYiyan": BaiduYiyanChat,
     "Anthropic": AnthropicChat,
     "Google Cloud": GoogleChat,
+    "HuggingFace": HuggingFaceChat,
 }
 RerankModel = {
```
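For context, `ChatModel` is a plain name-to-class registry: the factory name stored with a tenant's model configuration is looked up here to select the chat implementation. Below is a minimal dispatch sketch; `build_chat_model` is a hypothetical helper for illustration, not RAGFlow's actual call site:

```python
# Sketch only: illustrates how the ChatModel registry extended above is
# consumed. `build_chat_model` is a hypothetical name, not part of this PR.
def build_chat_model(factory: str, key, model_name: str, base_url: str):
    chat_cls = ChatModel.get(factory)
    if chat_cls is None:
        raise ValueError(f"Unsupported chat factory: {factory}")
    # With this PR, factory="HuggingFace" resolves to HuggingFaceChat.
    return chat_cls(key, model_name, base_url)
```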
```diff
@@ -104,7 +104,13 @@ class XinferenceChat(Base):
         if base_url.split("/")[-1] != "v1":
             base_url = os.path.join(base_url, "v1")
         super().__init__(key, model_name, base_url)
 
 
+class HuggingFaceChat(Base):
+    def __init__(self, key=None, model_name="", base_url=""):
+        if not base_url:
+            raise ValueError("Local llm url cannot be None")
+        if base_url.split("/")[-1] != "v1":
+            base_url = os.path.join(base_url, "v1")
+        super().__init__(key, model_name, base_url)
 
 
 class DeepSeekChat(Base):
     def __init__(self, key, model_name="deepseek-chat", base_url="https://api.deepseek.com/v1"):
```
```diff
@@ -54,7 +54,10 @@ const OllamaModal = ({
     llmFactoryToUrlMap[llmFactory as LlmFactory] ||
     'https://github.com/infiniflow/ragflow/blob/main/docs/guides/deploy_local_llm.mdx';
   const optionsMap = {
-    HuggingFace: [{ value: 'embedding', label: 'embedding' }],
+    HuggingFace: [
+      { value: 'embedding', label: 'embedding' },
+      { value: 'chat', label: 'chat' },
+    ],
     Xinference: [
       { value: 'chat', label: 'chat' },
       { value: 'embedding', label: 'embedding' },
```
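With the extra option in place, the add-model dialog offers both `embedding` and `chat` for the HuggingFace factory; selecting `chat` yields a configuration that the backend resolves to the new `HuggingFaceChat` class through the registry entry above.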