
Fix: url path join issue. (#8013)

### What problem does this PR solve?

Close #7980

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
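
For background, `os.path.join` builds filesystem paths with the host OS separator, so on Windows it can splice a backslash into a URL, while `urllib.parse.urljoin` resolves the extra segment against the URL itself. A minimal sketch (the endpoint below is a hypothetical placeholder, not taken from this repository):

```python
import os
from urllib.parse import urljoin

base = "http://localhost:9997"  # hypothetical local inference endpoint

# os.path.join uses the OS path separator, so the result is platform dependent:
#   POSIX:   http://localhost:9997/v1
#   Windows: http://localhost:9997\v1   <- not a valid URL path
print(os.path.join(base, "v1"))

# urljoin resolves "v1" as a relative reference against the URL,
# giving http://localhost:9997/v1 on every platform.
print(urljoin(base, "v1"))
```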
tags/v0.19.1
Kevin Hu, 5 months ago
commit 156290f8d0
4 changed files with 25 additions and 41 deletions
1. rag/llm/chat_model.py (+8, -14)
2. rag/llm/cv_model.py (+8, -14)
3. rag/llm/embedding_model.py (+7, -10)
4. rag/llm/rerank_model.py (+2, -3)

rag/llm/chat_model.py (+8, -14)

@@ -22,6 +22,7 @@ import re
 import time
 from abc import ABC
 from typing import Any, Protocol
+from urllib.parse import urljoin
 
 import openai
 import requests
@@ -445,8 +446,7 @@ class XinferenceChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name, base_url)
 
 
@@ -454,8 +454,7 @@ class HuggingFaceChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name.split("___")[0], base_url)
 
 
@@ -463,9 +462,7 @@ class ModelScopeChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        base_url = base_url.rstrip("/")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name.split("___")[0], base_url)
 
 
@@ -983,8 +980,7 @@ class LocalAIChat(Base):
 
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="empty", base_url=base_url)
         self.model_name = model_name.split("___")[0]
 
@@ -1442,8 +1438,7 @@ class LmStudioChat(Base):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name, base_url)
         self.client = OpenAI(api_key="lm-studio", base_url=base_url)
         self.model_name = model_name
@@ -1542,7 +1537,7 @@ class CoHereChat(Base):
 class LeptonAIChat(Base):
     def __init__(self, key, model_name, base_url=None):
         if not base_url:
-            base_url = os.path.join("https://" + model_name + ".lepton.run", "api", "v1")
+            base_url = urljoin("https://" + model_name + ".lepton.run", "api/v1")
         super().__init__(key, model_name, base_url)
 
 
@@ -2016,6 +2011,5 @@ class GPUStackChat(Base):
     def __init__(self, key=None, model_name="", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         super().__init__(key, model_name, base_url)
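
A usage note on the constructors above: `urljoin` follows RFC 3986 relative-reference resolution, so the `"v1"` segment replaces everything after the last `/` of the base path, and a trailing slash on the configured `base_url` changes the result. A small sketch with hypothetical endpoints:

```python
from urllib.parse import urljoin

# Hypothetical base URLs, purely illustrative.
print(urljoin("http://localhost:9997", "v1"))              # http://localhost:9997/v1
print(urljoin("http://localhost:9997/", "v1"))             # http://localhost:9997/v1
print(urljoin("http://localhost:9997/xinference", "v1"))   # http://localhost:9997/v1
print(urljoin("http://localhost:9997/xinference/", "v1"))  # http://localhost:9997/xinference/v1
```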

rag/llm/cv_model.py (+8, -14)

@@ -19,6 +19,7 @@ import json
 import os
 from abc import ABC
 from io import BytesIO
+from urllib.parse import urljoin
 
 import requests
 from ollama import Client
@@ -546,8 +547,7 @@ class LocalAICV(GptV4):
     def __init__(self, key, model_name, base_url, lang="Chinese"):
         if not base_url:
             raise ValueError("Local cv model url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="empty", base_url=base_url)
         self.model_name = model_name.split("___")[0]
         self.lang = lang
@@ -555,8 +555,7 @@
 
 class XinferenceCV(Base):
     def __init__(self, key, model_name="", lang="Chinese", base_url=""):
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
         self.lang = lang
@@ -706,11 +705,9 @@ class NvidiaCV(Base):
         self.lang = lang
         factory, llm_name = model_name.split("/")
         if factory != "liuhaotian":
-            self.base_url = os.path.join(base_url, factory, llm_name)
+            self.base_url = urljoin(base_url, f"{factory}/{llm_name}")
         else:
-            self.base_url = os.path.join(
-                base_url, "community", llm_name.replace("-v1.6", "16")
-            )
+            self.base_url = urljoin(f"{base_url}/community", llm_name.replace("-v1.6", "16"))
         self.key = key
 
     def describe(self, image):
@@ -799,8 +796,7 @@ class LmStudioCV(GptV4):
     def __init__(self, key, model_name, lang="Chinese", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="lm-studio", base_url=base_url)
         self.model_name = model_name
         self.lang = lang
@@ -810,8 +806,7 @@ class OpenAI_APICV(GptV4):
     def __init__(self, key, model_name, lang="Chinese", base_url=""):
         if not base_url:
             raise ValueError("url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name.split("___")[0]
         self.lang = lang
@@ -1032,8 +1027,7 @@ class GPUStackCV(GptV4):
     def __init__(self, key, model_name, lang="Chinese", base_url=""):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
         self.lang = lang
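
The same resolution rule applies to the multi-segment joins in `NvidiaCV` and `LeptonAIChat` above: a relative reference such as `api/v1` or `factory/llm_name` is appended in full only when the base URL has an empty path or ends with a slash. A sketch with made-up hosts and model names:

```python
from urllib.parse import urljoin

# Made-up hosts and model names, purely illustrative.
print(urljoin("https://my-model.lepton.run", "api/v1"))
# https://my-model.lepton.run/api/v1                    (empty base path: segments are appended)

print(urljoin("https://vlm.example.com/v1/", "some-factory/some-model"))
# https://vlm.example.com/v1/some-factory/some-model    (trailing slash: base path is kept)

print(urljoin("https://vlm.example.com/v1", "some-factory/some-model"))
# https://vlm.example.com/some-factory/some-model       (no trailing slash: "v1" is replaced)
```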

rag/llm/embedding_model.py (+7, -10)

@@ -16,6 +16,8 @@
 import logging
 import re
 import threading
+from urllib.parse import urljoin
+
 import requests
 from huggingface_hub import snapshot_download
 from zhipuai import ZhipuAI
@@ -141,8 +143,7 @@ class LocalAIEmbed(Base):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("Local embedding model url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="empty", base_url=base_url)
         self.model_name = model_name.split("___")[0]
 
@@ -322,8 +323,7 @@ class FastEmbed(DefaultEmbedding):
 
 class XinferenceEmbed(Base):
     def __init__(self, key, model_name="", base_url=""):
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
 
@@ -598,8 +598,7 @@ class LmStudioEmbed(LocalAIEmbed):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("Local llm url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key="lm-studio", base_url=base_url)
         self.model_name = model_name
 
@@ -608,8 +607,7 @@ class OpenAI_APIEmbed(OpenAIEmbed):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name.split("___")[0]
 
@@ -833,8 +831,7 @@ class GPUStackEmbed(OpenAIEmbed):
     def __init__(self, key, model_name, base_url):
         if not base_url:
             raise ValueError("url cannot be None")
-        if base_url.split("/")[-1] != "v1":
-            base_url = os.path.join(base_url, "v1")
+        base_url = urljoin(base_url, "v1")
 
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name

rag/llm/rerank_model.py (+2, -3)

@@ -296,12 +296,11 @@ class NvidiaRerank(Base):
         self.model_name = model_name
 
         if self.model_name == "nvidia/nv-rerankqa-mistral-4b-v3":
-            self.base_url = os.path.join(
-                base_url, "nv-rerankqa-mistral-4b-v3", "reranking"
+            self.base_url = urljoin(base_url, "nv-rerankqa-mistral-4b-v3/reranking"
             )
 
         if self.model_name == "nvidia/rerank-qa-mistral-4b":
-            self.base_url = os.path.join(base_url, "reranking")
+            self.base_url = urljoin(base_url, "reranking")
             self.model_name = "nv-rerank-qa-mistral-4b:1"
 
         self.headers = {
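
Since the same validate-and-join pattern now repeats across the touched constructors, one possible follow-up (not part of this PR) would be to factor it into a shared helper. A minimal sketch:

```python
from urllib.parse import urljoin


def normalize_base_url(base_url: str, suffix: str = "v1") -> str:
    """Hypothetical helper, not part of this change: validate the configured
    base_url and resolve the API suffix against it, mirroring the pattern
    used by the constructors in this diff."""
    if not base_url:
        raise ValueError("Local llm url cannot be None")
    return urljoin(base_url, suffix)


# e.g. normalize_base_url("http://localhost:9997") -> "http://localhost:9997/v1"
```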
