|
|
|
|
|
|
|
|
from rag.utils import num_tokens_from_string, truncate |
|
|
from rag.utils import num_tokens_from_string, truncate |
|
|
import json |
|
|
import json |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def sigmoid(x):
    """Map *x* (scalar or ndarray) through the logistic function 1 / (1 + e^-x).

    Used to squash raw reranker logits into (0, 1) relevance scores.
    """
    z = np.exp(-x)
    return 1 / (1 + z)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Base(ABC):
    """Common parent for rerank-model wrappers.

    Subclasses are expected to set up their client/model in ``__init__``
    and expose a ``similarity(query, texts)`` scoring entry point.
    """

    def __init__(self, key, model_name):
        # Intentionally a no-op here; each concrete wrapper handles its own
        # credential (`key`) and `model_name` wiring.
        pass
|
|
|
|
|
|
|
|
with DefaultRerank._model_lock: |
|
|
with DefaultRerank._model_lock: |
|
|
if not DefaultRerank._model: |
|
|
if not DefaultRerank._model: |
|
|
try: |
|
|
try: |
|
|
DefaultRerank._model = FlagReranker(os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)), use_fp16=torch.cuda.is_available()) |
|
|
|
|
|
|
|
|
DefaultRerank._model = FlagReranker( |
|
|
|
|
|
os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)), |
|
|
|
|
|
use_fp16=torch.cuda.is_available()) |
|
|
except Exception as e: |
|
|
except Exception as e: |
|
|
model_dir = snapshot_download(repo_id= model_name, |
|
|
|
|
|
local_dir=os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z]+/", "", model_name)), |
|
|
|
|
|
|
|
|
model_dir = snapshot_download(repo_id=model_name, |
|
|
|
|
|
local_dir=os.path.join(get_home_cache_dir(), |
|
|
|
|
|
re.sub(r"^[a-zA-Z]+/", "", model_name)), |
|
|
local_dir_use_symlinks=False) |
|
|
local_dir_use_symlinks=False) |
|
|
DefaultRerank._model = FlagReranker(model_dir, use_fp16=torch.cuda.is_available()) |
|
|
DefaultRerank._model = FlagReranker(model_dir, use_fp16=torch.cuda.is_available()) |
|
|
self._model = DefaultRerank._model |
|
|
self._model = DefaultRerank._model |
|
|
|
|
|
|
|
|
def similarity(self, query: str, texts: list):
    """Score each text in *texts* against *query* with the local FlagReranker.

    Returns a tuple ``(scores, token_count)`` where ``scores`` is a 1-D
    numpy array of sigmoid-squashed relevance scores aligned with *texts*,
    and ``token_count`` is the total token count of the (truncated) texts.
    """
    # Clip every candidate to the model's 2048-token context before pairing.
    pairs = [(query, truncate(t, 2048)) for t in texts]
    token_count = 0
    for _, t in pairs:
        token_count += num_tokens_from_string(t)
    # NOTE(review): these two initializers were missing from the garbled
    # source; `res = []` is forced by the loop below, batch size value
    # should be confirmed against the original file.
    batch_size = 4096
    res = []
    for i in range(0, len(pairs), batch_size):
        scores = self._model.compute_score(pairs[i:i + batch_size], max_length=2048)
        scores = sigmoid(np.array(scores)).tolist()
        # compute_score returns a bare float for a single pair, a list otherwise.
        if isinstance(scores, float):
            res.append(scores)
        else:
            res.extend(scores)
    return np.array(res), token_count
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"top_n": len(texts) |
|
|
"top_n": len(texts) |
|
|
} |
|
|
} |
|
|
res = requests.post(self.base_url, headers=self.headers, json=data).json() |
|
|
res = requests.post(self.base_url, headers=self.headers, json=data).json() |
|
|
return np.array([d["relevance_score"] for d in res["results"]]), res["usage"]["total_tokens"] |
|
|
|
|
|
|
|
|
rank = np.zeros(len(texts), dtype=float) |
|
|
|
|
|
for d in res["results"]: |
|
|
|
|
|
rank[d["index"]] = d["relevance_score"] |
|
|
|
|
|
return rank, res["usage"]["total_tokens"] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class YoudaoRerank(DefaultRerank): |
|
|
class YoudaoRerank(DefaultRerank): |
|
|
|
|
|
|
|
|
"maidalun1020", "InfiniFlow")) |
|
|
"maidalun1020", "InfiniFlow")) |
|
|
|
|
|
|
|
|
self._model = YoudaoRerank._model |
|
|
self._model = YoudaoRerank._model |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def similarity(self, query: str, texts: list):
    """Score each text in *texts* against *query* with the Youdao/BCE reranker.

    Same contract as ``DefaultRerank.similarity`` but truncation and scoring
    use the model's own ``max_length`` instead of a fixed 2048.
    Returns ``(scores_ndarray, token_count)``.
    """
    pairs = [(query, truncate(t, self._model.max_length)) for t in texts]
    token_count = 0
    for _, t in pairs:
        token_count += num_tokens_from_string(t)
    # NOTE(review): these two initializers were missing from the garbled
    # source; `res = []` is forced by the loop below, batch size value
    # should be confirmed against the original file.
    batch_size = 8
    res = []
    for i in range(0, len(pairs), batch_size):
        scores = self._model.compute_score(pairs[i:i + batch_size], max_length=self._model.max_length)
        scores = sigmoid(np.array(scores)).tolist()
        # compute_score returns a bare float for a single pair, a list otherwise.
        if isinstance(scores, float):
            res.append(scores)
        else:
            res.extend(scores)
    return np.array(res), token_count
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"documents": texts |
|
|
"documents": texts |
|
|
} |
|
|
} |
|
|
res = requests.post(self.base_url, headers=self.headers, json=data).json() |
|
|
res = requests.post(self.base_url, headers=self.headers, json=data).json() |
|
|
return np.array([d["relevance_score"] for d in res["results"]]), res["meta"]["tokens"]["input_tokens"]+res["meta"]["tokens"]["output_tokens"] |
|
|
|
|
|
|
|
|
rank = np.zeros(len(texts), dtype=float) |
|
|
|
|
|
for d in res["results"]: |
|
|
|
|
|
rank[d["index"]] = d["relevance_score"] |
|
|
|
|
|
return rank, res["meta"]["tokens"]["input_tokens"] + res["meta"]["tokens"]["output_tokens"] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class LocalAIRerank(Base): |
|
|
class LocalAIRerank(Base): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class NvidiaRerank(Base): |
|
|
class NvidiaRerank(Base): |
|
|
def __init__( |
|
|
def __init__( |
|
|
self, key, model_name, base_url="https://ai.api.nvidia.com/v1/retrieval/nvidia/" |
|
|
|
|
|
|
|
|
self, key, model_name, base_url="https://ai.api.nvidia.com/v1/retrieval/nvidia/" |
|
|
): |
|
|
): |
|
|
if not base_url: |
|
|
if not base_url: |
|
|
base_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/" |
|
|
base_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/" |
|
|
|
|
|
|
|
|
"top_n": len(texts), |
|
|
"top_n": len(texts), |
|
|
} |
|
|
} |
|
|
res = requests.post(self.base_url, headers=self.headers, json=data).json() |
|
|
res = requests.post(self.base_url, headers=self.headers, json=data).json() |
|
|
rank = np.array([d["logit"] for d in res["rankings"]]) |
|
|
|
|
|
indexs = [d["index"] for d in res["rankings"]] |
|
|
|
|
|
return rank[indexs], token_count |
|
|
|
|
|
|
|
|
rank = np.zeros(len(texts), dtype=float) |
|
|
|
|
|
for d in res["rankings"]: |
|
|
|
|
|
rank[d["index"]] = d["logit"] |
|
|
|
|
|
return rank, token_count |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class LmStudioRerank(Base): |
|
|
class LmStudioRerank(Base): |
|
|
|
|
|
|
|
|
top_n=len(texts), |
|
|
top_n=len(texts), |
|
|
return_documents=False, |
|
|
return_documents=False, |
|
|
) |
|
|
) |
|
|
rank = np.array([d.relevance_score for d in res.results]) |
|
|
|
|
|
indexs = [d.index for d in res.results] |
|
|
|
|
|
return rank[indexs], token_count |
|
|
|
|
|
|
|
|
rank = np.zeros(len(texts), dtype=float) |
|
|
|
|
|
for d in res.results: |
|
|
|
|
|
rank[d.index] = d.relevance_score |
|
|
|
|
|
return rank, token_count |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TogetherAIRerank(Base): |
|
|
class TogetherAIRerank(Base): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class SILICONFLOWRerank(Base): |
|
|
class SILICONFLOWRerank(Base): |
|
|
def __init__( |
|
|
def __init__( |
|
|
self, key, model_name, base_url="https://api.siliconflow.cn/v1/rerank" |
|
|
|
|
|
|
|
|
self, key, model_name, base_url="https://api.siliconflow.cn/v1/rerank" |
|
|
): |
|
|
): |
|
|
if not base_url: |
|
|
if not base_url: |
|
|
base_url = "https://api.siliconflow.cn/v1/rerank" |
|
|
base_url = "https://api.siliconflow.cn/v1/rerank" |
|
|
|
|
|
|
|
|
response = requests.post( |
|
|
response = requests.post( |
|
|
self.base_url, json=payload, headers=self.headers |
|
|
self.base_url, json=payload, headers=self.headers |
|
|
).json() |
|
|
).json() |
|
|
rank = np.array([d["relevance_score"] for d in response["results"]]) |
|
|
|
|
|
indexs = [d["index"] for d in response["results"]] |
|
|
|
|
|
|
|
|
rank = np.zeros(len(texts), dtype=float) |
|
|
|
|
|
for d in response["results"]: |
|
|
|
|
|
rank[d["index"]] = d["relevance_score"] |
|
|
return ( |
|
|
return ( |
|
|
rank[indexs], |
|
|
|
|
|
|
|
|
rank, |
|
|
response["meta"]["tokens"]["input_tokens"] + response["meta"]["tokens"]["output_tokens"], |
|
|
response["meta"]["tokens"]["input_tokens"] + response["meta"]["tokens"]["output_tokens"], |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
documents=texts, |
|
|
documents=texts, |
|
|
top_n=len(texts), |
|
|
top_n=len(texts), |
|
|
).body |
|
|
).body |
|
|
rank = np.array([d["relevance_score"] for d in res["results"]]) |
|
|
|
|
|
indexs = [d["index"] for d in res["results"]] |
|
|
|
|
|
return rank[indexs], res["usage"]["total_tokens"] |
|
|
|
|
|
|
|
|
rank = np.zeros(len(texts), dtype=float) |
|
|
|
|
|
for d in res["results"]: |
|
|
|
|
|
rank[d["index"]] = d["relevance_score"] |
|
|
|
|
|
return rank, res["usage"]["total_tokens"] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class VoyageRerank(Base): |
|
|
class VoyageRerank(Base): |
|
|
|
|
|
|
|
|
res = self.client.rerank( |
|
|
res = self.client.rerank( |
|
|
query=query, documents=texts, model=self.model_name, top_k=len(texts) |
|
|
query=query, documents=texts, model=self.model_name, top_k=len(texts) |
|
|
) |
|
|
) |
|
|
rank = np.array([r.relevance_score for r in res.results]) |
|
|
|
|
|
indexs = [r.index for r in res.results] |
|
|
|
|
|
return rank[indexs], res.total_tokens |
|
|
|
|
|
|
|
|
rank = np.zeros(len(texts), dtype=float) |
|
|
|
|
|
for r in res.results: |
|
|
|
|
|
rank[r.index] = r.relevance_score |
|
|
|
|
|
return rank, res.total_tokens |