#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import threading
from urllib.parse import urljoin
import requests
from huggingface_hub import snapshot_download
import os
from abc import ABC
import numpy as np
from api import settings
from api.utils.file_utils import get_home_cache_dir
from rag.utils import num_tokens_from_string, truncate
import json


def sigmoid(x):
    return 1 / (1 + np.exp(-x))
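
# sigmoid() is used by the local rerankers below (DefaultRerank, YoudaoRerank)
# to squash raw model scores (logits) into (0, 1): sigmoid(0) == 0.5 and
# sigmoid(4) is roughly 0.982, so scores from different batches stay on a
# comparable scale before they are merged.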


class Base(ABC):
    def __init__(self, key, model_name):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("Please implement similarity method!")


class DefaultRerank(Base):
    _model = None
    _model_lock = threading.Lock()

    def __init__(self, key, model_name, **kwargs):
        """
        If you have trouble downloading HuggingFace models, -_^ this might help!!
        For Linux:
        export HF_ENDPOINT=https://hf-mirror.com
        For Windows:
        Good luck
        ^_-
        """
        if not settings.LIGHTEN and not DefaultRerank._model:
            import torch
            from FlagEmbedding import FlagReranker
            with DefaultRerank._model_lock:
                if not DefaultRerank._model:
                    try:
                        # Prefer a copy of the model already in the local cache directory.
                        DefaultRerank._model = FlagReranker(
                            os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z0-9]+/", "", model_name)),
                            use_fp16=torch.cuda.is_available())
                    except Exception:
                        # Otherwise download it from HuggingFace into the cache directory.
                        model_dir = snapshot_download(
                            repo_id=model_name,
                            local_dir=os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z0-9]+/", "", model_name)),
                            local_dir_use_symlinks=False)
                        DefaultRerank._model = FlagReranker(model_dir, use_fp16=torch.cuda.is_available())
        self._model = DefaultRerank._model

    def similarity(self, query: str, texts: list):
        pairs = [(query, truncate(t, 2048)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        batch_size = 4096
        res = []
        for i in range(0, len(pairs), batch_size):
            scores = self._model.compute_score(pairs[i:i + batch_size], max_length=2048)
            scores = sigmoid(np.array(scores)).tolist()
            if isinstance(scores, float):
                res.append(scores)
            else:
                res.extend(scores)
        return np.array(res), token_count
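
# Usage sketch (illustrative; "BAAI/bge-reranker-v2-m3" is an assumed model name,
# any reranker supported by FlagEmbedding's FlagReranker works the same way):
#   reranker = DefaultRerank(key="", model_name="BAAI/bge-reranker-v2-m3")
#   scores, tokens = reranker.similarity("what is a rerank model?",
#                                        ["passage one", "passage two"])
# "scores" is an np.ndarray of sigmoid-normalized relevance values in (0, 1);
# "tokens" is a rough token count of the candidate texts.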


class JinaRerank(Base):
    def __init__(self, key, model_name="jina-reranker-v2-base-multilingual",
                 base_url="https://api.jina.ai/v1/rerank"):
        self.base_url = base_url or "https://api.jina.ai/v1/rerank"
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        texts = [truncate(t, 8196) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts)
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, res["usage"]["total_tokens"]


class YoudaoRerank(DefaultRerank):
    _model = None
    _model_lock = threading.Lock()

    def __init__(self, key=None, model_name="maidalun1020/bce-reranker-base_v1", **kwargs):
        if not settings.LIGHTEN and not YoudaoRerank._model:
            from BCEmbedding import RerankerModel
            with YoudaoRerank._model_lock:
                if not YoudaoRerank._model:
                    try:
                        YoudaoRerank._model = RerankerModel(model_name_or_path=os.path.join(
                            get_home_cache_dir(),
                            re.sub(r"^[a-zA-Z0-9]+/", "", model_name)))
                    except Exception:
                        YoudaoRerank._model = RerankerModel(
                            model_name_or_path=model_name.replace(
                                "maidalun1020", "InfiniFlow"))
        self._model = YoudaoRerank._model

    def similarity(self, query: str, texts: list):
        pairs = [(query, truncate(t, self._model.max_length)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        batch_size = 8
        res = []
        for i in range(0, len(pairs), batch_size):
            scores = self._model.compute_score(pairs[i:i + batch_size], max_length=self._model.max_length)
            scores = sigmoid(np.array(scores)).tolist()
            if isinstance(scores, float):
                res.append(scores)
            else:
                res.extend(scores)
        return np.array(res), token_count


class XInferenceRerank(Base):
    def __init__(self, key="xxxxxxx", model_name="", base_url=""):
        if base_url.find("/v1") == -1:
            base_url = urljoin(base_url, "/v1/rerank")
        if base_url.find("/rerank") == -1:
            base_url = urljoin(base_url, "/v1/rerank")
        self.model_name = model_name
        self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "accept": "application/json",
            "Authorization": f"Bearer {key}"
        }

    def similarity(self, query: str, texts: list):
        if len(texts) == 0:
            return np.array([]), 0
        data = {
            "model": self.model_name,
            "query": query,
            "return_documents": "true",
            "return_len": "true",
            "documents": texts
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, res["meta"]["tokens"]["input_tokens"] + res["meta"]["tokens"]["output_tokens"]
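
# Note: XInferenceRerank accepts a bare server address and appends "/v1/rerank"
# when the path is missing. Usage sketch (illustrative; the address and model
# name are assumptions that must match the running Xinference server):
#   reranker = XInferenceRerank(key="x", model_name="bge-reranker-v2-m3",
#                               base_url="http://localhost:9997")
#   scores, tokens = reranker.similarity("query", ["doc a", "doc b"])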


class LocalAIRerank(Base):
    def __init__(self, key, model_name, base_url):
        if base_url.find("/rerank") == -1:
            self.base_url = urljoin(base_url, "/rerank")
        else:
            self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name.split("___")[0]

    def similarity(self, query: str, texts: list):
        # There is no way to configure this in RAGFlow, so use a fixed setting.
        texts = [truncate(t, 500) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
        }
        token_count = 0
        for t in texts:
            token_count += num_tokens_from_string(t)
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        if "results" not in res:
            raise ValueError("Response does not contain results\n" + str(res))
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        # Normalize the rank values to the range 0 to 1
        min_rank = np.min(rank)
        max_rank = np.max(rank)
        # Avoid division by zero if all ranks are identical
        if max_rank - min_rank != 0:
            rank = (rank - min_rank) / (max_rank - min_rank)
        else:
            rank = np.zeros_like(rank)
        return rank, token_count
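
# In LocalAIRerank.similarity (and OpenAI_APIRerank.similarity below) the scores
# are min-max normalized: e.g. raw relevance scores [0.2, 0.5, 0.8] become
# [0.0, 0.5, 1.0], and an all-equal score vector falls back to zeros instead of
# dividing by zero.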


class NvidiaRerank(Base):
    def __init__(
        self, key, model_name, base_url="https://ai.api.nvidia.com/v1/retrieval/nvidia/"
    ):
        if not base_url:
            base_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/"
        self.model_name = model_name
        if self.model_name == "nvidia/nv-rerankqa-mistral-4b-v3":
            self.base_url = os.path.join(
                base_url, "nv-rerankqa-mistral-4b-v3", "reranking"
            )
        if self.model_name == "nvidia/rerank-qa-mistral-4b":
            self.base_url = os.path.join(base_url, "reranking")
            self.model_name = "nv-rerank-qa-mistral-4b:1"
        self.headers = {
            "accept": "application/json",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        token_count = num_tokens_from_string(query) + sum(
            [num_tokens_from_string(t) for t in texts]
        )
        data = {
            "model": self.model_name,
            "query": {"text": query},
            "passages": [{"text": text} for text in texts],
            "truncate": "END",
            "top_n": len(texts),
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["rankings"]:
            rank[d["index"]] = d["logit"]
        return rank, token_count
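
# Note: NvidiaRerank reports raw logits from the "rankings" field rather than
# scores in (0, 1), and its token count is estimated locally with
# num_tokens_from_string instead of coming from provider-reported usage.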


class LmStudioRerank(Base):
    def __init__(self, key, model_name, base_url):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("LmStudioRerank has not been implemented")


class OpenAI_APIRerank(Base):
    def __init__(self, key, model_name, base_url):
        if base_url.find("/rerank") == -1:
            self.base_url = urljoin(base_url, "/rerank")
        else:
            self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name.split("___")[0]

    def similarity(self, query: str, texts: list):
        # There is no way to configure this in RAGFlow, so use a fixed setting.
        texts = [truncate(t, 500) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
        }
        token_count = 0
        for t in texts:
            token_count += num_tokens_from_string(t)
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        if "results" not in res:
            raise ValueError("Response does not contain results\n" + str(res))
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        # Normalize the rank values to the range 0 to 1
        min_rank = np.min(rank)
        max_rank = np.max(rank)
        # Avoid division by zero if all ranks are identical
        if max_rank - min_rank != 0:
            rank = (rank - min_rank) / (max_rank - min_rank)
        else:
            rank = np.zeros_like(rank)
        return rank, token_count


class CoHereRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        from cohere import Client
        self.client = Client(api_key=key)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        token_count = num_tokens_from_string(query) + sum(
            [num_tokens_from_string(t) for t in texts]
        )
        res = self.client.rerank(
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
            return_documents=False,
        )
        rank = np.zeros(len(texts), dtype=float)
        for d in res.results:
            rank[d.index] = d.relevance_score
        return rank, token_count


class TogetherAIRerank(Base):
    def __init__(self, key, model_name, base_url):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("The API has not been implemented")


class SILICONFLOWRerank(Base):
    def __init__(
        self, key, model_name, base_url="https://api.siliconflow.cn/v1/rerank"
    ):
        if not base_url:
            base_url = "https://api.siliconflow.cn/v1/rerank"
        self.model_name = model_name
        self.base_url = base_url
        self.headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        payload = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
            "return_documents": False,
            "max_chunks_per_doc": 1024,
            "overlap_tokens": 80,
        }
        response = requests.post(
            self.base_url, json=payload, headers=self.headers
        ).json()
        rank = np.zeros(len(texts), dtype=float)
        if "results" not in response:
            return rank, 0
        for d in response["results"]:
            rank[d["index"]] = d["relevance_score"]
        return (
            rank,
            response["meta"]["tokens"]["input_tokens"] + response["meta"]["tokens"]["output_tokens"],
        )


class BaiduYiyanRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        from qianfan.resources import Reranker
        key = json.loads(key)
        ak = key.get("yiyan_ak", "")
        sk = key.get("yiyan_sk", "")
        self.client = Reranker(ak=ak, sk=sk)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        res = self.client.do(
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
        ).body
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, res["usage"]["total_tokens"]
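
# Usage sketch (illustrative; the credentials are placeholders and
# "bce_reranker_base" is an assumed Qianfan model name): the "key" argument is
# expected to be a JSON string carrying both Qianfan credentials, e.g.
#   key = json.dumps({"yiyan_ak": "<access-key>", "yiyan_sk": "<secret-key>"})
#   reranker = BaiduYiyanRerank(key, model_name="bce_reranker_base")
#   scores, tokens = reranker.similarity("query", ["doc a", "doc b"])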


class VoyageRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        import voyageai
        self.client = voyageai.Client(api_key=key)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        rank = np.zeros(len(texts), dtype=float)
        if not texts:
            return rank, 0
        res = self.client.rerank(
            query=query, documents=texts, model=self.model_name, top_k=len(texts)
        )
        for r in res.results:
            rank[r.index] = r.relevance_score
        return rank, res.total_tokens


class QWenRerank(Base):
    def __init__(self, key, model_name='gte-rerank', base_url=None, **kwargs):
        import dashscope
        self.api_key = key
        self.model_name = dashscope.TextReRank.Models.gte_rerank if model_name is None else model_name

    def similarity(self, query: str, texts: list):
        import dashscope
        from http import HTTPStatus
        resp = dashscope.TextReRank.call(
            api_key=self.api_key,
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
            return_documents=False
        )
        rank = np.zeros(len(texts), dtype=float)
        if resp.status_code == HTTPStatus.OK:
            for r in resp.output.results:
                rank[r.index] = r.relevance_score
            return rank, resp.usage.total_tokens
        else:
            raise ValueError(f"Error calling QWenRerank model {self.model_name}: {resp.status_code} - {resp.text}")