
rerank_model.py

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
import re
import threading
from urllib.parse import urljoin
import requests
from huggingface_hub import snapshot_download
import os
from abc import ABC
import numpy as np
from api import settings
from api.utils.file_utils import get_home_cache_dir
from rag.utils import num_tokens_from_string, truncate
import json


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


class Base(ABC):
    def __init__(self, key, model_name):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("Please implement similarity method!")


class DefaultRerank(Base):
    _model = None
    _model_lock = threading.Lock()

    def __init__(self, key, model_name, **kwargs):
        """
        If you have trouble downloading HuggingFace models, -_^ this might help!!
        For Linux:
        export HF_ENDPOINT=https://hf-mirror.com
        For Windows:
        Good luck
        ^_-
        """
        if not settings.LIGHTEN and not DefaultRerank._model:
            import torch
            from FlagEmbedding import FlagReranker
            with DefaultRerank._model_lock:
                if not DefaultRerank._model:
                    try:
                        DefaultRerank._model = FlagReranker(
                            os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z0-9]+/", "", model_name)),
                            use_fp16=torch.cuda.is_available())
                    except Exception:
                        model_dir = snapshot_download(repo_id=model_name,
                                                      local_dir=os.path.join(get_home_cache_dir(),
                                                                             re.sub(r"^[a-zA-Z0-9]+/", "", model_name)),
                                                      local_dir_use_symlinks=False)
                        DefaultRerank._model = FlagReranker(model_dir, use_fp16=torch.cuda.is_available())
        self._model = DefaultRerank._model

    def similarity(self, query: str, texts: list):
        pairs = [(query, truncate(t, 2048)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        batch_size = 4096
        res = []
        for i in range(0, len(pairs), batch_size):
            scores = self._model.compute_score(pairs[i:i + batch_size], max_length=2048)
            scores = sigmoid(np.array(scores)).tolist()
            if isinstance(scores, float):
                res.append(scores)
            else:
                res.extend(scores)
        return np.array(res), token_count
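
# --- Illustrative note (not part of the original module): with settings.LIGHTEN
# off, DefaultRerank loads a FlagEmbedding cross-encoder (for example
# "BAAI/bge-reranker-v2-m3") from the local cache dir, falling back to a
# HuggingFace snapshot download; similarity() then scores (query, text) pairs
# in batches, squashes the raw logits through sigmoid(), and returns one score
# per text plus the token count of the truncated texts, e.g.:
#
#     reranker = DefaultRerank(key="", model_name="BAAI/bge-reranker-v2-m3")
#     scores, tokens = reranker.similarity("what is a reranker?", candidate_texts)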


class JinaRerank(Base):
    def __init__(self, key, model_name="jina-reranker-v1-base-en",
                 base_url="https://api.jina.ai/v1/rerank"):
        self.base_url = base_url or "https://api.jina.ai/v1/rerank"
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        texts = [truncate(t, 8196) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts)
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, res["usage"]["total_tokens"]


class YoudaoRerank(DefaultRerank):
    _model = None
    _model_lock = threading.Lock()

    def __init__(self, key=None, model_name="maidalun1020/bce-reranker-base_v1", **kwargs):
        if not settings.LIGHTEN and not YoudaoRerank._model:
            from BCEmbedding import RerankerModel
            with YoudaoRerank._model_lock:
                if not YoudaoRerank._model:
                    try:
                        logging.info("LOADING BCE...")
                        YoudaoRerank._model = RerankerModel(model_name_or_path=os.path.join(
                            get_home_cache_dir(),
                            re.sub(r"^[a-zA-Z0-9]+/", "", model_name)))
                    except Exception:
                        YoudaoRerank._model = RerankerModel(
                            model_name_or_path=model_name.replace(
                                "maidalun1020", "InfiniFlow"))
        self._model = YoudaoRerank._model

    def similarity(self, query: str, texts: list):
        pairs = [(query, truncate(t, self._model.max_length)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        batch_size = 8
        res = []
        for i in range(0, len(pairs), batch_size):
            scores = self._model.compute_score(pairs[i:i + batch_size], max_length=self._model.max_length)
            scores = sigmoid(np.array(scores)).tolist()
            if isinstance(scores, float):
                res.append(scores)
            else:
                res.extend(scores)
        return np.array(res), token_count


class XInferenceRerank(Base):
    def __init__(self, key="xxxxxxx", model_name="", base_url=""):
        if base_url.find("/v1") == -1:
            base_url = urljoin(base_url, "/v1/rerank")
        self.model_name = model_name
        self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "accept": "application/json",
            "Authorization": f"Bearer {key}"
        }

    def similarity(self, query: str, texts: list):
        if len(texts) == 0:
            return np.array([]), 0
        data = {
            "model": self.model_name,
            "query": query,
            "return_documents": "true",
            "return_len": "true",
            "documents": texts
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, res["meta"]["tokens"]["input_tokens"] + res["meta"]["tokens"]["output_tokens"]


class LocalAIRerank(Base):
    def __init__(self, key, model_name, base_url):
        if base_url.find("/rerank") == -1:
            self.base_url = urljoin(base_url, "/rerank")
        else:
            self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name.replace("___LocalAI", "")

    def similarity(self, query: str, texts: list):
        # The truncation length cannot be configured in RAGFlow, so use a fixed setting.
        texts = [truncate(t, 500) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
        }
        token_count = 0
        for t in texts:
            token_count += num_tokens_from_string(t)
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        if 'results' not in res:
            raise ValueError("response does not contain results\n" + str(res))
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]

        # Normalize the rank values to the range 0 to 1
        min_rank = np.min(rank)
        max_rank = np.max(rank)

        # Avoid division by zero if all ranks are identical
        if max_rank - min_rank != 0:
            rank = (rank - min_rank) / (max_rank - min_rank)
        else:
            rank = np.zeros_like(rank)

        return rank, token_count
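
# Worked example of the min-max normalization in LocalAIRerank.similarity (and
# the identical step in OpenAI_APIRerank.similarity below), with made-up scores:
# raw relevance scores [0.2, 0.5, 0.8] map to (x - 0.2) / (0.8 - 0.2), giving
# [0.0, 0.5, 1.0]; if every score is identical, the guard returns all zeros
# instead of dividing by zero.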


class NvidiaRerank(Base):
    def __init__(
        self, key, model_name, base_url="https://ai.api.nvidia.com/v1/retrieval/nvidia/"
    ):
        if not base_url:
            base_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/"
        self.model_name = model_name

        if self.model_name == "nvidia/nv-rerankqa-mistral-4b-v3":
            self.base_url = os.path.join(
                base_url, "nv-rerankqa-mistral-4b-v3", "reranking"
            )

        if self.model_name == "nvidia/rerank-qa-mistral-4b":
            self.base_url = os.path.join(base_url, "reranking")
            self.model_name = "nv-rerank-qa-mistral-4b:1"

        self.headers = {
            "accept": "application/json",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        token_count = num_tokens_from_string(query) + sum(
            [num_tokens_from_string(t) for t in texts]
        )
        data = {
            "model": self.model_name,
            "query": {"text": query},
            "passages": [{"text": text} for text in texts],
            "truncate": "END",
            "top_n": len(texts),
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["rankings"]:
            rank[d["index"]] = d["logit"]
        return rank, token_count


class LmStudioRerank(Base):
    def __init__(self, key, model_name, base_url):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("The LmStudioRerank has not been implemented")


class OpenAI_APIRerank(Base):
    def __init__(self, key, model_name, base_url):
        if base_url.find("/rerank") == -1:
            self.base_url = urljoin(base_url, "/rerank")
        else:
            self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        # The truncation length cannot be configured in RAGFlow, so use a fixed setting.
        texts = [truncate(t, 500) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
        }
        token_count = 0
        for t in texts:
            token_count += num_tokens_from_string(t)
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        if 'results' not in res:
            raise ValueError("response does not contain results\n" + str(res))
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]

        # Normalize the rank values to the range 0 to 1
        min_rank = np.min(rank)
        max_rank = np.max(rank)

        # Avoid division by zero if all ranks are identical
        if max_rank - min_rank != 0:
            rank = (rank - min_rank) / (max_rank - min_rank)
        else:
            rank = np.zeros_like(rank)

        return rank, token_count


class CoHereRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        from cohere import Client
        self.client = Client(api_key=key)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        token_count = num_tokens_from_string(query) + sum(
            [num_tokens_from_string(t) for t in texts]
        )
        res = self.client.rerank(
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
            return_documents=False,
        )
        rank = np.zeros(len(texts), dtype=float)
        for d in res.results:
            rank[d.index] = d.relevance_score
        return rank, token_count


class TogetherAIRerank(Base):
    def __init__(self, key, model_name, base_url):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("The API has not been implemented")


class SILICONFLOWRerank(Base):
    def __init__(
        self, key, model_name, base_url="https://api.siliconflow.cn/v1/rerank"
    ):
        if not base_url:
            base_url = "https://api.siliconflow.cn/v1/rerank"
        self.model_name = model_name
        self.base_url = base_url
        self.headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        payload = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
            "return_documents": False,
            "max_chunks_per_doc": 1024,
            "overlap_tokens": 80,
        }
        response = requests.post(
            self.base_url, json=payload, headers=self.headers
        ).json()
        rank = np.zeros(len(texts), dtype=float)
        if "results" not in response:
            return rank, 0
        for d in response["results"]:
            rank[d["index"]] = d["relevance_score"]
        return (
            rank,
            response["meta"]["tokens"]["input_tokens"] + response["meta"]["tokens"]["output_tokens"],
        )


class BaiduYiyanRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        from qianfan.resources import Reranker

        key = json.loads(key)
        ak = key.get("yiyan_ak", "")
        sk = key.get("yiyan_sk", "")
        self.client = Reranker(ak=ak, sk=sk)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        res = self.client.do(
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
        ).body
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, res["usage"]["total_tokens"]


class VoyageRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        import voyageai

        self.client = voyageai.Client(api_key=key)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        res = self.client.rerank(
            query=query, documents=texts, model=self.model_name, top_k=len(texts)
        )
        rank = np.zeros(len(texts), dtype=float)
        for r in res.results:
            rank[r.index] = r.relevance_score
        return rank, res.total_tokens


class QWenRerank(Base):
    def __init__(self, key, model_name='gte-rerank', base_url=None, **kwargs):
        import dashscope
        self.api_key = key
        self.model_name = dashscope.TextReRank.Models.gte_rerank if model_name is None else model_name

    def similarity(self, query: str, texts: list):
        import dashscope
        from http import HTTPStatus
        resp = dashscope.TextReRank.call(
            api_key=self.api_key,
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
            return_documents=False
        )
        rank = np.zeros(len(texts), dtype=float)
        if resp.status_code == HTTPStatus.OK:
            for r in resp.output.results:
                rank[r.index] = r.relevance_score
            return rank, resp.usage.total_tokens
        else:
            raise ValueError(f"Error calling QWenRerank model {self.model_name}: {resp.status_code} - {resp.text}")
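

# --- Illustrative usage sketch (an assumption about typical call sites, not
# part of the original module): every class above returns (scores, token_count)
# from similarity(), where scores is a numpy array aligned with the input
# texts, so a caller can pick the top-k candidates the same way regardless of
# provider. The environment variable, query, and chunks below are examples.
if __name__ == "__main__":
    reranker = QWenRerank(key=os.environ.get("DASHSCOPE_API_KEY", ""), model_name="gte-rerank")
    query = "How does a reranking model work?"
    chunks = [
        "A reranker scores each query-document pair with a cross-encoder.",
        "Bananas are a good source of potassium.",
        "Cross-encoders read the query and the passage together.",
    ]
    scores, used_tokens = reranker.similarity(query, chunks)
    top_k = np.argsort(scores)[::-1][:2]  # indices of the 2 highest-scoring chunks
    print([chunks[i] for i in top_k], used_tokens)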