
rerank_model.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import threading
from collections.abc import Iterable
from urllib.parse import urljoin
import requests
import httpx
from huggingface_hub import snapshot_download
import os
from abc import ABC
import numpy as np
from yarl import URL
from api import settings
from api.utils.file_utils import get_home_cache_dir
from rag.utils import num_tokens_from_string, truncate
import json


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


class Base(ABC):
    def __init__(self, key, model_name):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("Please implement the similarity method!")

    def total_token_count(self, resp):
        try:
            return resp.usage.total_tokens
        except Exception:
            pass
        try:
            return resp["usage"]["total_tokens"]
        except Exception:
            pass
        return 0
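
# Shared contract for every reranker below (a sketch of the convention, not
# enforced by the ABC): similarity(query, texts) returns a tuple of
#   1) an np.ndarray of relevance scores aligned index-for-index with `texts`;
#   2) an (approximate) token count used for accounting.
# Hypothetical usage, assuming a valid API key and network access:
#   reranker = JinaRerank(key="YOUR_API_KEY")
#   scores, tokens = reranker.similarity("what is RAG?", ["passage A", "passage B"])
#   best_idx = int(np.argmax(scores))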


class DefaultRerank(Base):
    _model = None
    _model_lock = threading.Lock()

    def __init__(self, key, model_name, **kwargs):
        """
        If you have trouble downloading HuggingFace models, -_^ this might help!!
        For Linux:
        export HF_ENDPOINT=https://hf-mirror.com
        For Windows:
        Good luck
        ^_-
        """
        if not settings.LIGHTEN and not DefaultRerank._model:
            import torch
            from FlagEmbedding import FlagReranker
            with DefaultRerank._model_lock:
                if not DefaultRerank._model:
                    try:
                        DefaultRerank._model = FlagReranker(
                            os.path.join(get_home_cache_dir(), re.sub(r"^[a-zA-Z0-9]+/", "", model_name)),
                            use_fp16=torch.cuda.is_available())
                    except Exception:
                        model_dir = snapshot_download(repo_id=model_name,
                                                      local_dir=os.path.join(get_home_cache_dir(),
                                                                             re.sub(r"^[a-zA-Z0-9]+/", "", model_name)),
                                                      local_dir_use_symlinks=False)
                        DefaultRerank._model = FlagReranker(model_dir, use_fp16=torch.cuda.is_available())
        self._model = DefaultRerank._model
        self._dynamic_batch_size = 8
        self._min_batch_size = 1

    def torch_empty_cache(self):
        try:
            import torch
            torch.cuda.empty_cache()
        except Exception as e:
            print(f"Error emptying cache: {e}")

    def _process_batch(self, pairs, max_batch_size=None):
        """Template method: score `pairs` in batches, shrinking the batch size on CUDA OOM; subclasses provide _compute_batch_scores()."""
        old_dynamic_batch_size = self._dynamic_batch_size
        if max_batch_size is not None:
            self._dynamic_batch_size = max_batch_size
        res = []
        i = 0
        while i < len(pairs):
            current_batch = self._dynamic_batch_size
            max_retries = 5
            retry_count = 0
            while retry_count < max_retries:
                try:
                    # Call the batch scoring implemented by the subclass
                    batch_scores = self._compute_batch_scores(pairs[i:i + current_batch])
                    res.extend(batch_scores)
                    i += current_batch
                    self._dynamic_batch_size = min(self._dynamic_batch_size * 2, 8)
                    break
                except RuntimeError as e:
                    if "CUDA out of memory" in str(e) and current_batch > self._min_batch_size:
                        current_batch = max(current_batch // 2, self._min_batch_size)
                        self.torch_empty_cache()
                        retry_count += 1
                    else:
                        raise
            if retry_count >= max_retries:
                raise RuntimeError("Reached the maximum number of retries but still cannot process the batch; please check your GPU memory.")
            self.torch_empty_cache()
        self._dynamic_batch_size = old_dynamic_batch_size
        return np.array(res)

    def _compute_batch_scores(self, batch_pairs, max_length=None):
        if max_length is None:
            scores = self._model.compute_score(batch_pairs)
        else:
            scores = self._model.compute_score(batch_pairs, max_length=max_length)
        scores = sigmoid(np.array(scores)).tolist()
        if not isinstance(scores, Iterable):
            scores = [scores]
        return scores

    def similarity(self, query: str, texts: list):
        pairs = [(query, truncate(t, 2048)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        batch_size = 4096
        res = self._process_batch(pairs, max_batch_size=batch_size)
        return np.array(res), token_count
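
# Worked example of the OOM backoff in _process_batch(): with
# _dynamic_batch_size = 8, a "CUDA out of memory" RuntimeError halves the
# batch to 4, then 2, then 1 (the _min_batch_size floor), emptying the CUDA
# cache between attempts; after 5 failed retries it gives up with a
# RuntimeError. On success the dynamic batch size doubles again, capped at 8.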


class JinaRerank(Base):
    def __init__(self, key, model_name="jina-reranker-v2-base-multilingual",
                 base_url="https://api.jina.ai/v1/rerank"):
        self.base_url = "https://api.jina.ai/v1/rerank"
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        texts = [truncate(t, 8196) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts)
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, self.total_token_count(res)


class YoudaoRerank(DefaultRerank):
    _model = None
    _model_lock = threading.Lock()

    def __init__(self, key=None, model_name="maidalun1020/bce-reranker-base_v1", **kwargs):
        if not settings.LIGHTEN and not YoudaoRerank._model:
            from BCEmbedding import RerankerModel
            with YoudaoRerank._model_lock:
                if not YoudaoRerank._model:
                    try:
                        YoudaoRerank._model = RerankerModel(model_name_or_path=os.path.join(
                            get_home_cache_dir(),
                            re.sub(r"^[a-zA-Z0-9]+/", "", model_name)))
                    except Exception:
                        YoudaoRerank._model = RerankerModel(
                            model_name_or_path=model_name.replace(
                                "maidalun1020", "InfiniFlow"))
        self._model = YoudaoRerank._model

    def similarity(self, query: str, texts: list):
        pairs = [(query, truncate(t, self._model.max_length)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        batch_size = 8
        res = self._process_batch(pairs, max_batch_size=batch_size)
        return np.array(res), token_count


class XInferenceRerank(Base):
    def __init__(self, key="xxxxxxx", model_name="", base_url=""):
        if base_url.find("/v1") == -1:
            base_url = urljoin(base_url, "/v1/rerank")
        if base_url.find("/rerank") == -1:
            base_url = urljoin(base_url, "/v1/rerank")
        self.model_name = model_name
        self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "accept": "application/json",
            "Authorization": f"Bearer {key}"
        }

    def similarity(self, query: str, texts: list):
        if len(texts) == 0:
            return np.array([]), 0
        pairs = [(query, truncate(t, 4096)) for t in texts]
        token_count = 0
        for _, t in pairs:
            token_count += num_tokens_from_string(t)
        data = {
            "model": self.model_name,
            "query": query,
            "return_documents": "true",
            "return_len": "true",
            "documents": texts
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, token_count


class LocalAIRerank(Base):
    def __init__(self, key, model_name, base_url):
        if base_url.find("/rerank") == -1:
            self.base_url = urljoin(base_url, "/rerank")
        else:
            self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name.split("___")[0]

    def similarity(self, query: str, texts: list):
        # There is no way to configure this from RAGFlow, so use a fixed setting.
        texts = [truncate(t, 500) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
        }
        token_count = 0
        for t in texts:
            token_count += num_tokens_from_string(t)
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        if 'results' not in res:
            raise ValueError("response does not contain 'results'\n" + str(res))
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]

        # Normalize the rank values to the range 0 to 1
        min_rank = np.min(rank)
        max_rank = np.max(rank)

        # Avoid division by zero if all ranks are identical
        if max_rank - min_rank != 0:
            rank = (rank - min_rank) / (max_rank - min_rank)
        else:
            rank = np.zeros_like(rank)

        return rank, token_count
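
# Worked example of the min-max normalization above: raw relevance scores
# [0.2, 0.5, 0.8] become [0.0, 0.5, 1.0]; if every score is identical
# (max - min == 0), the result falls back to all zeros.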


class NvidiaRerank(Base):
    def __init__(
        self, key, model_name, base_url="https://ai.api.nvidia.com/v1/retrieval/nvidia/"
    ):
        if not base_url:
            base_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/"
        self.model_name = model_name

        if self.model_name == "nvidia/nv-rerankqa-mistral-4b-v3":
            self.base_url = os.path.join(
                base_url, "nv-rerankqa-mistral-4b-v3", "reranking"
            )

        if self.model_name == "nvidia/rerank-qa-mistral-4b":
            self.base_url = os.path.join(base_url, "reranking")
            self.model_name = "nv-rerank-qa-mistral-4b:1"

        self.headers = {
            "accept": "application/json",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        token_count = num_tokens_from_string(query) + sum(
            [num_tokens_from_string(t) for t in texts]
        )
        data = {
            "model": self.model_name,
            "query": {"text": query},
            "passages": [{"text": text} for text in texts],
            "truncate": "END",
            "top_n": len(texts),
        }
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        for d in res["rankings"]:
            rank[d["index"]] = d["logit"]
        return rank, token_count
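
# Note: unlike most providers above, this endpoint returns a raw "logit" per
# passage rather than a normalized relevance_score, so the values placed in
# `rank` are not confined to [0, 1].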


class LmStudioRerank(Base):
    def __init__(self, key, model_name, base_url):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("LmStudioRerank has not been implemented yet.")


class OpenAI_APIRerank(Base):
    def __init__(self, key, model_name, base_url):
        if base_url.find("/rerank") == -1:
            self.base_url = urljoin(base_url, "/rerank")
        else:
            self.base_url = base_url
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {key}"
        }
        self.model_name = model_name.split("___")[0]

    def similarity(self, query: str, texts: list):
        # There is no way to configure this from RAGFlow, so use a fixed setting.
        texts = [truncate(t, 500) for t in texts]
        data = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
        }
        token_count = 0
        for t in texts:
            token_count += num_tokens_from_string(t)
        res = requests.post(self.base_url, headers=self.headers, json=data).json()
        rank = np.zeros(len(texts), dtype=float)
        if 'results' not in res:
            raise ValueError("response does not contain 'results'\n" + str(res))
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]

        # Normalize the rank values to the range 0 to 1
        min_rank = np.min(rank)
        max_rank = np.max(rank)

        # Avoid division by zero if all ranks are identical
        if max_rank - min_rank != 0:
            rank = (rank - min_rank) / (max_rank - min_rank)
        else:
            rank = np.zeros_like(rank)

        return rank, token_count


class CoHereRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        from cohere import Client
        self.client = Client(api_key=key, base_url=base_url)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        token_count = num_tokens_from_string(query) + sum(
            [num_tokens_from_string(t) for t in texts]
        )
        res = self.client.rerank(
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
            return_documents=False,
        )
        rank = np.zeros(len(texts), dtype=float)
        for d in res.results:
            rank[d.index] = d.relevance_score
        return rank, token_count


class TogetherAIRerank(Base):
    def __init__(self, key, model_name, base_url):
        pass

    def similarity(self, query: str, texts: list):
        raise NotImplementedError("The TogetherAI rerank API has not been implemented yet.")


class SILICONFLOWRerank(Base):
    def __init__(
        self, key, model_name, base_url="https://api.siliconflow.cn/v1/rerank"
    ):
        if not base_url:
            base_url = "https://api.siliconflow.cn/v1/rerank"
        self.model_name = model_name
        self.base_url = base_url
        self.headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        payload = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
            "return_documents": False,
            "max_chunks_per_doc": 1024,
            "overlap_tokens": 80,
        }
        response = requests.post(
            self.base_url, json=payload, headers=self.headers
        ).json()
        rank = np.zeros(len(texts), dtype=float)
        if "results" not in response:
            return rank, 0
        for d in response["results"]:
            rank[d["index"]] = d["relevance_score"]
        return (
            rank,
            response["meta"]["tokens"]["input_tokens"] + response["meta"]["tokens"]["output_tokens"],
        )


class BaiduYiyanRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        from qianfan.resources import Reranker

        key = json.loads(key)
        ak = key.get("yiyan_ak", "")
        sk = key.get("yiyan_sk", "")
        self.client = Reranker(ak=ak, sk=sk)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        res = self.client.do(
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
        ).body
        rank = np.zeros(len(texts), dtype=float)
        for d in res["results"]:
            rank[d["index"]] = d["relevance_score"]
        return rank, self.total_token_count(res)


class VoyageRerank(Base):
    def __init__(self, key, model_name, base_url=None):
        import voyageai

        self.client = voyageai.Client(api_key=key)
        self.model_name = model_name

    def similarity(self, query: str, texts: list):
        rank = np.zeros(len(texts), dtype=float)
        if not texts:
            return rank, 0
        res = self.client.rerank(
            query=query, documents=texts, model=self.model_name, top_k=len(texts)
        )
        for r in res.results:
            rank[r.index] = r.relevance_score
        return rank, res.total_tokens


class QWenRerank(Base):
    def __init__(self, key, model_name='gte-rerank', base_url=None, **kwargs):
        import dashscope
        self.api_key = key
        self.model_name = dashscope.TextReRank.Models.gte_rerank if model_name is None else model_name

    def similarity(self, query: str, texts: list):
        import dashscope
        from http import HTTPStatus
        resp = dashscope.TextReRank.call(
            api_key=self.api_key,
            model=self.model_name,
            query=query,
            documents=texts,
            top_n=len(texts),
            return_documents=False
        )
        rank = np.zeros(len(texts), dtype=float)
        if resp.status_code == HTTPStatus.OK:
            for r in resp.output.results:
                rank[r.index] = r.relevance_score
            return rank, resp.usage.total_tokens
        else:
            raise ValueError(f"Error calling QWenRerank model {self.model_name}: {resp.status_code} - {resp.text}")


class HuggingfaceRerank(DefaultRerank):
    @staticmethod
    def post(query: str, texts: list, url="127.0.0.1"):
        exc = None
        scores = [0 for _ in range(len(texts))]
        batch_size = 8
        for i in range(0, len(texts), batch_size):
            try:
                res = requests.post(f"http://{url}/rerank", headers={"Content-Type": "application/json"},
                                    json={"query": query, "texts": texts[i: i + batch_size],
                                          "raw_scores": False, "truncate": True})
                for o in res.json():
                    scores[o["index"] + i] = o["score"]
            except Exception as e:
                exc = e

        if exc:
            raise exc
        return np.array(scores)

    def __init__(self, key, model_name="BAAI/bge-reranker-v2-m3", base_url="http://127.0.0.1"):
        self.model_name = model_name
        self.base_url = base_url

    def similarity(self, query: str, texts: list) -> tuple[np.ndarray, int]:
        if not texts:
            return np.array([]), 0
        token_count = 0
        for t in texts:
            token_count += num_tokens_from_string(t)
        return HuggingfaceRerank.post(query, texts, self.base_url), token_count
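
# The endpoint contacted by HuggingfaceRerank.post() is assumed to follow a
# text-embeddings-inference-style /rerank API: a JSON body with "query",
# "texts", "raw_scores" and "truncate", answered by a list of
# {"index": ..., "score": ...} objects. Hypothetical local usage (host and
# port are illustrative):
#   reranker = HuggingfaceRerank(key="", base_url="127.0.0.1:8080")
#   scores, tokens = reranker.similarity("query", ["passage A", "passage B"])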


class GPUStackRerank(Base):
    def __init__(
        self, key, model_name, base_url
    ):
        if not base_url:
            raise ValueError("url cannot be None")

        self.model_name = model_name
        self.base_url = str(URL(base_url) / "v1" / "rerank")
        self.headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {key}",
        }

    def similarity(self, query: str, texts: list):
        payload = {
            "model": self.model_name,
            "query": query,
            "documents": texts,
            "top_n": len(texts),
        }

        try:
            response = requests.post(
                self.base_url, json=payload, headers=self.headers
            )
            response.raise_for_status()
            response_json = response.json()

            rank = np.zeros(len(texts), dtype=float)
            if "results" not in response_json:
                return rank, 0

            token_count = 0
            for t in texts:
                token_count += num_tokens_from_string(t)

            for result in response_json["results"]:
                rank[result["index"]] = result["relevance_score"]

            return (
                rank,
                token_count,
            )

        except requests.exceptions.HTTPError as e:
            raise ValueError(
                f"Error calling GPUStackRerank model {self.model_name}: {e.response.status_code} - {e.response.text}")
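

# Minimal smoke-test sketch (not part of the library surface): assumes a
# SiliconFlow API key in the SILICONFLOW_API_KEY environment variable, network
# access, and that the model name below is available to your account.
if __name__ == "__main__":
    _api_key = os.environ.get("SILICONFLOW_API_KEY", "")
    if _api_key:
        _reranker = SILICONFLOWRerank(_api_key, "BAAI/bge-reranker-v2-m3")
        _scores, _tokens = _reranker.similarity(
            "What does a reranker do?",
            [
                "A reranker re-scores candidate passages against a query.",
                "Bananas are rich in potassium.",
            ],
        )
        print(_scores, _tokens)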