#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
import re
from dataclasses import dataclass

import numpy as np

from rag.settings import TAG_FLD, PAGERANK_FLD
from rag.utils import rmSpace
from rag.nlp import rag_tokenizer, query
from rag.utils.doc_store_conn import DocStoreConnection, MatchDenseExpr, FusionExpr, OrderByExpr


def index_name(uid): return f"ragflow_{uid}"

class Dealer:
    def __init__(self, dataStore: DocStoreConnection):
        self.qryr = query.FulltextQueryer()
        self.dataStore = dataStore

    @dataclass
    class SearchResult:
        total: int
        ids: list[str]
        query_vector: list[float] | None = None
        field: dict | None = None
        highlight: dict | None = None
        aggregation: list | dict | None = None
        keywords: list[str] | None = None
        group_docs: list[list] | None = None
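    # Added note: SearchResult is the container every search below returns;
    # `ids` are chunk ids, `field` maps chunk id -> stored fields, and
    # `query_vector` is only populated when an embedding model was supplied.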
    def get_vector(self, txt, emb_mdl, topk=10, similarity=0.1):
        qv, _ = emb_mdl.encode_queries(txt)
        shape = np.array(qv).shape
        if len(shape) > 1:
            raise Exception(
                f"Dealer.get_vector returned array's shape {shape} doesn't match expectation(exact one dimension).")
        embedding_data = [float(v) for v in qv]
        vector_column_name = f"q_{len(embedding_data)}_vec"
        return MatchDenseExpr(vector_column_name, embedding_data, 'float', 'cosine', topk, {"similarity": similarity})
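    # Added note: the dense-match column is named after the embedding width,
    # so a 1024-dimensional model reads and writes "q_1024_vec". A minimal
    # usage sketch, assuming `dealer` is a Dealer instance and `emb_mdl` is
    # any wrapper exposing `encode_queries` (both hypothetical here):
    #
    #     match_dense = dealer.get_vector("what is ragflow?", emb_mdl,
    #                                     topk=1024, similarity=0.2)
    #     match_dense.embedding_data  # -> list[float], the query embedding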
    def get_filters(self, req):
        condition = dict()
        for key, field in {"kb_ids": "kb_id", "doc_ids": "doc_id"}.items():
            if key in req and req[key] is not None:
                condition[field] = req[key]
        # TODO(yzc): `available_int` is nullable however infinity doesn't support nullable columns.
        for key in ["knowledge_graph_kwd", "available_int"]:
            if key in req and req[key] is not None:
                condition[key] = req[key]
        return condition
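    # Added note: get_filters renames request keys to stored-field names and
    # drops None values; illustrative input/output (values are made up):
    #
    #     get_filters({"kb_ids": ["kb0"], "doc_ids": None, "available_int": 1})
    #     # -> {"kb_id": ["kb0"], "available_int": 1}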
    def search(self, req, idx_names: str | list[str],
               kb_ids: list[str],
               emb_mdl=None,
               highlight=False,
               rank_feature: dict | None = None
               ):
        filters = self.get_filters(req)
        orderBy = OrderByExpr()

        pg = int(req.get("page", 1)) - 1
        topk = int(req.get("topk", 1024))
        ps = int(req.get("size", topk))
        offset, limit = pg * ps, ps

        src = req.get("fields",
                      ["docnm_kwd", "content_ltks", "kb_id", "img_id", "title_tks", "important_kwd", "position_int",
                       "doc_id", "page_num_int", "top_int", "create_timestamp_flt", "knowledge_graph_kwd",
                       "question_kwd", "question_tks",
                       "available_int", "content_with_weight", PAGERANK_FLD, TAG_FLD])
        kwds = set([])

        qst = req.get("question", "")
        q_vec = []
        if not qst:
            if req.get("sort"):
                orderBy.asc("page_num_int")
                orderBy.asc("top_int")
                orderBy.desc("create_timestamp_flt")
            res = self.dataStore.search(src, [], filters, [], orderBy, offset, limit, idx_names, kb_ids)
            total = self.dataStore.getTotal(res)
            logging.debug("Dealer.search TOTAL: {}".format(total))
        else:
            highlightFields = ["content_ltks", "title_tks"] if highlight else []
            matchText, keywords = self.qryr.question(qst, min_match=0.3)
            if emb_mdl is None:
                matchExprs = [matchText]
                res = self.dataStore.search(src, highlightFields, filters, matchExprs, orderBy, offset, limit,
                                            idx_names, kb_ids, rank_feature=rank_feature)
                total = self.dataStore.getTotal(res)
                logging.debug("Dealer.search TOTAL: {}".format(total))
            else:
                matchDense = self.get_vector(qst, emb_mdl, topk, req.get("similarity", 0.1))
                q_vec = matchDense.embedding_data
                src.append(f"q_{len(q_vec)}_vec")

                fusionExpr = FusionExpr("weighted_sum", topk, {"weights": "0.05, 0.95"})
                matchExprs = [matchText, matchDense, fusionExpr]

                res = self.dataStore.search(src, highlightFields, filters, matchExprs, orderBy, offset, limit,
                                            idx_names, kb_ids, rank_feature=rank_feature)
                total = self.dataStore.getTotal(res)
                logging.debug("Dealer.search TOTAL: {}".format(total))

                # If result is empty, try again with lower min_match
                if total == 0:
                    matchText, _ = self.qryr.question(qst, min_match=0.1)
                    filters.pop("doc_ids", None)
                    matchDense.extra_options["similarity"] = 0.17
                    res = self.dataStore.search(src, highlightFields, filters, [matchText, matchDense, fusionExpr],
                                                orderBy, offset, limit, idx_names, kb_ids, rank_feature=rank_feature)
                    total = self.dataStore.getTotal(res)
                    logging.debug("Dealer.search 2 TOTAL: {}".format(total))

            for k in keywords:
                kwds.add(k)
                for kk in rag_tokenizer.fine_grained_tokenize(k).split():
                    if len(kk) < 2:
                        continue
                    if kk in kwds:
                        continue
                    kwds.add(kk)

        logging.debug(f"TOTAL: {total}")
        ids = self.dataStore.getChunkIds(res)
        keywords = list(kwds)
        highlight = self.dataStore.getHighlight(res, keywords, "content_with_weight")
        aggs = self.dataStore.getAggregation(res, "docnm_kwd")
        return self.SearchResult(
            total=total,
            ids=ids,
            query_vector=q_vec,
            aggregation=aggs,
            highlight=highlight,
            field=self.dataStore.getFields(res, src),
            keywords=keywords
        )
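    # Added note: with an embedding model, search runs a hybrid query: the
    # full-text match (min_match=0.3) and the dense match are fused by a
    # "weighted_sum" expression weighted "0.05, 0.95" in favor of the vector
    # score. If that returns nothing, the query is retried once with
    # min_match=0.1, the doc_ids filter removed, and the dense similarity
    # floor raised to 0.17, trading precision for recall.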
    @staticmethod
    def trans2floats(txt):
        return [float(t) for t in txt.split("\t")]
    def insert_citations(self, answer, chunks, chunk_v,
                         embd_mdl, tkweight=0.1, vtweight=0.9):
        assert len(chunks) == len(chunk_v)
        if not chunks:
            return answer, set([])
        pieces = re.split(r"(```)", answer)
        if len(pieces) >= 3:
            i = 0
            pieces_ = []
            while i < len(pieces):
                if pieces[i] == "```":
                    st = i
                    i += 1
                    while i < len(pieces) and pieces[i] != "```":
                        i += 1
                    if i < len(pieces):
                        i += 1
                    pieces_.append("".join(pieces[st: i]) + "\n")
                else:
                    pieces_.extend(
                        re.split(
                            r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])",
                            pieces[i]))
                    i += 1
            pieces = pieces_
        else:
            pieces = re.split(r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])", answer)
        for i in range(1, len(pieces)):
            if re.match(r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])", pieces[i]):
                pieces[i - 1] += pieces[i][0]
                pieces[i] = pieces[i][1:]

        idx = []
        pieces_ = []
        for i, t in enumerate(pieces):
            if len(t) < 5:
                continue
            idx.append(i)
            pieces_.append(t)
        logging.debug("{} => {}".format(answer, pieces_))
        if not pieces_:
            return answer, set([])

        ans_v, _ = embd_mdl.encode(pieces_)
        assert len(ans_v[0]) == len(chunk_v[0]), "The dimension of query and chunk do not match: {} vs. {}".format(
            len(ans_v[0]), len(chunk_v[0]))

        chunks_tks = [rag_tokenizer.tokenize(self.qryr.rmWWW(ck)).split()
                      for ck in chunks]
        cites = {}
        thr = 0.63
        while thr > 0.3 and len(cites.keys()) == 0 and pieces_ and chunks_tks:
            for i, a in enumerate(pieces_):
                sim, tksim, vtsim = self.qryr.hybrid_similarity(ans_v[i],
                                                                chunk_v,
                                                                rag_tokenizer.tokenize(
                                                                    self.qryr.rmWWW(pieces_[i])).split(),
                                                                chunks_tks,
                                                                tkweight, vtweight)
                mx = np.max(sim) * 0.99
                logging.debug("{} SIM: {}".format(pieces_[i], mx))
                if mx < thr:
                    continue
                cites[idx[i]] = list(
                    set([str(ii) for ii in range(len(chunk_v)) if sim[ii] > mx]))[:4]
            thr *= 0.8

        res = ""
        seted = set([])
        for i, p in enumerate(pieces):
            res += p
            if i not in idx:
                continue
            if i not in cites:
                continue
            for c in cites[i]:
                assert int(c) < len(chunk_v)
            for c in cites[i]:
                if c in seted:
                    continue
                res += f" ##{c}$$"
                seted.add(c)

        return res, seted
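    # Added note: insert_citations splits the answer into sentence-like
    # pieces (keeping ``` fenced blocks intact), embeds each piece, and
    # scores it against the chunks with hybrid similarity. The acceptance
    # threshold starts at 0.63 and decays by 0.8x (down to 0.3) until at
    # least one citation is found; at most 4 chunks are cited per piece,
    # appended as " ##<chunk_index>$$" markers, e.g. (illustrative):
    #
    #     "RAGFlow supports hybrid retrieval. ##0$$"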
    def _rank_feature_scores(self, query_rfea, search_res):
        ## For rank feature(tag_fea) scores.
        rank_fea = []
        pageranks = []
        for chunk_id in search_res.ids:
            pageranks.append(search_res.field[chunk_id].get(PAGERANK_FLD, 0))
        pageranks = np.array(pageranks, dtype=float)

        if not query_rfea:
            return np.array([0 for _ in range(len(search_res.ids))]) + pageranks

        q_denor = np.sqrt(np.sum([s * s for t, s in query_rfea.items() if t != PAGERANK_FLD]))
        for i in search_res.ids:
            nor, denor = 0, 0
            for t, sc in eval(search_res.field[i].get(TAG_FLD, "{}")).items():
                if t in query_rfea:
                    nor += query_rfea[t] * sc
                denor += sc * sc
            if denor == 0:
                rank_fea.append(0)
            else:
                rank_fea.append(nor / np.sqrt(denor) / q_denor)
        return np.array(rank_fea) * 10. + pageranks
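    # Added note: per chunk, the tag score is the cosine similarity between
    # the query's tag weights and the chunk's TAG_FLD weights (stored as a
    # Python-literal dict string, hence the eval), scaled by 10, plus the
    # chunk's pagerank:
    #
    #     score_i = 10 * sum(q_t * c_t) / (sqrt(sum(c_t^2)) * sqrt(sum(q_t^2)))
    #               + pagerank_i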
    def rerank(self, sres, query, tkweight=0.3,
               vtweight=0.7, cfield="content_ltks",
               rank_feature: dict | None = None
               ):
        _, keywords = self.qryr.question(query)
        vector_size = len(sres.query_vector)
        vector_column = f"q_{vector_size}_vec"
        zero_vector = [0.0] * vector_size
        ins_embd = []
        for chunk_id in sres.ids:
            vector = sres.field[chunk_id].get(vector_column, zero_vector)
            if isinstance(vector, str):
                vector = [float(v) for v in vector.split("\t")]
            ins_embd.append(vector)
        if not ins_embd:
            return [], [], []

        for i in sres.ids:
            if isinstance(sres.field[i].get("important_kwd", []), str):
                sres.field[i]["important_kwd"] = [sres.field[i]["important_kwd"]]
        ins_tw = []
        for i in sres.ids:
            content_ltks = sres.field[i][cfield].split()
            title_tks = [t for t in sres.field[i].get("title_tks", "").split() if t]
            question_tks = [t for t in sres.field[i].get("question_tks", "").split() if t]
            important_kwd = sres.field[i].get("important_kwd", [])
            tks = content_ltks + title_tks * 2 + important_kwd * 5 + question_tks * 6
            ins_tw.append(tks)

        ## For rank feature(tag_fea) scores.
        rank_fea = self._rank_feature_scores(rank_feature, sres)

        sim, tksim, vtsim = self.qryr.hybrid_similarity(sres.query_vector,
                                                        ins_embd,
                                                        keywords,
                                                        ins_tw, tkweight, vtweight)

        return sim + rank_fea, tksim, vtsim
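    # Added note: rerank weights the token sets before similarity is taken:
    # title tokens count 2x, important keywords 5x, and question tokens 6x
    # relative to body content, and the final score is the hybrid similarity
    # (default 0.3 token / 0.7 vector) plus the rank-feature scores above.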
    def rerank_by_model(self, rerank_mdl, sres, query, tkweight=0.3,
                        vtweight=0.7, cfield="content_ltks",
                        rank_feature: dict | None = None):
        _, keywords = self.qryr.question(query)

        for i in sres.ids:
            if isinstance(sres.field[i].get("important_kwd", []), str):
                sres.field[i]["important_kwd"] = [sres.field[i]["important_kwd"]]
        ins_tw = []
        for i in sres.ids:
            content_ltks = sres.field[i][cfield].split()
            title_tks = [t for t in sres.field[i].get("title_tks", "").split() if t]
            important_kwd = sres.field[i].get("important_kwd", [])
            tks = content_ltks + title_tks + important_kwd
            ins_tw.append(tks)

        tksim = self.qryr.token_similarity(keywords, ins_tw)
        vtsim, _ = rerank_mdl.similarity(query, [rmSpace(" ".join(tks)) for tks in ins_tw])
        ## For rank feature(tag_fea) scores.
        rank_fea = self._rank_feature_scores(rank_feature, sres)

        return tkweight * (np.array(tksim) + rank_fea) + vtweight * vtsim, tksim, vtsim
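    # Added note: with an external reranker the vector side comes from the
    # model instead of stored embeddings, and the blend becomes
    #
    #     tkweight * (tksim + rank_fea) + vtweight * vtsim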
    def hybrid_similarity(self, ans_embd, ins_embd, ans, inst):
        return self.qryr.hybrid_similarity(ans_embd,
                                           ins_embd,
                                           rag_tokenizer.tokenize(ans).split(),
                                           rag_tokenizer.tokenize(inst).split())
    def retrieval(self, question, embd_mdl, tenant_ids, kb_ids, page, page_size, similarity_threshold=0.2,
                  vector_similarity_weight=0.3, top=1024, doc_ids=None, aggs=True,
                  rerank_mdl=None, highlight=False,
                  rank_feature: dict | None = {PAGERANK_FLD: 10}):
        ranks = {"total": 0, "chunks": [], "doc_aggs": {}}
        if not question:
            return ranks

        RERANK_PAGE_LIMIT = 3
        req = {"kb_ids": kb_ids, "doc_ids": doc_ids, "size": max(page_size * RERANK_PAGE_LIMIT, 128),
               "question": question, "vector": True, "topk": top,
               "similarity": similarity_threshold,
               "available_int": 1}

        if page > RERANK_PAGE_LIMIT:
            req["page"] = page
            req["size"] = page_size

        if isinstance(tenant_ids, str):
            tenant_ids = tenant_ids.split(",")

        sres = self.search(req, [index_name(tid) for tid in tenant_ids],
                           kb_ids, embd_mdl, highlight, rank_feature=rank_feature)
        ranks["total"] = sres.total

        if page <= RERANK_PAGE_LIMIT:
            if rerank_mdl and sres.total > 0:
                sim, tsim, vsim = self.rerank_by_model(rerank_mdl,
                                                       sres, question, 1 - vector_similarity_weight,
                                                       vector_similarity_weight,
                                                       rank_feature=rank_feature)
            else:
                sim, tsim, vsim = self.rerank(
                    sres, question, 1 - vector_similarity_weight, vector_similarity_weight,
                    rank_feature=rank_feature)
            idx = np.argsort(sim * -1)[(page - 1) * page_size:page * page_size]
        else:
            sim = tsim = vsim = [1] * len(sres.ids)
            idx = list(range(len(sres.ids)))

        dim = len(sres.query_vector)
        vector_column = f"q_{dim}_vec"
        zero_vector = [0.0] * dim
        for i in idx:
            if sim[i] < similarity_threshold:
                break
            if len(ranks["chunks"]) >= page_size:
                if aggs:
                    continue
                break
            id = sres.ids[i]
            chunk = sres.field[id]
            dnm = chunk["docnm_kwd"]
            did = chunk["doc_id"]
            position_int = chunk.get("position_int", [])
            d = {
                "chunk_id": id,
                "content_ltks": chunk["content_ltks"],
                "content_with_weight": chunk["content_with_weight"],
                "doc_id": chunk["doc_id"],
                "docnm_kwd": dnm,
                "kb_id": chunk["kb_id"],
                "important_kwd": chunk.get("important_kwd", []),
                "image_id": chunk.get("img_id", ""),
                "similarity": sim[i],
                "vector_similarity": vsim[i],
                "term_similarity": tsim[i],
                "vector": chunk.get(vector_column, zero_vector),
                "positions": position_int,
            }
            if highlight and sres.highlight:
                if id in sres.highlight:
                    d["highlight"] = rmSpace(sres.highlight[id])
                else:
                    d["highlight"] = d["content_with_weight"]
            ranks["chunks"].append(d)
            if dnm not in ranks["doc_aggs"]:
                ranks["doc_aggs"][dnm] = {"doc_id": did, "count": 0}
            ranks["doc_aggs"][dnm]["count"] += 1
        ranks["doc_aggs"] = [{"doc_name": k,
                              "doc_id": v["doc_id"],
                              "count": v["count"]} for k,
                             v in sorted(ranks["doc_aggs"].items(),
                                         key=lambda x: x[1]["count"] * -1)]
        ranks["chunks"] = ranks["chunks"][:page_size]

        return ranks
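    # Added note: pages 1..RERANK_PAGE_LIMIT are served from one oversized
    # fetch (max(page_size * 3, 128) candidates) that is reranked client-side
    # and then sliced; deeper pages fall back to the store's own paging and
    # skip reranking entirely (sim/tsim/vsim pinned to 1).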
    def sql_retrieval(self, sql, fetch_size=128, format="json"):
        tbl = self.dataStore.sql(sql, fetch_size, format)
        return tbl
    def chunk_list(self, doc_id: str, tenant_id: str,
                   kb_ids: list[str], max_count=1024,
                   offset=0,
                   fields=["docnm_kwd", "content_with_weight", "img_id"]):
        condition = {"doc_id": doc_id}
        res = []
        bs = 128
        for p in range(offset, max_count, bs):
            es_res = self.dataStore.search(fields, [], condition, [], OrderByExpr(), p, bs, index_name(tenant_id),
                                           kb_ids)
            dict_chunks = self.dataStore.getFields(es_res, fields)
            if dict_chunks:
                res.extend(dict_chunks.values())
            if len(dict_chunks.values()) < bs:
                break
        return res
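    # Added note: chunk_list scans in fixed batches of 128 and stops at the
    # first short batch, so max_count is an upper bound rather than an exact
    # count.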
    def all_tags(self, tenant_id: str, kb_ids: list[str], S=1000):
        res = self.dataStore.search([], [], {}, [], OrderByExpr(), 0, 0, index_name(tenant_id), kb_ids, ["tag_kwd"])
        return self.dataStore.getAggregation(res, "tag_kwd")
    def all_tags_in_portion(self, tenant_id: str, kb_ids: list[str], S=1000):
        res = self.dataStore.search([], [], {}, [], OrderByExpr(), 0, 0, index_name(tenant_id), kb_ids, ["tag_kwd"])
        res = self.dataStore.getAggregation(res, "tag_kwd")
        total = np.sum([c for _, c in res])
        return {t: (c + 1) / (total + S) for t, c in res}
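    # Added note: the portion uses add-one (Laplace-style) smoothing,
    # (c + 1) / (total + S). Worked example with S=1000: a tag seen 99 times
    # out of 9000 total gets (99 + 1) / (9000 + 1000) = 0.01.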
    def tag_content(self, tenant_id: str, kb_ids: list[str], doc, all_tags, topn_tags=3, keywords_topn=30, S=1000):
        idx_nm = index_name(tenant_id)
        match_txt = self.qryr.paragraph(doc["title_tks"] + " " + doc["content_ltks"], doc.get("important_kwd", []), keywords_topn)
        res = self.dataStore.search([], [], {}, [match_txt], OrderByExpr(), 0, 0, idx_nm, kb_ids, ["tag_kwd"])
        aggs = self.dataStore.getAggregation(res, "tag_kwd")
        if not aggs:
            return False
        cnt = np.sum([c for _, c in aggs])
        tag_fea = sorted([(a, round(0.1 * (c + 1) / (cnt + S) / (all_tags.get(a, 0.0001)))) for a, c in aggs],
                         key=lambda x: x[1] * -1)[:topn_tags]
        doc[TAG_FLD] = {a: c for a, c in tag_fea if c > 0}
        return True
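    # Added note: tag_content and tag_query share the same scoring, a
    # smoothed lift ratio: the tag's smoothed frequency among the matched
    # chunks, 0.1 * (c + 1) / (cnt + S), divided by its corpus-wide portion
    # from all_tags; tags whose rounded score is 0 are discarded.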
    def tag_query(self, question: str, tenant_ids: str | list[str], kb_ids: list[str], all_tags, topn_tags=3, S=1000):
        if isinstance(tenant_ids, str):
            idx_nms = index_name(tenant_ids)
        else:
            idx_nms = [index_name(tid) for tid in tenant_ids]

        match_txt, _ = self.qryr.question(question, min_match=0.0)
        res = self.dataStore.search([], [], {}, [match_txt], OrderByExpr(), 0, 0, idx_nms, kb_ids, ["tag_kwd"])
        aggs = self.dataStore.getAggregation(res, "tag_kwd")
        if not aggs:
            return {}
        cnt = np.sum([c for _, c in aggs])
        tag_fea = sorted([(a, round(0.1 * (c + 1) / (cnt + S) / (all_tags.get(a, 0.0001)))) for a, c in aggs],
                         key=lambda x: x[1] * -1)[:topn_tags]
        return {a: c for a, c in tag_fea if c > 0}
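
# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module). `data_store` stands
# for any concrete DocStoreConnection implementation and `emb_mdl` for an
# embedding wrapper exposing `encode_queries`; both are assumptions here.
#
#     dealer = Dealer(data_store)
#     ranks = dealer.retrieval(
#         question="What is hybrid retrieval?",
#         embd_mdl=emb_mdl,
#         tenant_ids="tenant_0",        # comma-separated string or list
#         kb_ids=["kb_0"],
#         page=1,
#         page_size=10,
#         similarity_threshold=0.2,
#         vector_similarity_weight=0.3,
#     )
#     for chunk in ranks["chunks"]:
#         print(chunk["docnm_kwd"], chunk["similarity"])
# ---------------------------------------------------------------------------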