# -*- coding: utf-8 -*-
import json
import re
from copy import deepcopy
from dataclasses import dataclass
from typing import List, Optional, Dict, Union

import numpy as np
from elasticsearch_dsl import Q, Search

from api.settings import chat_logger
from rag.settings import es_logger
from rag.utils import rmSpace
from rag.nlp import huqie, query


def index_name(uid): return f"ragflow_{uid}"
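

# Dealer bundles this module's retrieval logic: it builds hybrid (weighted
# full-text + kNN vector) Elasticsearch queries, reranks the hits, inserts
# citations into generated answers, and exposes an Elasticsearch SQL
# passthrough.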
class Dealer:
    def __init__(self, es):
        self.qryr = query.EsQueryer(es)
        self.qryr.flds = [
            "title_tks^10",
            "title_sm_tks^5",
            "important_kwd^30",
            "important_tks^20",
            "content_ltks^2",
            "content_sm_ltks"]
        self.es = es
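
    # Plain container for one search round-trip. `field` maps chunk id to its
    # stored fields; `highlight` maps chunk id to highlighted snippets.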
    @dataclass
    class SearchResult:
        total: int
        ids: List[str]
        query_vector: Optional[List[float]] = None
        field: Optional[Dict] = None
        highlight: Optional[Dict] = None
        aggregation: Union[List, Dict, None] = None
        keywords: Optional[List[str]] = None
        group_docs: Optional[List[List]] = None
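
    # Build the Elasticsearch kNN clause for the query text. The vector field
    # is picked by embedding dimension ("q_%d_vec"), so chunks embedded by
    # models of different sizes can coexist in one index.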
    def _vector(self, txt, emb_mdl, sim=0.8, topk=10):
        qv, c = emb_mdl.encode_queries(txt)
        return {
            "field": "q_%d_vec" % len(qv),
            "k": topk,
            "similarity": sim,
            "num_candidates": topk * 2,
            "query_vector": qv
        }
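
    # Hybrid search: a boosted full-text query combined (when req["vector"]
    # is set) with a kNN clause that shares the same boolean filters. If the
    # strict first pass returns nothing, the query is relaxed once
    # (minimum_should_match 10%, kNN similarity 0.17) and retried.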
    def search(self, req, idxnm, emb_mdl=None):
        qst = req.get("question", "")
        bqry, keywords = self.qryr.question(qst)
        if req.get("kb_ids"):
            bqry.filter.append(Q("terms", kb_id=req["kb_ids"]))
        if req.get("doc_ids"):
            bqry.filter.append(Q("terms", doc_id=req["doc_ids"]))
        if "available_int" in req:
            if req["available_int"] == 0:
                bqry.filter.append(Q("range", available_int={"lt": 1}))
            else:
                bqry.filter.append(
                    Q("bool", must_not=Q("range", available_int={"lt": 1})))
        bqry.boost = 0.05

        s = Search()
        pg = int(req.get("page", 1)) - 1
        ps = int(req.get("size", 1000))
        topk = int(req.get("topk", 1024))
        src = req.get("fields", ["docnm_kwd", "content_ltks", "kb_id", "img_id",
                                 "image_id", "doc_id", "q_512_vec", "q_768_vec",
                                 "position_int", "q_1024_vec", "q_1536_vec",
                                 "available_int", "content_with_weight"])

        s = s.query(bqry)[pg * ps:(pg + 1) * ps]
        s = s.highlight("content_ltks")
        s = s.highlight("title_ltks")
        if not qst:
            if not req.get("sort"):
                s = s.sort(
                    {"create_time": {"order": "desc", "unmapped_type": "date"}},
                    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
                )
            else:
                s = s.sort(
                    {"page_num_int": {"order": "asc", "unmapped_type": "float",
                                      "mode": "avg", "numeric_type": "double"}},
                    {"top_int": {"order": "asc", "unmapped_type": "float",
                                 "mode": "avg", "numeric_type": "double"}},
                    {"create_time": {"order": "desc", "unmapped_type": "date"}},
                    {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
                )

        if qst:
            s = s.highlight_options(
                fragment_size=120,
                number_of_fragments=5,
                boundary_scanner_locale="zh-CN",
                boundary_scanner="SENTENCE",
                boundary_chars=",./;:\\!(),。?:!……()——、"
            )

        s = s.to_dict()
        q_vec = []
        if req.get("vector"):
            assert emb_mdl, "No embedding model selected"
            s["knn"] = self._vector(
                qst, emb_mdl, req.get("similarity", 0.1), topk)
            s["knn"]["filter"] = bqry.to_dict()
            if "highlight" in s:
                del s["highlight"]
            q_vec = s["knn"]["query_vector"]

        es_logger.info("【Q】: {}".format(json.dumps(s)))
        res = self.es.search(deepcopy(s), idxnm=idxnm, timeout="600s", src=src)
        es_logger.info("TOTAL: {}".format(self.es.getTotal(res)))
        if self.es.getTotal(res) == 0 and "knn" in s:
            bqry, _ = self.qryr.question(qst, min_match="10%")
            if req.get("kb_ids"):
                bqry.filter.append(Q("terms", kb_id=req["kb_ids"]))
            s["query"] = bqry.to_dict()
            s["knn"]["filter"] = bqry.to_dict()
            s["knn"]["similarity"] = 0.17
            res = self.es.search(s, idxnm=idxnm, timeout="600s", src=src)
            es_logger.info("【Q】: {}".format(json.dumps(s)))

        kwds = set([])
        for k in keywords:
            kwds.add(k)
            for kk in huqie.qieqie(k).split(" "):
                if len(kk) < 2:
                    continue
                if kk in kwds:
                    continue
                kwds.add(kk)

        aggs = self.getAggregation(res, "docnm_kwd")
        return self.SearchResult(
            total=self.es.getTotal(res),
            ids=self.es.getDocIds(res),
            query_vector=q_vec,
            aggregation=aggs,
            highlight=self.getHighlight(res),
            field=self.getFields(res, src),
            keywords=list(kwds)
        )
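
    # Flatten the "aggs_<field>" terms aggregation into (key, doc_count) pairs.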
    def getAggregation(self, res, g):
        if "aggregations" not in res or "aggs_" + g not in res["aggregations"]:
            return
        bkts = res["aggregations"]["aggs_" + g]["buckets"]
        return [(b["key"], b["doc_count"]) for b in bkts]
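
    # Map chunk id -> highlight snippets of the first highlighted field.
    # The nested rmspace helper (not called at the moment) re-joins tokens,
    # keeping a space only between two adjacent English words.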
    def getHighlight(self, res):
        def rmspace(line):
            eng = set(list("qwertyuioplkjhgfdsazxcvbnm"))
            r = []
            for t in line.split(" "):
                if not t:
                    continue
                if len(r) > 0 and len(t) > 0 \
                        and r[-1][-1] in eng and t[0] in eng:
                    r.append(" ")
                r.append(t)
            r = "".join(r)
            return r

        ans = {}
        for d in res["hits"]["hits"]:
            hlts = d.get("highlight")
            if not hlts:
                continue
            ans[d["_id"]] = "".join([a for a in list(hlts.items())[0][1]])
        return ans
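
    # Project each hit onto the requested fields: lists become tab-separated
    # strings, scalars are stringified, and tokenized ("...tks") fields have
    # their segmentation spaces removed with rmSpace.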
    def getFields(self, sres, flds):
        res = {}
        if not flds:
            return {}
        for d in self.es.getSource(sres):
            m = {n: d.get(n) for n in flds if d.get(n) is not None}
            for n, v in m.items():
                if isinstance(v, list):
                    m[n] = "\t".join([str(vv) if not isinstance(vv, list)
                                      else "\t".join([str(vvv) for vvv in vv])
                                      for vv in v])
                    continue
                if not isinstance(v, str):
                    m[n] = str(m[n])
                if n.find("tks") > 0:
                    m[n] = rmSpace(m[n])
            if m:
                res[d["id"]] = m
        return res
    @staticmethod
    def trans2floats(txt):
        return [float(t) for t in txt.split("\t")]
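
    # Split a model-generated answer into sentences (keeping ``` code fences
    # whole), embed each piece, and append " ##<chunk-index>$$" citation marks
    # for chunks whose hybrid similarity is above 0.63 and within 1% of the
    # best match. Returns the marked-up answer and the set of cited indices.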
    def insert_citations(self, answer, chunks, chunk_v,
                         embd_mdl, tkweight=0.1, vtweight=0.9):
        assert len(chunks) == len(chunk_v)
        pieces = re.split(r"(```)", answer)
        if len(pieces) >= 3:
            i = 0
            pieces_ = []
            while i < len(pieces):
                if pieces[i] == "```":
                    st = i
                    i += 1
                    while i < len(pieces) and pieces[i] != "```":
                        i += 1
                    if i < len(pieces):
                        i += 1
                    pieces_.append("".join(pieces[st: i]) + "\n")
                else:
                    pieces_.extend(
                        re.split(r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])",
                                 pieces[i]))
                    i += 1
            pieces = pieces_
        else:
            pieces = re.split(r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])", answer)
        for i in range(1, len(pieces)):
            if re.match(r"([^\|][;。?!!\n]|[a-z][.?;!][ \n])", pieces[i]):
                pieces[i - 1] += pieces[i][0]
                pieces[i] = pieces[i][1:]

        idx = []
        pieces_ = []
        for i, t in enumerate(pieces):
            if len(t) < 5:
                continue
            idx.append(i)
            pieces_.append(t)
        es_logger.info("{} => {}".format(answer, pieces_))
        if not pieces_:
            # Nothing substantial to cite; keep the (answer, cited-ids) return
            # shape consistent with the normal path below.
            return answer, set([])

        ans_v, _ = embd_mdl.encode(pieces_)
        assert len(ans_v[0]) == len(chunk_v[0]), \
            "The dimension of query and chunk do not match: {} vs. {}".format(
                len(ans_v[0]), len(chunk_v[0]))

        chunks_tks = [huqie.qie(ck).split(" ") for ck in chunks]
        cites = {}
        for i, a in enumerate(pieces_):
            sim, tksim, vtsim = self.qryr.hybrid_similarity(
                ans_v[i],
                chunk_v,
                huqie.qie(pieces_[i]).split(" "),
                chunks_tks,
                tkweight, vtweight)
            mx = np.max(sim) * 0.99
            es_logger.info("{} SIM: {}".format(pieces_[i], mx))
            if mx < 0.63:
                continue
            cites[idx[i]] = list(
                set([str(ii) for ii in range(len(chunk_v)) if sim[ii] > mx]))[:4]

        res = ""
        seted = set([])
        for i, p in enumerate(pieces):
            res += p
            if i not in idx:
                continue
            if i not in cites:
                continue
            for c in cites[i]:
                assert int(c) < len(chunk_v)
            for c in cites[i]:
                if c in seted:
                    continue
                res += f" ##{c}$$"
                seted.add(c)
        return res, seted
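
    # Re-score raw ES hits with query.EsQueryer.hybrid_similarity: a weighted
    # mix of term similarity and vector similarity. Chunks with no stored
    # vector fall back to an all-zero vector of the query's dimension.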
    def rerank(self, sres, query, tkweight=0.3,
               vtweight=0.7, cfield="content_ltks"):
        _, keywords = self.qryr.question(query)
        ins_embd = [
            Dealer.trans2floats(
                sres.field[i].get("q_%d_vec" % len(sres.query_vector),
                                  "\t".join(["0"] * len(sres.query_vector))))
            for i in sres.ids]
        if not ins_embd:
            return [], [], []
        ins_tw = [sres.field[i][cfield].split(" ") for i in sres.ids]
        sim, tksim, vtsim = self.qryr.hybrid_similarity(sres.query_vector,
                                                        ins_embd,
                                                        keywords,
                                                        ins_tw,
                                                        tkweight, vtweight)
        return sim, tksim, vtsim
    def hybrid_similarity(self, ans_embd, ins_embd, ans, inst):
        return self.qryr.hybrid_similarity(ans_embd,
                                           ins_embd,
                                           huqie.qie(ans).split(" "),
                                           huqie.qie(inst).split(" "))
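
    # End-to-end retrieval: search, rerank, apply the similarity threshold,
    # paginate, and aggregate hit counts per document. Stored "position_int"
    # values are decoded into groups of five floats describing a layout box.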
    def retrieval(self, question, embd_mdl, tenant_id, kb_ids, page, page_size,
                  similarity_threshold=0.2, vector_similarity_weight=0.3,
                  top=1024, doc_ids=None, aggs=True):
        ranks = {"total": 0, "chunks": [], "doc_aggs": {}}
        if not question:
            return ranks
        req = {"kb_ids": kb_ids, "doc_ids": doc_ids, "size": page_size,
               "question": question, "vector": True, "topk": top,
               "similarity": similarity_threshold}
        sres = self.search(req, index_name(tenant_id), embd_mdl)

        sim, tsim, vsim = self.rerank(
            sres, question, 1 - vector_similarity_weight,
            vector_similarity_weight)
        idx = np.argsort(sim * -1)

        dim = len(sres.query_vector)
        start_idx = (page - 1) * page_size
        for i in idx:
            if sim[i] < similarity_threshold:
                break
            ranks["total"] += 1
            start_idx -= 1
            if start_idx >= 0:
                continue
            if len(ranks["chunks"]) >= page_size:
                if aggs:
                    continue
                break
            id = sres.ids[i]
            dnm = sres.field[id]["docnm_kwd"]
            did = sres.field[id]["doc_id"]
            d = {
                "chunk_id": id,
                "content_ltks": sres.field[id]["content_ltks"],
                "content_with_weight": sres.field[id]["content_with_weight"],
                "doc_id": sres.field[id]["doc_id"],
                "docnm_kwd": dnm,
                "kb_id": sres.field[id]["kb_id"],
                "important_kwd": sres.field[id].get("important_kwd", []),
                "img_id": sres.field[id].get("img_id", ""),
                "similarity": sim[i],
                "vector_similarity": vsim[i],
                "term_similarity": tsim[i],
                "vector": self.trans2floats(
                    sres.field[id].get("q_%d_vec" % dim,
                                       "\t".join(["0"] * dim))),
                "positions": sres.field[id].get("position_int", "").split("\t")
            }
            if len(d["positions"]) % 5 == 0:
                poss = []
                for ii in range(0, len(d["positions"]), 5):
                    poss.append([float(d["positions"][ii]),
                                 float(d["positions"][ii + 1]),
                                 float(d["positions"][ii + 2]),
                                 float(d["positions"][ii + 3]),
                                 float(d["positions"][ii + 4])])
                d["positions"] = poss
            ranks["chunks"].append(d)
            if dnm not in ranks["doc_aggs"]:
                ranks["doc_aggs"][dnm] = {"doc_id": did, "count": 0}
            ranks["doc_aggs"][dnm]["count"] += 1
        ranks["doc_aggs"] = [{"doc_name": k,
                              "doc_id": v["doc_id"],
                              "count": v["count"]}
                             for k, v in sorted(ranks["doc_aggs"].items(),
                                                key=lambda x: x[1]["count"] * -1)]
        return ranks
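
    # Elasticsearch SQL passthrough. Equality/LIKE predicates over tokenized
    # ("*_tks"/"*_ltks") columns are rewritten into MATCH() predicates with
    # tokenized terms so they query the analyzed fields rather than raw text.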
    def sql_retrieval(self, sql, fetch_size=128, format="json"):
        sql = re.sub(r"[ ]+", " ", sql)
        sql = sql.replace("%", "")
        es_logger.info(f"Get es sql: {sql}")
        replaces = []
        for r in re.finditer(r" ([a-z_]+_l?tks)( like | ?= ?)'([^']+)'", sql):
            fld, v = r.group(1), r.group(3)
            match = " MATCH({}, '{}', 'operator=OR;minimum_should_match=30%') ".format(
                fld, huqie.qieqie(huqie.qie(v)))
            replaces.append(
                ("{}{}'{}'".format(r.group(1), r.group(2), r.group(3)), match))

        for p, r in replaces:
            sql = sql.replace(p, r, 1)
        chat_logger.info(f"To es: {sql}")

        try:
            tbl = self.es.sql(sql, fetch_size, format)
            return tbl
        except Exception as e:
            chat_logger.error(f"SQL failure: {sql} =>" + str(e))
            return {"error": str(e)}
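

# A minimal usage sketch (assumptions, not part of this module: ELASTICSEARCH
# is the project's ES connection wrapper passed in as `es`, and embd_mdl is an
# embedding model exposing encode()/encode_queries() as the methods above
# expect):
#
#     dealer = Dealer(ELASTICSEARCH)
#     ranks = dealer.retrieval("How are documents chunked?", embd_mdl,
#                              tenant_id, kb_ids, page=1, page_size=10)
#     for c in ranks["chunks"]:
#         print(c["doc_id"], c["similarity"])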