#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import json
import re
from collections import defaultdict
from rag.utils.doc_store_conn import MatchTextExpr
from rag.nlp import rag_tokenizer, term_weight, synonym


class FulltextQueryer:
    def __init__(self):
        self.tw = term_weight.Dealer()
        self.syn = synonym.Dealer()
        self.query_fields = [
            "title_tks^10",
            "title_sm_tks^5",
            "important_kwd^30",
            "important_tks^20",
            "question_tks^20",
            "content_ltks^2",
            "content_sm_ltks",
        ]
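        # The "^N" suffixes are Lucene/Elasticsearch-style field boosts: a match in
        # important_kwd counts 30x as much as one in the unboosted content_sm_ltks.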

    @staticmethod
    def subSpecialChar(line):
        return re.sub(r"([:\{\}/\[\]\-\*\"\(\)\|\+~\^])", r"\\\1", line).strip()

    @staticmethod
    def isChinese(line):
        arr = re.split(r"[ \t]+", line)
        if len(arr) <= 3:
            return True
        e = 0
        for t in arr:
            if not re.match(r"[a-zA-Z]+$", t):
                e += 1
        return e * 1.0 / len(arr) >= 0.7
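    # Illustrative behavior (hypothetical inputs):
    #   subSpecialChar("c++ (gcc)") escapes query-syntax characters -> c\+\+ \(gcc\)
    #   isChinese("如何 安装 ragflow 呢")      -> True   (3 of 4 chunks are not pure ASCII letters)
    #   isChinese("how to install ragflow")  -> False  (every chunk is pure ASCII letters)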

    @staticmethod
    def rmWWW(txt):
        patts = [
            (
                r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀|谁|哪位|哪个)是*",
                "",
            ),
            (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
            (
                r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down|of|to|or|and|if) ",
                " ",
            ),
        ]
        otxt = txt
        for r, p in patts:
            txt = re.sub(r, p, txt, flags=re.IGNORECASE)
        if not txt:
            txt = otxt
        return txt
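    # Illustrative behavior (hypothetical inputs): strips question words and common
    # stop words so only content-bearing terms reach the full-text query, e.g.
    #   rmWWW("how to install ragflow") -> " install ragflow"
    #   rmWWW("什么是ragflow")           -> "ragflow"
    # If stripping would leave nothing, the original text is returned unchanged.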

    @staticmethod
    def add_space_between_eng_zh(txt):
        # (ENG/ENG+NUM) + ZH
        txt = re.sub(r'([A-Za-z]+[0-9]+)([\u4e00-\u9fa5]+)', r'\1 \2', txt)
        # ENG + ZH
        txt = re.sub(r'([A-Za-z])([\u4e00-\u9fa5]+)', r'\1 \2', txt)
        # ZH + (ENG/ENG+NUM)
        txt = re.sub(r'([\u4e00-\u9fa5]+)([A-Za-z]+[0-9]+)', r'\1 \2', txt)
        # ZH + ENG
        txt = re.sub(r'([\u4e00-\u9fa5]+)([A-Za-z])', r'\1 \2', txt)
        return txt
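    # Illustrative behavior (hypothetical input): inserts spaces at Latin/Chinese
    # boundaries so the tokenizer treats them as separate terms, e.g.
    #   add_space_between_eng_zh("用docker部署RAGFlow") -> "用 docker 部署 RAGFlow"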

    def question(self, txt, tbl="qa", min_match: float = 0.6):
        txt = FulltextQueryer.add_space_between_eng_zh(txt)
        txt = re.sub(
            r"[ :|\r\n\t,,。??/`!!&^%%()\[\]{}<>]+",
            " ",
            rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(txt.lower())),
        ).strip()
        otxt = txt
        txt = FulltextQueryer.rmWWW(txt)

        if not self.isChinese(txt):
            # Mostly-English query: build a boosted term/bigram query and return early.
            txt = FulltextQueryer.rmWWW(txt)
            tks = rag_tokenizer.tokenize(txt).split()
            keywords = [t for t in tks if t]
            tks_w = self.tw.weights(tks, preprocess=False)
            tks_w = [(re.sub(r"[ \\\"'^]", "", tk), w) for tk, w in tks_w]
            tks_w = [(re.sub(r"^[a-z0-9]$", "", tk), w) for tk, w in tks_w if tk]
            tks_w = [(re.sub(r"^[\+-]", "", tk), w) for tk, w in tks_w if tk]
            tks_w = [(tk.strip(), w) for tk, w in tks_w if tk.strip()]
            syns = []
            for tk, w in tks_w[:256]:
                syn = self.syn.lookup(tk)
                syn = rag_tokenizer.tokenize(" ".join(syn)).split()
                keywords.extend(syn)
                syn = ["\"{}\"^{:.4f}".format(s, w / 4.) for s in syn if s.strip()]
                syns.append(" ".join(syn))

            q = ["({}^{:.4f}".format(tk, w) + " {})".format(syn)
                 for (tk, w), syn in zip(tks_w, syns)
                 if tk and not re.match(r"[.^+\(\)-]", tk)]
            for i in range(1, len(tks_w)):
                left, right = tks_w[i - 1][0].strip(), tks_w[i][0].strip()
                if not left or not right:
                    continue
                q.append(
                    '"%s %s"^%.4f'
                    % (
                        tks_w[i - 1][0],
                        tks_w[i][0],
                        max(tks_w[i - 1][1], tks_w[i][1]) * 2,
                    )
                )
            if not q:
                q.append(txt)
            query = " ".join(q)
            return MatchTextExpr(
                self.query_fields, query, 100
            ), keywords

        def need_fine_grained_tokenize(tk):
            if len(tk) < 3:
                return False
            if re.match(r"[0-9a-z\.\+#_\*-]+$", tk):
                return False
            return True

        # Chinese (or mixed) query: expand each term with fine-grained tokens and
        # synonyms, then OR the per-term sub-queries together.
        txt = FulltextQueryer.rmWWW(txt)
        qs, keywords = [], []
        for tt in self.tw.split(txt)[:256]:  # .split():
            if not tt:
                continue
            keywords.append(tt)
            twts = self.tw.weights([tt])
            syns = self.syn.lookup(tt)
            if syns and len(keywords) < 32:
                keywords.extend(syns)
            logging.debug(json.dumps(twts, ensure_ascii=False))
            tms = []
            for tk, w in sorted(twts, key=lambda x: x[1] * -1):
                sm = (
                    rag_tokenizer.fine_grained_tokenize(tk).split()
                    if need_fine_grained_tokenize(tk)
                    else []
                )
                sm = [
                    re.sub(
                        r"[ ,\./;'\[\]\\`~!@#$%\^&\*\(\)=\+_<>\?:\"\{\}\|,。;‘’【】、!¥……()——《》?:“”-]+",
                        "",
                        m,
                    )
                    for m in sm
                ]
                sm = [FulltextQueryer.subSpecialChar(m) for m in sm if len(m) > 1]
                sm = [m for m in sm if len(m) > 1]

                if len(keywords) < 32:
                    keywords.append(re.sub(r"[ \\\"']+", "", tk))
                    keywords.extend(sm)

                tk_syns = self.syn.lookup(tk)
                tk_syns = [FulltextQueryer.subSpecialChar(s) for s in tk_syns]
                if len(keywords) < 32:
                    keywords.extend([s for s in tk_syns if s])
                tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
                tk_syns = [f"\"{s}\"" if s.find(" ") > 0 else s for s in tk_syns]

                if len(keywords) >= 32:
                    break

                tk = FulltextQueryer.subSpecialChar(tk)
                if tk.find(" ") > 0:
                    tk = '"%s"' % tk
                if tk_syns:
                    tk = f"({tk} OR (%s)^0.2)" % " ".join(tk_syns)
                if sm:
                    tk = f'{tk} OR "%s" OR ("%s"~2)^0.5' % (" ".join(sm), " ".join(sm))
                if tk.strip():
                    tms.append((tk, w))

            tms = " ".join([f"({t})^{w}" for t, w in tms])

            if len(twts) > 1:
                tms += ' ("%s"~2)^1.5' % rag_tokenizer.tokenize(tt)

            syns = " OR ".join(
                [
                    '"%s"'
                    % rag_tokenizer.tokenize(FulltextQueryer.subSpecialChar(s))
                    for s in syns
                ]
            )
            if syns and tms:
                tms = f"({tms})^5 OR ({syns})^0.7"

            qs.append(tms)

        if qs:
            query = " OR ".join([f"({t})" for t in qs if t])
            if not query:
                query = otxt
            return MatchTextExpr(
                self.query_fields, query, 100, {"minimum_should_match": min_match}
            ), keywords
        return None, keywords
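    # Usage sketch (hypothetical question; assumes a RAGFlow environment where the
    # term-weight and synonym resources load):
    #
    #   q = FulltextQueryer()
    #   match_expr, keywords = q.question("如何用docker部署RAGFlow?")
    #
    # For Chinese/mixed questions this returns a MatchTextExpr whose query string ORs
    # per-term sub-queries (with synonym and fine-grained expansions) over
    # self.query_fields, using minimum_should_match=min_match; for mostly-English
    # questions the earlier return builds a simpler boosted term/bigram query.
    # keywords collects the extracted terms plus their synonyms.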

    def hybrid_similarity(self, avec, bvecs, atks, btkss, tkweight=0.3, vtweight=0.7):
        from sklearn.metrics.pairwise import cosine_similarity as CosineSimilarity
        import numpy as np

        sims = CosineSimilarity([avec], bvecs)
        tksim = self.token_similarity(atks, btkss)
        if np.sum(sims[0]) == 0:
            return np.array(tksim), tksim, sims[0]
        return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, tksim, sims[0]
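    # Example weighting (hypothetical numbers): with vtweight=0.7 and tkweight=0.3,
    # a candidate with cosine similarity 0.8 and token similarity 0.5 gets a hybrid
    # score of 0.7 * 0.8 + 0.3 * 0.5 = 0.71. If the cosine similarities sum to zero,
    # the token similarities alone are returned as the hybrid score.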

    def token_similarity(self, atks, btkss):
        def toDict(tks):
            if isinstance(tks, str):
                tks = tks.split()
            d = defaultdict(int)
            wts = self.tw.weights(tks, preprocess=False)
            for i, (t, c) in enumerate(wts):
                d[t] += c
            return d

        atks = toDict(atks)
        btkss = [toDict(tks) for tks in btkss]
        return [self.similarity(atks, btks) for btks in btkss]
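    # Illustrative call (hypothetical token strings):
    #   token_similarity("docker 部署", ["docker 安装", "价格"])
    # returns one score per candidate, computed from weighted token overlap in
    # similarity() below.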

    def similarity(self, qtwt, dtwt):
        if isinstance(dtwt, type("")):
            dtwt = {t: w for t, w in self.tw.weights(self.tw.split(dtwt), preprocess=False)}
        if isinstance(qtwt, type("")):
            qtwt = {t: w for t, w in self.tw.weights(self.tw.split(qtwt), preprocess=False)}
        s = 1e-9
        for k, v in qtwt.items():
            if k in dtwt:
                s += v  # * dtwt[k]
        q = 1e-9
        for k, v in qtwt.items():
            q += v  # * v
        return s / q  # math.sqrt(3. * (s / q / math.log10(len(dtwt.keys()) + 512)))
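    # Worked example (hypothetical weights): with query weights {"docker": 0.6, "部署": 0.4}
    # and a document containing only "docker", s is about 0.6 and q is about 1.0, giving
    # a score of about 0.6, i.e. the fraction of the query's term weight covered by the
    # document.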

    def paragraph(self, content_tks: str, keywords: list = [], keywords_topn=30):
        if isinstance(content_tks, str):
            # Split the space-joined token string into individual tokens.
            content_tks = [c.strip() for c in content_tks.strip().split() if c.strip()]
        tks_w = self.tw.weights(content_tks, preprocess=False)

        keywords = [f'"{k.strip()}"' for k in keywords]
        for tk, w in sorted(tks_w, key=lambda x: x[1] * -1)[:keywords_topn]:
            tk_syns = self.syn.lookup(tk)
            tk_syns = [FulltextQueryer.subSpecialChar(s) for s in tk_syns]
            tk_syns = [rag_tokenizer.fine_grained_tokenize(s) for s in tk_syns if s]
            tk_syns = [f"\"{s}\"" if s.find(" ") > 0 else s for s in tk_syns]
            tk = FulltextQueryer.subSpecialChar(tk)
            if tk.find(" ") > 0:
                tk = '"%s"' % tk
            if tk_syns:
                tk = f"({tk} OR (%s)^0.2)" % " ".join(tk_syns)
            if tk:
                keywords.append(f"{tk}^{w}")

        return MatchTextExpr(self.query_fields, " ".join(keywords), 100,
                             {"minimum_should_match": min(3, len(keywords) // 10)})
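

# Minimal smoke-test sketch. Assumes it is run from a RAGFlow checkout where the
# rag.* packages and their term-weight / synonym resources are importable; the
# sample question is hypothetical.
if __name__ == "__main__":
    queryer = FulltextQueryer()
    match_expr, keywords = queryer.question("如何用docker部署RAGFlow?")
    print("keywords:", keywords)
    print("match expression:", match_expr)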