#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
import json
import re

from rag.utils.doc_store_conn import MatchTextExpr
from rag.nlp import rag_tokenizer, term_weight, synonym


class FulltextQueryer:
    def __init__(self):
        self.tw = term_weight.Dealer()
        self.syn = synonym.Dealer()
        # Fields searched by full-text queries, with per-field boost weights.
        self.query_fields = [
            "title_tks^10",
            "title_sm_tks^5",
            "important_kwd^30",
            "important_tks^20",
            "content_ltks^2",
            "content_sm_ltks",
        ]

    @staticmethod
    def subSpecialChar(line):
        # Escape characters that carry special meaning in the full-text query syntax.
        return re.sub(r"([:\{\}/\[\]\-\*\"\(\)\|\+~\^])", r"\\\1", line).strip()

    @staticmethod
    def isChinese(line):
        # Heuristic: treat the text as Chinese when most whitespace-separated
        # tokens are not purely alphabetic; very short inputs default to Chinese.
        arr = re.split(r"[ \t]+", line)
        if len(arr) <= 3:
            return True
        e = 0
        for t in arr:
            if not re.match(r"[a-zA-Z]+$", t):
                e += 1
        return e * 1.0 / len(arr) >= 0.7

    @staticmethod
    def rmWWW(txt):
        # Strip question words and other stop words (Chinese and English)
        # that add no retrieval value.
        patts = [
            (
                r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀)是*",
                "",
            ),
            (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
            (r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down|of) ", " "),
        ]
        for r, p in patts:
            txt = re.sub(r, p, txt, flags=re.IGNORECASE)
        return txt

    def question(self, txt, tbl="qa", min_match: float = 0.6):
        # Normalize: full-width -> half-width, traditional -> simplified,
        # lower-case, and collapse punctuation and whitespace.
        txt = re.sub(
            r"[ :\r\n\t,，。？?/`!！&\^%%]+",
            " ",
            rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(txt.lower())),
        ).strip()
        txt = FulltextQueryer.rmWWW(txt)

        if not self.isChinese(txt):
            txt = FulltextQueryer.rmWWW(txt)
            tks = rag_tokenizer.tokenize(txt).split(" ")
            keywords = [t for t in tks if t]
            tks_w = self.tw.weights(tks, preprocess=False)
            tks_w = [(re.sub(r"[ \\\"'^]", "", tk), w) for tk, w in tks_w]
            tks_w = [(re.sub(r"^[a-z0-9]$", "", tk), w) for tk, w in tks_w if tk]
            tks_w = [(re.sub(r"^[\+-]", "", tk), w) for tk, w in tks_w if tk]
            syns = []
            for tk, w in tks_w:
                syn = self.syn.lookup(tk)
                syn = rag_tokenizer.tokenize(" ".join(syn)).split(" ")
                keywords.extend(syn)
                syn = ["\"{}\"^{:.4f}".format(s, w / 4.) for s in syn]
                syns.append(" ".join(syn))

            # One weighted clause per token, each carrying its down-weighted synonyms.
            q = [
                "({}^{:.4f}".format(tk, w) + " %s)" % syn
                for (tk, w), syn in zip(tks_w, syns)
            ]
            # Boost phrases built from adjacent token pairs.
            for i in range(1, len(tks_w)):
                q.append(
                    '"%s %s"^%.4f'
                    % (
                        tks_w[i - 1][0],
                        tks_w[i][0],
                        max(tks_w[i - 1][1], tks_w[i][1]) * 2,
                    )
                )
            if not q:
                q.append(txt)
            query = " ".join(q)
            return MatchTextExpr(
                self.query_fields, query, 100
            ), keywords

        def need_fine_grained_tokenize(tk):
            # Skip fine-grained tokenization for very short tokens and for
            # plain ASCII tokens (numbers, identifiers, model names, ...).
            if len(tk) < 3:
                return False
            if re.match(r"[0-9a-z\.\+#_\*-]+$", tk):
                return False
            return True

        txt = FulltextQueryer.rmWWW(txt)
        qs, keywords = [], []
        for tt in self.tw.split(txt)[:256]:  # .split(" "):
            if not tt:
                continue
            keywords.append(tt)
            twts = self.tw.weights([tt])
            syns = self.syn.lookup(tt)
            if syns:
                keywords.extend(syns)
            logging.debug(json.dumps(twts, ensure_ascii=False))
            tms = []
            for tk, w in sorted(twts, key=lambda x: x[1] * -1):
                sm = (
                    rag_tokenizer.fine_grained_tokenize(tk).split(" ")
                    if need_fine_grained_tokenize(tk)
                    else []
                )
                sm = [
                    re.sub(
                        r"[ ,\./;'\[\]\\`~!@#$%\^&\*\(\)=\+_<>\?:\"\{\}\|，。；‘’【】、！￥……（）——《》？：“”-]+",
                        "",
                        m,
                    )
                    for m in sm
                ]
                sm = [FulltextQueryer.subSpecialChar(m) for m in sm if len(m) > 1]
                sm = [m for m in sm if len(m) > 1]

                keywords.append(re.sub(r"[ \\\"']+", "", tk))
                keywords.extend(sm)
                if len(keywords) >= 12:
                    break

                tk_syns = self.syn.lookup(tk)
                tk = FulltextQueryer.subSpecialChar(tk)
                if tk.find(" ") > 0:
                    tk = '"%s"' % tk
                if tk_syns:
                    tk = f"({tk} %s)" % " ".join(tk_syns)
                if sm:
                    tk = f'{tk} OR "%s" OR ("%s"~2)^0.5' % (" ".join(sm), " ".join(sm))
                if tk.strip():
                    tms.append((tk, w))

            tms = " ".join([f"({t})^{w}" for t, w in tms])

            if len(twts) > 1:
                tms += ' ("%s"~4)^1.5' % (" ".join([t for t, _ in twts]))
            if re.match(r"[0-9a-z ]+$", tt):
                tms = f'("{tt}" OR "%s")' % rag_tokenizer.tokenize(tt)

            syns = " OR ".join(
                [
                    '"%s"^0.7'
                    % FulltextQueryer.subSpecialChar(rag_tokenizer.tokenize(s))
                    for s in syns
                ]
            )
            if syns:
                tms = f"({tms})^5 OR ({syns})^0.7"

            qs.append(tms)

        if qs:
            query = " OR ".join([f"({t})" for t in qs if t])
            return MatchTextExpr(
                self.query_fields, query, 100, {"minimum_should_match": min_match}
            ), keywords
        return None, keywords

    def hybrid_similarity(self, avec, bvecs, atks, btkss, tkweight=0.3, vtweight=0.7):
        # Weighted blend of embedding cosine similarity and token overlap.
        from sklearn.metrics.pairwise import cosine_similarity as CosineSimilarity
        import numpy as np

        sims = CosineSimilarity([avec], bvecs)
        tksim = self.token_similarity(atks, btkss)
        return np.array(sims[0]) * vtweight + np.array(tksim) * tkweight, tksim, sims[0]

    def token_similarity(self, atks, btkss):
        def toDict(tks):
            d = {}
            if isinstance(tks, str):
                tks = tks.split(" ")
            for t, c in self.tw.weights(tks, preprocess=False):
                if t not in d:
                    d[t] = 0
                d[t] += c
            return d

        atks = toDict(atks)
        btkss = [toDict(tks) for tks in btkss]
        return [self.similarity(atks, btks) for btks in btkss]

    def similarity(self, qtwt, dtwt):
        # Fraction of the query's total token weight that is covered by the document.
        if isinstance(dtwt, str):
            dtwt = {t: w for t, w in self.tw.weights(self.tw.split(dtwt), preprocess=False)}
        if isinstance(qtwt, str):
            qtwt = {t: w for t, w in self.tw.weights(self.tw.split(qtwt), preprocess=False)}
        s = 1e-9
        for k, v in qtwt.items():
            if k in dtwt:
                s += v  # * dtwt[k]
        q = 1e-9
        for k, v in qtwt.items():
            q += v
        return s / q
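

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes the
# surrounding RAGFlow package is importable and that term_weight.Dealer() and
# synonym.Dealer() can load their default dictionaries; the example query
# strings are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    queryer = FulltextQueryer()

    # Build a full-text match expression and the extracted keywords for a question.
    match_expr, keywords = queryer.question("what is the refund policy for enterprise plans")
    print("keywords:", keywords)
    print("match expression:", match_expr)

    # Token-level similarity between a query and two candidate chunks.
    print("token similarity:", queryer.token_similarity(
        "refund policy enterprise",
        ["refund policy for enterprise customers", "shipping times and delivery"],
    ))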