
query.py 7.1KB

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import math
import re
import logging
import copy

from elasticsearch_dsl import Q

from rag.nlp import rag_tokenizer, term_weight, synonym
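
# EsQueryer builds weighted Elasticsearch full-text queries from a user
# question (Chinese or English) and scores retrieved chunks with a hybrid
# of term-weight overlap and embedding cosine similarity.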
class EsQueryer:
    def __init__(self, es):
        self.tw = term_weight.Dealer()   # term-weighting helper
        self.es = es
        self.syn = synonym.Dealer()      # synonym lookup helper
        self.flds = ["ask_tks^10", "ask_small_tks"]  # default query fields, with boosts
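
    # Escape characters that are special in the Lucene query-string syntax.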
    @staticmethod
    def subSpecialChar(line):
        return re.sub(r"([:\{\}/\[\]\-\*\"\(\)\|~\^])", r"\\\1", line).strip()
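
    # Heuristic language check: treat the query as Chinese (or mixed) unless
    # most whitespace-separated tokens are pure ASCII letters.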
    @staticmethod
    def isChinese(line):
        arr = re.split(r"[ \t]+", line)
        if len(arr) <= 3:
            return True
        e = 0
        for t in arr:
            if not re.match(r"[a-zA-Z]+$", t):
                e += 1
        return e * 1. / len(arr) >= 0.7
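
    # Strip interrogative filler ("what", "how", Chinese question particles,
    # common English stop words) that carries no retrieval signal.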
    @staticmethod
    def rmWWW(txt):
        patts = [
            (r"是*(什么样的|哪家|一下|那家|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀)是*", ""),
            (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
            (r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down) ", " ")
        ]
        for r, p in patts:
            txt = re.sub(r, p, txt, flags=re.IGNORECASE)
        return txt
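
    # Build an elasticsearch_dsl bool query from a raw question and return
    # (query, keywords). English questions get per-token and adjacent-bigram
    # boosts; Chinese/mixed questions get per-term weights, fine-grained
    # tokens and synonym expansion.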
    def question(self, txt, tbl="qa", min_match="60%"):
        # Normalize: lowercase, full-width to half-width, traditional to
        # simplified Chinese, collapse punctuation and whitespace.
        txt = re.sub(
            r"[ :\r\n\t,,。??/`!!&\^%%]+",
            " ",
            rag_tokenizer.tradi2simp(
                rag_tokenizer.strQ2B(
                    txt.lower()))).strip()
        txt = EsQueryer.rmWWW(txt)

        if not self.isChinese(txt):
            tks = rag_tokenizer.tokenize(txt).split(" ")
            tks_w = self.tw.weights(tks)
            tks_w = [(re.sub(r"[ \\\"']+", "", tk), w) for tk, w in tks_w]
            q = ["{}^{:.4f}".format(tk, w) for tk, w in tks_w if tk]
            # Add adjacent-token phrase clauses with a doubled boost.
            for i in range(1, len(tks_w)):
                q.append("\"%s %s\"^%.4f" % (
                    tks_w[i - 1][0], tks_w[i][0],
                    max(tks_w[i - 1][1], tks_w[i][1]) * 2))
            if not q:
                q.append(txt)
            return Q("bool",
                     must=Q("query_string", fields=self.flds,
                            type="best_fields", query=" ".join(q),
                            boost=1)  # , minimum_should_match=min_match)
                     ), tks
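
        # Chinese / mixed-language branch. Only tokens that are long enough
        # and not plain ASCII codes are worth re-splitting at a finer
        # granularity.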
        def need_fine_grained_tokenize(tk):
            if len(tk) < 4:
                return False
            if re.match(r"[0-9a-z\.\+#_\*-]+$", tk):
                return False
            return True

        # Build one weighted sub-query per term group (at most 256 groups).
        qs, keywords = [], []
        for tt in self.tw.split(txt)[:256]:  # .split(" "):
            if not tt:
                continue
            twts = self.tw.weights([tt])
            syns = self.syn.lookup(tt)
            logging.info(json.dumps(twts, ensure_ascii=False))
            tms = []
            for tk, w in sorted(twts, key=lambda x: x[1] * -1):
                sm = rag_tokenizer.fine_grained_tokenize(tk).split(" ") if need_fine_grained_tokenize(tk) else []
                sm = [
                    re.sub(
                        r"[ ,\./;'\[\]\\`~!@#$%\^&\*\(\)=\+_<>\?:\"\{\}\|,。;‘’【】、!¥……()——《》?:“”-]+",
                        "",
                        m) for m in sm]
                sm = [EsQueryer.subSpecialChar(m) for m in sm if len(m) > 1]
                sm = [m for m in sm if len(m) > 1]
                if len(sm) < 2:
                    sm = []

                keywords.append(re.sub(r"[ \\\"']+", "", tk))

                # Expand the term with its synonyms and fine-grained tokens.
                tk_syns = self.syn.lookup(tk)
                tk = EsQueryer.subSpecialChar(tk)
                if tk.find(" ") > 0:
                    tk = "\"%s\"" % tk
                if tk_syns:
                    tk = f"({tk} %s)" % " ".join(tk_syns)
                if sm:
                    tk = f"{tk} OR \"%s\" OR (\"%s\"~2)^0.5" % (
                        " ".join(sm), " ".join(sm))
                tms.append((tk, w))

            tms = " ".join([f"({t})^{w}" for t, w in tms])

            if len(twts) > 1:
                tms += f" (\"%s\"~4)^1.5" % (" ".join([t for t, _ in twts]))
            if re.match(r"[0-9a-z ]+$", tt):
                tms = f"(\"{tt}\" OR \"%s\")" % rag_tokenizer.tokenize(tt)

            syns = " OR ".join(
                ["\"%s\"^0.7" % EsQueryer.subSpecialChar(rag_tokenizer.tokenize(s)) for s in syns])
            if syns:
                tms = f"({tms})^5 OR ({syns})^0.7"

            qs.append(tms)

        # OR all sub-queries together; minimum_should_match prunes weak hits.
        flds = copy.deepcopy(self.flds)
        mst = []
        if qs:
            mst.append(
                Q("query_string", fields=flds, type="best_fields",
                  query=" OR ".join([f"({t})" for t in qs if t]),
                  boost=1, minimum_should_match=min_match)
            )

        return Q("bool",
                 must=mst,
                 ), keywords
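
    # Blend token-overlap similarity and embedding cosine similarity into a
    # single score; returns (hybrid_scores, token_sims, cosine_sims).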
    def hybrid_similarity(self, avec, bvecs, atks, btkss, tkweight=0.3,
                          vtweight=0.7):
        from sklearn.metrics.pairwise import cosine_similarity as CosineSimilarity
        import numpy as np
        sims = CosineSimilarity([avec], bvecs)
        tksim = self.token_similarity(atks, btkss)
        return np.array(sims[0]) * vtweight + \
            np.array(tksim) * tkweight, tksim, sims[0]
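
    # Token-overlap similarity between one query token set and many candidate
    # token sets, weighting each token by its term weight.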
    def token_similarity(self, atks, btkss):
        def toDict(tks):
            d = {}
            if isinstance(tks, str):
                tks = tks.split(" ")
            for t, c in self.tw.weights(tks):
                if t not in d:
                    d[t] = 0
                d[t] += c
            return d

        atks = toDict(atks)
        btkss = [toDict(tks) for tks in btkss]
        return [self.similarity(atks, btks) for btks in btkss]
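
    # Weighted recall of query terms in the document's term-weight dict,
    # damped by the log of the larger vocabulary size.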
    def similarity(self, qtwt, dtwt):
        if isinstance(dtwt, str):
            dtwt = {t: w for t, w in self.tw.weights(self.tw.split(dtwt))}
        if isinstance(qtwt, str):
            qtwt = {t: w for t, w in self.tw.weights(self.tw.split(qtwt))}
        s = 1e-9
        for k, v in qtwt.items():
            if k in dtwt:
                s += v  # * dtwt[k]
        q = 1e-9
        for k, v in qtwt.items():
            q += v  # * v
        # d = 1e-9
        # for k, v in dtwt.items():
        #     d += v * v
        return s / q / max(1, math.sqrt(
            math.log10(max(len(qtwt.keys()), len(dtwt.keys())))))  # math.sqrt(q) / math.sqrt(d)
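
# Usage sketch (illustrative only): the Elasticsearch URL, index name and the
# sample question below are assumptions, not part of this module's API.
if __name__ == "__main__":
    from elasticsearch import Elasticsearch

    es_client = Elasticsearch("http://localhost:9200")  # assumed local instance
    qryr = EsQueryer(es_client)
    bool_q, kws = qryr.question("how to apply for a business license")
    print("keywords:", kws)
    print(json.dumps(bool_q.to_dict(), ensure_ascii=False, indent=2))
    # resp = es_client.search(index="ragflow_qa", query=bool_q.to_dict())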