
es_conn.py

import logging
import re
import json
import time
import os
import copy

from elasticsearch import Elasticsearch, NotFoundError
from elasticsearch_dsl import UpdateByQuery, Q, Search, Index
from elastic_transport import ConnectionTimeout
from rag import settings
from rag.utils import singleton
from api.utils.file_utils import get_project_base_directory
import polars as pl
from rag.utils.doc_store_conn import DocStoreConnection, MatchExpr, OrderByExpr, MatchTextExpr, MatchDenseExpr, \
    FusionExpr
from rag.nlp import is_english, rag_tokenizer

@singleton
class ESConnection(DocStoreConnection):
    def __init__(self):
        self.info = {}
        self.es = None
        logging.info(f"Use Elasticsearch {settings.ES['hosts']} as the doc engine.")
        # 24 attempts x 5s sleep = the 120s budget mentioned in the error below.
        for _ in range(24):
            try:
                self.es = Elasticsearch(
                    settings.ES["hosts"].split(","),
                    basic_auth=(settings.ES["username"], settings.ES["password"])
                    if "username" in settings.ES and "password" in settings.ES else None,
                    verify_certs=False,
                    request_timeout=600
                )
                if self.es:
                    self.info = self.es.info()
                    break
            except Exception as e:
                logging.warning(f"{str(e)}. Waiting for Elasticsearch {settings.ES['hosts']} to become healthy.")
                time.sleep(5)
        if not self.es or not self.es.ping():
            msg = f"Elasticsearch {settings.ES['hosts']} didn't become healthy in 120s."
            logging.error(msg)
            raise Exception(msg)
        v = self.info.get("version", {"number": "8.11.3"})
        v = v["number"].split(".")[0]
        if int(v) < 8:
            msg = f"Elasticsearch version must be greater than or equal to 8, current version: {v}"
            logging.error(msg)
            raise Exception(msg)
        fp_mapping = os.path.join(get_project_base_directory(), "conf", "mapping.json")
        if not os.path.exists(fp_mapping):
            msg = f"Elasticsearch mapping file not found at {fp_mapping}"
            logging.error(msg)
            raise Exception(msg)
        with open(fp_mapping, "r") as f:
            self.mapping = json.load(f)
        logging.info(f"Elasticsearch {settings.ES['hosts']} is healthy.")
  54. """
  55. Database operations
  56. """
  57. def dbType(self) -> str:
  58. return "elasticsearch"
  59. def health(self) -> dict:
  60. health_dict = dict(self.es.cluster.health())
  61. health_dict["type"] = "elasticsearch"
  62. return health_dict
  63. """
  64. Table operations
  65. """

    def createIdx(self, indexName: str, knowledgebaseId: str, vectorSize: int):
        if self.indexExist(indexName, knowledgebaseId):
            return True
        try:
            from elasticsearch.client import IndicesClient
            return IndicesClient(self.es).create(index=indexName,
                                                 settings=self.mapping["settings"],
                                                 mappings=self.mapping["mappings"])
        except Exception:
            logging.exception("ES create index error %s" % (indexName))
            return False

    def deleteIdx(self, indexName: str, knowledgebaseId: str):
        try:
            self.es.indices.delete(index=indexName, allow_no_indices=True)
        except NotFoundError:
            pass
        except Exception:
            logging.exception("ES delete index error %s" % (indexName))

    def indexExist(self, indexName: str, knowledgebaseId: str) -> bool:
        s = Index(indexName, self.es)
        for i in range(3):
            try:
                return s.exists()
            except Exception as e:
                logging.exception("ES indexExist")
                if "Timeout" in str(e) or "Conflict" in str(e):
                    continue
        return False
  93. """
  94. CRUD operations
  95. """
  96. def search(self, selectFields: list[str], highlightFields: list[str], condition: dict, matchExprs: list[MatchExpr],
  97. orderBy: OrderByExpr, offset: int, limit: int, indexNames: str | list[str],
  98. knowledgebaseIds: list[str]) -> list[dict] | pl.DataFrame:
  99. """
  100. Refers to https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html
  101. """
  102. if isinstance(indexNames, str):
  103. indexNames = indexNames.split(",")
  104. assert isinstance(indexNames, list) and len(indexNames) > 0
  105. assert "_id" not in condition
  106. s = Search()
  107. bqry = None
  108. vector_similarity_weight = 0.5
  109. for m in matchExprs:
  110. if isinstance(m, FusionExpr) and m.method == "weighted_sum" and "weights" in m.fusion_params:
  111. assert len(matchExprs) == 3 and isinstance(matchExprs[0], MatchTextExpr) and isinstance(matchExprs[1],
  112. MatchDenseExpr) and isinstance(
  113. matchExprs[2], FusionExpr)
  114. weights = m.fusion_params["weights"]
  115. vector_similarity_weight = float(weights.split(",")[1])
  116. for m in matchExprs:
  117. if isinstance(m, MatchTextExpr):
  118. minimum_should_match = "0%"
  119. if "minimum_should_match" in m.extra_options:
  120. minimum_should_match = str(int(m.extra_options["minimum_should_match"] * 100)) + "%"
  121. bqry = Q("bool",
  122. must=Q("query_string", fields=m.fields,
  123. type="best_fields", query=m.matching_text,
  124. minimum_should_match=minimum_should_match,
  125. boost=1),
  126. boost=1.0 - vector_similarity_weight,
  127. )
  128. elif isinstance(m, MatchDenseExpr):
  129. assert (bqry is not None)
  130. similarity = 0.0
  131. if "similarity" in m.extra_options:
  132. similarity = m.extra_options["similarity"]
  133. s = s.knn(m.vector_column_name,
  134. m.topn,
  135. m.topn * 2,
  136. query_vector=list(m.embedding_data),
  137. filter=bqry.to_dict(),
  138. similarity=similarity,
  139. )
  140. condition["kb_id"] = knowledgebaseIds
  141. if condition:
  142. if not bqry:
  143. bqry = Q("bool", must=[])
  144. for k, v in condition.items():
  145. if not isinstance(k, str) or not v:
  146. continue
  147. if isinstance(v, list):
  148. bqry.filter.append(Q("terms", **{k: v}))
  149. elif isinstance(v, str) or isinstance(v, int):
  150. bqry.filter.append(Q("term", **{k: v}))
  151. else:
  152. raise Exception(
  153. f"Condition `{str(k)}={str(v)}` value type is {str(type(v))}, expected to be int, str or list.")
  154. if bqry:
  155. s = s.query(bqry)
  156. for field in highlightFields:
  157. s = s.highlight(field)
  158. if orderBy:
  159. orders = list()
  160. for field, order in orderBy.fields:
  161. order = "asc" if order == 0 else "desc"
  162. orders.append({field: {"order": order, "unmapped_type": "float",
  163. "mode": "avg", "numeric_type": "double"}})
  164. s = s.sort(*orders)
  165. if limit > 0:
  166. s = s[offset:limit]
  167. q = s.to_dict()
  168. print(json.dumps(q), flush=True)
  169. logging.debug("ESConnection.search [Q]: " + json.dumps(q))
  170. for i in range(3):
  171. try:
  172. res = self.es.search(index=indexNames,
  173. body=q,
  174. timeout="600s",
  175. # search_type="dfs_query_then_fetch",
  176. track_total_hits=True,
  177. _source=True)
  178. if str(res.get("timed_out", "")).lower() == "true":
  179. raise Exception("Es Timeout.")
  180. logging.debug("ESConnection.search res: " + str(res))
  181. return res
  182. except Exception as e:
  183. logging.exception("ES search [Q]: " + str(q))
  184. if str(e).find("Timeout") > 0:
  185. continue
  186. raise e
  187. logging.error("ES search timeout for 3 times!")
  188. raise Exception("ES search timeout.")
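
    # For orientation: with one MatchTextExpr, one MatchDenseExpr and a
    # weighted_sum FusionExpr (weights "0.3,0.7"), the body sent to
    # Elasticsearch is roughly the sketch below; field names, sizes and
    # values are illustrative, not fixed by this class:
    #
    #   {
    #     "query": {"bool": {"must": [{"query_string": {...}}],
    #                        "filter": [{"terms": {"kb_id": [...]}}],
    #                        "boost": 0.3}},                # 1 - vector weight
    #     "knn": {"field": "q_768_vec", "k": 10, "num_candidates": 20,
    #             "query_vector": [...], "filter": {...}, "similarity": 0.5},
    #     "highlight": {...}, "from": 0, "size": 10
    #   }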

    def get(self, chunkId: str, indexName: str, knowledgebaseIds: list[str]) -> dict | None:
        for i in range(3):
            try:
                res = self.es.get(index=indexName,
                                  id=chunkId, source=True, )
                if str(res.get("timed_out", "")).lower() == "true":
                    raise Exception("Es Timeout.")
                if not res.get("found"):
                    return None
                chunk = res["_source"]
                chunk["id"] = chunkId
                return chunk
            except Exception as e:
                logging.exception(f"ES get({chunkId}) got exception")
                if "Timeout" in str(e):
                    continue
                raise e
        logging.error("ES get timed out 3 times!")
        raise Exception("ES get timeout.")

    def insert(self, documents: list[dict], indexName: str, knowledgebaseId: str) -> list[str]:
        # Refers to https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
        operations = []
        for d in documents:
            assert "_id" not in d
            assert "id" in d
            d_copy = copy.deepcopy(d)
            meta_id = d_copy.pop("id", "")
            operations.append(
                {"index": {"_index": indexName, "_id": meta_id}})
            operations.append(d_copy)
        res = []
        for _ in range(100):
            try:
                r = self.es.bulk(index=indexName, operations=operations,
                                 refresh=False, timeout="600s")
                if not r["errors"]:
                    return res
                for item in r["items"]:
                    for action in ["create", "delete", "index", "update"]:
                        if action in item and "error" in item[action]:
                            res.append(str(item[action]["_id"]) + ":" + str(item[action]["error"]))
                return res
            except Exception as e:
                logging.warning("Fail to bulk: " + str(e))
                if re.search(r"(Timeout|time out)", str(e), re.IGNORECASE):
                    time.sleep(3)
                    continue
        return res
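
    # For orientation: `operations` above follows the bulk API's alternating
    # action/document convention, e.g. (index name and fields illustrative):
    #
    #   {"index": {"_index": "ragflow_demo", "_id": "chunk-1"}}
    #   {"content_with_weight": "hello world", "kb_id": "kb-1"}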

    def update(self, condition: dict, newValue: dict, indexName: str, knowledgebaseId: str) -> bool:
        doc = copy.deepcopy(newValue)
        doc.pop("id", None)
        if "id" in condition and isinstance(condition["id"], str):
            # update a specific single document
            chunkId = condition["id"]
            for i in range(3):
                try:
                    self.es.update(index=indexName, id=chunkId, doc=doc)
                    return True
                except Exception as e:
                    logging.exception(
                        f"ES failed to update(index={indexName}, id={chunkId}, doc={json.dumps(condition, ensure_ascii=False)})")
                    if "Timeout" in str(e):
                        continue
        else:
            # update an unspecified, possibly multiple, set of documents
            bqry = Q("bool")
            for k, v in condition.items():
                if not isinstance(k, str) or not v:
                    continue
                if isinstance(v, list):
                    bqry.filter.append(Q("terms", **{k: v}))
                elif isinstance(v, str) or isinstance(v, int):
                    bqry.filter.append(Q("term", **{k: v}))
                else:
                    raise Exception(
                        f"Condition `{str(k)}={str(v)}` value type is {str(type(v))}, expected to be int, str or list.")
            scripts = []
            for k, v in newValue.items():
                if not isinstance(k, str) or not v:
                    continue
                if isinstance(v, str):
                    scripts.append(f"ctx._source.{k} = '{v}'")
                elif isinstance(v, int):
                    scripts.append(f"ctx._source.{k} = {v}")
                else:
                    raise Exception(
                        f"newValue `{str(k)}={str(v)}` value type is {str(type(v))}, expected to be int, str.")
            ubq = UpdateByQuery(
                index=indexName).using(
                self.es).query(bqry)
            ubq = ubq.script(source="; ".join(scripts))
            ubq = ubq.params(refresh=True)
            ubq = ubq.params(slices=5)
            ubq = ubq.params(conflicts="proceed")
            for i in range(3):
                try:
                    _ = ubq.execute()
                    return True
                except Exception as e:
                    logging.error("ES update exception: " + str(e) + "[Q]:" + str(bqry.to_dict()))
                    if "Timeout" in str(e) or "Conflict" in str(e):
                        continue
        return False
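
    # For orientation: the update-by-query branch above compiles `newValue`
    # into a painless script, e.g. newValue={"available_int": 1} yields
    # roughly (sketch, query elided):
    #
    #   {"query": {"bool": {"filter": [...]}},
    #    "script": {"source": "ctx._source.available_int = 1"}}
    #
    # String values are interpolated into single quotes verbatim, so they
    # must not themselves contain single quotes.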

    def delete(self, condition: dict, indexName: str, knowledgebaseId: str) -> int:
        qry = None
        assert "_id" not in condition
        if "id" in condition:
            chunk_ids = condition["id"]
            if not isinstance(chunk_ids, list):
                chunk_ids = [chunk_ids]
            qry = Q("ids", values=chunk_ids)
        else:
            qry = Q("bool")
            for k, v in condition.items():
                if isinstance(v, list):
                    qry.must.append(Q("terms", **{k: v}))
                elif isinstance(v, str) or isinstance(v, int):
                    qry.must.append(Q("term", **{k: v}))
                else:
                    raise Exception("Condition value must be int, str or list.")
        logging.debug("ESConnection.delete [Q]: " + json.dumps(qry.to_dict()))
        for _ in range(10):
            try:
                res = self.es.delete_by_query(
                    index=indexName,
                    body=Search().query(qry).to_dict(),
                    refresh=True)
                return res["deleted"]
            except Exception as e:
                logging.warning("Fail to delete: " + str(condition) + str(e))
                if re.search(r"(Timeout|time out)", str(e), re.IGNORECASE):
                    time.sleep(3)
                    continue
                if re.search(r"(not_found)", str(e), re.IGNORECASE):
                    return 0
        return 0
  325. """
  326. Helper functions for search result
  327. """
  328. def getTotal(self, res):
  329. if isinstance(res["hits"]["total"], type({})):
  330. return res["hits"]["total"]["value"]
  331. return res["hits"]["total"]
  332. def getChunkIds(self, res):
  333. return [d["_id"] for d in res["hits"]["hits"]]
  334. def __getSource(self, res):
  335. rr = []
  336. for d in res["hits"]["hits"]:
  337. d["_source"]["id"] = d["_id"]
  338. d["_source"]["_score"] = d["_score"]
  339. rr.append(d["_source"])
  340. return rr
  341. def getFields(self, res, fields: list[str]) -> dict[str, dict]:
  342. res_fields = {}
  343. if not fields:
  344. return {}
  345. for d in self.__getSource(res):
  346. m = {n: d.get(n) for n in fields if d.get(n) is not None}
  347. for n, v in m.items():
  348. if isinstance(v, list):
  349. m[n] = v
  350. continue
  351. if not isinstance(v, str):
  352. m[n] = str(m[n])
  353. # if n.find("tks") > 0:
  354. # m[n] = rmSpace(m[n])
  355. if m:
  356. res_fields[d["id"]] = m
  357. return res_fields
  358. def getHighlight(self, res, keywords: list[str], fieldnm: str):
  359. ans = {}
  360. for d in res["hits"]["hits"]:
  361. hlts = d.get("highlight")
  362. if not hlts:
  363. continue
  364. txt = "...".join([a for a in list(hlts.items())[0][1]])
  365. if not is_english(txt.split(" ")):
  366. ans[d["_id"]] = txt
  367. continue
  368. txt = d["_source"][fieldnm]
  369. txt = re.sub(r"[\r\n]", " ", txt, flags=re.IGNORECASE | re.MULTILINE)
  370. txts = []
  371. for t in re.split(r"[.?!;\n]", txt):
  372. for w in keywords:
  373. t = re.sub(r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])" % re.escape(w), r"\1<em>\2</em>\3", t,
  374. flags=re.IGNORECASE | re.MULTILINE)
  375. if not re.search(r"<em>[^<>]+</em>", t, flags=re.IGNORECASE | re.MULTILINE):
  376. continue
  377. txts.append(t)
  378. ans[d["_id"]] = "...".join(txts) if txts else "...".join([a for a in list(hlts.items())[0][1]])
  379. return ans
  380. def getAggregation(self, res, fieldnm: str):
  381. agg_field = "aggs_" + fieldnm
  382. if "aggregations" not in res or agg_field not in res["aggregations"]:
  383. return list()
  384. bkts = res["aggregations"][agg_field]["buckets"]
  385. return [(b["key"], b["doc_count"]) for b in bkts]
  386. """
  387. SQL
  388. """
  389. def sql(self, sql: str, fetch_size: int, format: str):
  390. logging.debug(f"ESConnection.sql get sql: {sql}")
  391. sql = re.sub(r"[ `]+", " ", sql)
  392. sql = sql.replace("%", "")
  393. replaces = []
  394. for r in re.finditer(r" ([a-z_]+_l?tks)( like | ?= ?)'([^']+)'", sql):
  395. fld, v = r.group(1), r.group(3)
  396. match = " MATCH({}, '{}', 'operator=OR;minimum_should_match=30%') ".format(
  397. fld, rag_tokenizer.fine_grained_tokenize(rag_tokenizer.tokenize(v)))
  398. replaces.append(
  399. ("{}{}'{}'".format(
  400. r.group(1),
  401. r.group(2),
  402. r.group(3)),
  403. match))
  404. for p, r in replaces:
  405. sql = sql.replace(p, r, 1)
  406. logging.debug(f"ESConnection.sql to es: {sql}")
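
        # Example of the rewrite above (field name and tokenizer output are
        # illustrative; the real tokens come from rag_tokenizer):
        #   in : ... WHERE content_ltks = 'machine learning'
        #   out: ... WHERE MATCH(content_ltks, '<tokenized text>',
        #        'operator=OR;minimum_should_match=30%')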
        for i in range(3):
            try:
                res = self.es.sql.query(body={"query": sql, "fetch_size": fetch_size}, format=format,
                                        request_timeout="2s")
                return res
            except ConnectionTimeout:
                logging.exception("ESConnection.sql timeout [Q]: " + sql)
                continue
            except Exception:
                logging.exception("ESConnection.sql got exception [Q]: " + sql)
                return None
        logging.error("ESConnection.sql timed out 3 times!")
        return None
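

# --- Usage sketch (illustrative, not part of the original module) ----------
# A minimal walkthrough of the CRUD surface, assuming `rag.settings.ES`
# points at a reachable cluster and `conf/mapping.json` exists; the index
# name and field names below are made up for the example.
#
#   conn = ESConnection()                  # blocks until the cluster responds
#   print(conn.dbType(), conn.health())
#   conn.createIdx("ragflow_demo", "kb-1", 768)
#   conn.insert([{"id": "chunk-1", "content_with_weight": "hello"}],
#               "ragflow_demo", "kb-1")
#   print(conn.get("chunk-1", "ragflow_demo", ["kb-1"]))
#   conn.update({"id": "chunk-1"}, {"content_with_weight": "hi"},
#               "ragflow_demo", "kb-1")
#   print(conn.delete({"id": "chunk-1"}, "ragflow_demo", "kb-1"))
#   conn.deleteIdx("ragflow_demo", "kb-1")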