# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
 - [LightRag](https://github.com/HKUDS/LightRAG)
"""
import html
import json
import logging
import re
import time
from collections import defaultdict
from hashlib import md5
from typing import Any, Callable
import os
import trio
from typing import Set, Tuple
import networkx as nx
import numpy as np
import xxhash
from networkx.readwrite import json_graph
import dataclasses

from api.utils.api_utils import timeout
from api import settings
from api.utils import get_uuid
from rag.nlp import search, rag_tokenizer
from rag.utils.doc_store_conn import OrderByExpr
from rag.utils.redis_conn import REDIS_CONN

GRAPH_FIELD_SEP = "<SEP>"

ErrorHandlerFn = Callable[[BaseException | None, str | None, dict | None], None]

chat_limiter = trio.CapacityLimiter(int(os.environ.get('MAX_CONCURRENT_CHATS', 10)))


@dataclasses.dataclass
class GraphChange:
    removed_nodes: Set[str] = dataclasses.field(default_factory=set)
    added_updated_nodes: Set[str] = dataclasses.field(default_factory=set)
    removed_edges: Set[Tuple[str, str]] = dataclasses.field(default_factory=set)
    added_updated_edges: Set[Tuple[str, str]] = dataclasses.field(default_factory=set)
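
# GraphChange records the delta produced when new extraction results are merged into an
# existing graph; set_graph() consumes it to decide which index entries to delete and
# which nodes/edges to (re-)embed.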


def perform_variable_replacements(
    input: str, history: list[dict] | None = None, variables: dict | None = None
) -> str:
    """Perform variable replacements on the input string and in a chat log."""
    if history is None:
        history = []
    if variables is None:
        variables = {}
    result = input

    def replace_all(input: str) -> str:
        result = input
        for k, v in variables.items():
            result = result.replace(f"{{{k}}}", str(v))
        return result

    result = replace_all(result)
    for i, entry in enumerate(history):
        if entry.get("role") == "system":
            entry["content"] = replace_all(entry.get("content") or "")

    return result
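
# Example of perform_variable_replacements (illustrative): values are substituted into
# "{name}"-style placeholders, and system messages in `history` are rewritten in place.
#   perform_variable_replacements("Hello {name}!", variables={"name": "World"})
#   -> "Hello World!"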


def clean_str(input: Any) -> str:
    """Clean an input string by removing HTML escapes, control characters, and other unwanted characters."""
    # If we get non-string input, just give it back
    if not isinstance(input, str):
        return input

    result = html.unescape(input.strip())
    # https://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python
    return re.sub(r"[\"\x00-\x1f\x7f-\x9f]", "", result)


def dict_has_keys_with_types(
    data: dict, expected_fields: list[tuple[str, type]]
) -> bool:
    """Return True if the given dictionary has the given keys with the given types."""
    for field, field_type in expected_fields:
        if field not in data:
            return False

        value = data[field]
        if not isinstance(value, field_type):
            return False
    return True
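

# Redis-backed caches below: keys are xxh64 digests of the inputs. LLM and embedding
# entries expire after 24 hours; knowledge-base tag entries expire after 10 minutes.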
def get_llm_cache(llmnm, txt, history, genconf):
    hasher = xxhash.xxh64()
    hasher.update(str(llmnm).encode("utf-8"))
    hasher.update(str(txt).encode("utf-8"))
    hasher.update(str(history).encode("utf-8"))
    hasher.update(str(genconf).encode("utf-8"))

    k = hasher.hexdigest()
    bin = REDIS_CONN.get(k)
    if not bin:
        return
    return bin


def set_llm_cache(llmnm, txt, v, history, genconf):
    hasher = xxhash.xxh64()
    hasher.update(str(llmnm).encode("utf-8"))
    hasher.update(str(txt).encode("utf-8"))
    hasher.update(str(history).encode("utf-8"))
    hasher.update(str(genconf).encode("utf-8"))

    k = hasher.hexdigest()
    REDIS_CONN.set(k, v.encode("utf-8"), 24 * 3600)


def get_embed_cache(llmnm, txt):
    hasher = xxhash.xxh64()
    hasher.update(str(llmnm).encode("utf-8"))
    hasher.update(str(txt).encode("utf-8"))

    k = hasher.hexdigest()
    bin = REDIS_CONN.get(k)
    if not bin:
        return
    return np.array(json.loads(bin))


def set_embed_cache(llmnm, txt, arr):
    hasher = xxhash.xxh64()
    hasher.update(str(llmnm).encode("utf-8"))
    hasher.update(str(txt).encode("utf-8"))

    k = hasher.hexdigest()
    arr = json.dumps(arr.tolist() if isinstance(arr, np.ndarray) else arr)
    REDIS_CONN.set(k, arr.encode("utf-8"), 24 * 3600)


def get_tags_from_cache(kb_ids):
    hasher = xxhash.xxh64()
    hasher.update(str(kb_ids).encode("utf-8"))

    k = hasher.hexdigest()
    bin = REDIS_CONN.get(k)
    if not bin:
        return
    return bin


def set_tags_to_cache(kb_ids, tags):
    hasher = xxhash.xxh64()
    hasher.update(str(kb_ids).encode("utf-8"))

    k = hasher.hexdigest()
    REDIS_CONN.set(k, json.dumps(tags).encode("utf-8"), 600)


def tidy_graph(graph: nx.Graph, callback, check_attribute: bool = True):
    """
    Ensure all nodes and edges in the graph have some essential attribute.
    """
    def is_valid_item(node_attrs: dict) -> bool:
        valid_node = True
        for attr in ["description", "source_id"]:
            if attr not in node_attrs:
                valid_node = False
                break
        return valid_node

    if check_attribute:
        purged_nodes = []
        for node, node_attrs in graph.nodes(data=True):
            if not is_valid_item(node_attrs):
                purged_nodes.append(node)
        for node in purged_nodes:
            graph.remove_node(node)
        if purged_nodes and callback:
            callback(msg=f"Purged {len(purged_nodes)} nodes from graph due to missing essential attributes.")

    purged_edges = []
    for source, target, attr in graph.edges(data=True):
        if check_attribute:
            if not is_valid_item(attr):
                purged_edges.append((source, target))
        if "keywords" not in attr:
            attr["keywords"] = []
    for source, target in purged_edges:
        graph.remove_edge(source, target)
    if purged_edges and callback:
        callback(msg=f"Purged {len(purged_edges)} edges from graph due to missing essential attributes.")


def get_from_to(node1, node2):
    if node1 < node2:
        return (node1, node2)
    else:
        return (node2, node1)
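
# get_from_to normalizes an undirected edge into a sorted key, e.g.
# get_from_to("B", "A") == get_from_to("A", "B") == ("A", "B").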


def graph_merge(g1: nx.Graph, g2: nx.Graph, change: GraphChange):
    """Merge graph g2 into g1 in place."""
    for node_name, attr in g2.nodes(data=True):
        change.added_updated_nodes.add(node_name)
        if not g1.has_node(node_name):
            g1.add_node(node_name, **attr)
            continue
        node = g1.nodes[node_name]
        node["description"] += GRAPH_FIELD_SEP + attr["description"]
        # A node's source_id indicates which chunks it came from.
        node["source_id"] += attr["source_id"]

    for source, target, attr in g2.edges(data=True):
        change.added_updated_edges.add(get_from_to(source, target))
        edge = g1.get_edge_data(source, target)
        if edge is None:
            g1.add_edge(source, target, **attr)
            continue
        edge["weight"] += attr.get("weight", 0)
        edge["description"] += GRAPH_FIELD_SEP + attr["description"]
        edge["keywords"] += attr["keywords"]
        # An edge's source_id indicates which chunks it came from.
        edge["source_id"] += attr["source_id"]

    for node_degree in g1.degree:
        g1.nodes[str(node_degree[0])]["rank"] = int(node_degree[1])

    # A graph's source_id indicates which documents it came from.
    if "source_id" not in g1.graph:
        g1.graph["source_id"] = []
    g1.graph["source_id"] += g2.graph.get("source_id", [])
    return g1
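
# Note: merging is purely additive — descriptions are concatenated with GRAPH_FIELD_SEP,
# edge weights are summed, source_id lists are extended, and every node's "rank" is reset
# to its degree in the merged graph.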


def compute_args_hash(*args):
    return md5(str(args).encode()).hexdigest()


def handle_single_entity_extraction(
    record_attributes: list[str],
    chunk_key: str,
):
    if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
        return None
    # add this record as a node in the G
    entity_name = clean_str(record_attributes[1].upper())
    if not entity_name.strip():
        return None
    entity_type = clean_str(record_attributes[2].upper())
    entity_description = clean_str(record_attributes[3])
    entity_source_id = chunk_key
    return dict(
        entity_name=entity_name.upper(),
        entity_type=entity_type.upper(),
        description=entity_description,
        source_id=entity_source_id,
    )
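
# Illustrative example of handle_single_entity_extraction (record values assumed to arrive
# quoted from the extraction prompt; clean_str strips the quotes):
#   handle_single_entity_extraction(
#       ['"entity"', '"Apple Inc."', '"organization"', '"A consumer electronics company."'],
#       "chunk-0")
#   -> {"entity_name": "APPLE INC.", "entity_type": "ORGANIZATION",
#       "description": "A consumer electronics company.", "source_id": "chunk-0"}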


def handle_single_relationship_extraction(record_attributes: list[str], chunk_key: str):
    if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
        return None
    # add this record as edge
    source = clean_str(record_attributes[1].upper())
    target = clean_str(record_attributes[2].upper())
    edge_description = clean_str(record_attributes[3])

    edge_keywords = clean_str(record_attributes[4])
    edge_source_id = chunk_key
    weight = (
        float(record_attributes[-1]) if is_float_regex(record_attributes[-1]) else 1.0
    )
    pair = sorted([source.upper(), target.upper()])
    return dict(
        src_id=pair[0],
        tgt_id=pair[1],
        weight=weight,
        description=edge_description,
        keywords=edge_keywords,
        source_id=edge_source_id,
        metadata={"created_at": time.time()},
    )


def pack_user_ass_to_openai_messages(*args: str):
    roles = ["user", "assistant"]
    return [
        {"role": roles[i % 2], "content": content} for i, content in enumerate(args)
    ]
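
# Example of pack_user_ass_to_openai_messages: arguments alternate user/assistant turns.
#   pack_user_ass_to_openai_messages("Q1", "A1", "Q2")
#   -> [{"role": "user", "content": "Q1"},
#       {"role": "assistant", "content": "A1"},
#       {"role": "user", "content": "Q2"}]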


def split_string_by_multi_markers(content: str, markers: list[str]) -> list[str]:
    """Split a string by multiple markers"""
    if not markers:
        return [content]
    results = re.split("|".join(re.escape(marker) for marker in markers), content)
    return [r.strip() for r in results if r.strip()]
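
# Example: split_string_by_multi_markers("a<SEP>b##c", ["<SEP>", "##"]) -> ["a", "b", "c"]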


def is_float_regex(value):
    return bool(re.match(r"^[-+]?[0-9]*\.?[0-9]+$", value))


def chunk_id(chunk):
    return xxhash.xxh64((chunk["content_with_weight"] + chunk["kb_id"]).encode("utf-8")).hexdigest()
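
# chunk_id is deterministic: identical content indexed into the same knowledge base always
# maps to the same xxh64 digest.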


async def graph_node_to_chunk(kb_id, embd_mdl, ent_name, meta, chunks):
    global chat_limiter
    enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
    chunk = {
        "id": get_uuid(),
        "important_kwd": [ent_name],
        "title_tks": rag_tokenizer.tokenize(ent_name),
        "entity_kwd": ent_name,
        "knowledge_graph_kwd": "entity",
        "entity_type_kwd": meta["entity_type"],
        "content_with_weight": json.dumps(meta, ensure_ascii=False),
        "content_ltks": rag_tokenizer.tokenize(meta["description"]),
        "source_id": meta["source_id"],
        "kb_id": kb_id,
        "available_int": 0
    }
    chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
    ebd = get_embed_cache(embd_mdl.llm_name, ent_name)
    if ebd is None:
        async with chat_limiter:
            with trio.fail_after(3 if enable_timeout_assertion else 30000000):
                ebd, _ = await trio.to_thread.run_sync(lambda: embd_mdl.encode([ent_name]))
        ebd = ebd[0]
        set_embed_cache(embd_mdl.llm_name, ent_name, ebd)
    assert ebd is not None
    chunk["q_%d_vec" % len(ebd)] = ebd
    chunks.append(chunk)


@timeout(3, 3)
def get_relation(tenant_id, kb_id, from_ent_name, to_ent_name, size=1):
    ents = from_ent_name
    if isinstance(ents, str):
        ents = [from_ent_name]
    if isinstance(to_ent_name, str):
        to_ent_name = [to_ent_name]
    ents.extend(to_ent_name)
    ents = list(set(ents))
    conds = {
        "fields": ["content_with_weight"],
        "size": size,
        "from_entity_kwd": ents,
        "to_entity_kwd": ents,
        "knowledge_graph_kwd": ["relation"]
    }
    res = []
    es_res = settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id] if isinstance(kb_id, str) else kb_id)
    for id in es_res.ids:
        try:
            if size == 1:
                return json.loads(es_res.field[id]["content_with_weight"])
            res.append(json.loads(es_res.field[id]["content_with_weight"]))
        except Exception:
            continue
    return res


async def graph_edge_to_chunk(kb_id, embd_mdl, from_ent_name, to_ent_name, meta, chunks):
    enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
    chunk = {
        "id": get_uuid(),
        "from_entity_kwd": from_ent_name,
        "to_entity_kwd": to_ent_name,
        "knowledge_graph_kwd": "relation",
        "content_with_weight": json.dumps(meta, ensure_ascii=False),
        "content_ltks": rag_tokenizer.tokenize(meta["description"]),
        "important_kwd": meta["keywords"],
        "source_id": meta["source_id"],
        "weight_int": int(meta["weight"]),
        "kb_id": kb_id,
        "available_int": 0
    }
    chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
    txt = f"{from_ent_name}->{to_ent_name}"
    ebd = get_embed_cache(embd_mdl.llm_name, txt)
    if ebd is None:
        async with chat_limiter:
            with trio.fail_after(3 if enable_timeout_assertion else 300000000):
                ebd, _ = await trio.to_thread.run_sync(lambda: embd_mdl.encode([txt + f": {meta['description']}"]))
        ebd = ebd[0]
        set_embed_cache(embd_mdl.llm_name, txt, ebd)
    assert ebd is not None
    chunk["q_%d_vec" % len(ebd)] = ebd
    chunks.append(chunk)


async def does_graph_contains(tenant_id, kb_id, doc_id):
    # Get doc_ids of graph
    fields = ["source_id"]
    condition = {
        "knowledge_graph_kwd": ["graph"],
        "removed_kwd": "N",
    }
    res = await trio.to_thread.run_sync(lambda: settings.docStoreConn.search(fields, [], condition, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [kb_id]))
    fields2 = settings.docStoreConn.getFields(res, fields)
    graph_doc_ids = set()
    for chunk_id in fields2.keys():
        graph_doc_ids = set(fields2[chunk_id]["source_id"])
    return doc_id in graph_doc_ids


async def get_graph_doc_ids(tenant_id, kb_id) -> list[str]:
    conds = {
        "fields": ["source_id"],
        "removed_kwd": "N",
        "size": 1,
        "knowledge_graph_kwd": ["graph"]
    }
    res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id]))
    doc_ids = []
    if res.total == 0:
        return doc_ids
    for id in res.ids:
        doc_ids = res.field[id]["source_id"]
    return doc_ids


async def get_graph(tenant_id, kb_id, exclude_rebuild=None):
    conds = {
        "fields": ["content_with_weight", "removed_kwd", "source_id"],
        "size": 1,
        "knowledge_graph_kwd": ["graph"]
    }
    res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id]))
    if not res.total == 0:
        for id in res.ids:
            try:
                if res.field[id]["removed_kwd"] == "N":
                    g = json_graph.node_link_graph(json.loads(res.field[id]["content_with_weight"]), edges="edges")
                    if "source_id" not in g.graph:
                        g.graph["source_id"] = res.field[id]["source_id"]
                else:
                    g = await rebuild_graph(tenant_id, kb_id, exclude_rebuild)
                return g
            except Exception:
                continue

    result = None
    return result


async def set_graph(tenant_id: str, kb_id: str, embd_mdl, graph: nx.Graph, change: GraphChange, callback):
    global chat_limiter
    start = trio.current_time()

    await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph"]}, search.index_name(tenant_id), kb_id))

    if change.removed_nodes:
        await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({"knowledge_graph_kwd": ["entity"], "entity_kwd": sorted(change.removed_nodes)}, search.index_name(tenant_id), kb_id))

    if change.removed_edges:
        async def del_edges(from_node, to_node):
            async with chat_limiter:
                await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({"knowledge_graph_kwd": ["relation"], "from_entity_kwd": from_node, "to_entity_kwd": to_node}, search.index_name(tenant_id), kb_id))

        async with trio.open_nursery() as nursery:
            for from_node, to_node in change.removed_edges:
                nursery.start_soon(del_edges, from_node, to_node)

    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph removed {len(change.removed_nodes)} nodes and {len(change.removed_edges)} edges from index in {now - start:.2f}s.")
    start = now

    chunks = [{
        "id": get_uuid(),
        "content_with_weight": json.dumps(nx.node_link_data(graph, edges="edges"), ensure_ascii=False),
        "knowledge_graph_kwd": "graph",
        "kb_id": kb_id,
        "source_id": graph.graph.get("source_id", []),
        "available_int": 0,
        "removed_kwd": "N"
    }]

    # generate updated subgraphs
    for source in graph.graph["source_id"]:
        subgraph = graph.subgraph([n for n in graph.nodes if source in graph.nodes[n]["source_id"]]).copy()
        subgraph.graph["source_id"] = [source]
        for n in subgraph.nodes:
            subgraph.nodes[n]["source_id"] = [source]
        chunks.append({
            "id": get_uuid(),
            "content_with_weight": json.dumps(nx.node_link_data(subgraph, edges="edges"), ensure_ascii=False),
            "knowledge_graph_kwd": "subgraph",
            "kb_id": kb_id,
            "source_id": [source],
            "available_int": 0,
            "removed_kwd": "N"
        })

    async with trio.open_nursery() as nursery:
        for ii, node in enumerate(change.added_updated_nodes):
            node_attrs = graph.nodes[node]
            nursery.start_soon(graph_node_to_chunk, kb_id, embd_mdl, node, node_attrs, chunks)
            if ii % 100 == 9 and callback:
                callback(msg=f"Get embedding of nodes: {ii}/{len(change.added_updated_nodes)}")

    async with trio.open_nursery() as nursery:
        for ii, (from_node, to_node) in enumerate(change.added_updated_edges):
            edge_attrs = graph.get_edge_data(from_node, to_node)
            if not edge_attrs:
                # added_updated_edges could record a non-existing edge if both from_node and to_node participate in nodes merging.
                continue
            nursery.start_soon(graph_edge_to_chunk, kb_id, embd_mdl, from_node, to_node, edge_attrs, chunks)
            if ii % 100 == 9 and callback:
                callback(msg=f"Get embedding of edges: {ii}/{len(change.added_updated_edges)}")

    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph converted graph change to {len(chunks)} chunks in {now - start:.2f}s.")
    start = now

    enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
    es_bulk_size = 4
    for b in range(0, len(chunks), es_bulk_size):
        with trio.fail_after(3 if enable_timeout_assertion else 30000000):
            doc_store_result = await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert(chunks[b:b + es_bulk_size], search.index_name(tenant_id), kb_id))
        if b % 100 == es_bulk_size and callback:
            callback(msg=f"Insert chunks: {b}/{len(chunks)}")
        if doc_store_result:
            error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
            raise Exception(error_message)

    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph added/updated {len(change.added_updated_nodes)} nodes and {len(change.added_updated_edges)} edges from index in {now - start:.2f}s.")


def is_continuous_subsequence(subseq, seq):
    # Checks whether subseq[0] appears somewhere in seq immediately followed by subseq[-1].
    def find_all_indexes(tup, value):
        indexes = []
        start = 0
        while True:
            try:
                index = tup.index(value, start)
                indexes.append(index)
                start = index + 1
            except ValueError:
                break
        return indexes

    index_list = find_all_indexes(seq, subseq[0])
    for idx in index_list:
        if idx != len(seq) - 1:
            if seq[idx + 1] == subseq[-1]:
                return True
    return False
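
# Examples (illustrative):
#   is_continuous_subsequence(("A", "B"), ("X", "A", "B", "C"))  -> True
#   is_continuous_subsequence(("B", "A"), ("X", "A", "B", "C"))  -> False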


def merge_tuples(list1, list2):
    result = []
    for tup in list1:
        last_element = tup[-1]
        if last_element in tup[:-1]:
            result.append(tup)
        else:
            matching_tuples = [t for t in list2 if t[0] == last_element]
            already_match_flag = 0
            for match in matching_tuples:
                matchh = (match[1], match[0])
                if is_continuous_subsequence(match, tup) or is_continuous_subsequence(matchh, tup):
                    continue
                already_match_flag = 1
                merged_tuple = tup + match[1:]
                result.append(merged_tuple)
            if not already_match_flag:
                result.append(tup)
    return result


async def get_entity_type2sampels(idxnms, kb_ids: list):
    es_res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search({"knowledge_graph_kwd": "ty2ents", "kb_id": kb_ids,
                                                                                "size": 10000,
                                                                                "fields": ["content_with_weight"]},
                                                                               idxnms, kb_ids))

    res = defaultdict(list)
    for id in es_res.ids:
        smp = es_res.field[id].get("content_with_weight")
        if not smp:
            continue
        try:
            smp = json.loads(smp)
        except Exception as e:
            logging.exception(e)
            # Skip malformed payloads instead of failing on smp.items() below.
            continue

        for ty, ents in smp.items():
            res[ty].extend(ents)
    return res


def flat_uniq_list(arr, key):
    res = []
    for a in arr:
        a = a[key]
        if isinstance(a, list):
            res.extend(a)
        else:
            res.append(a)
    return list(set(res))


async def rebuild_graph(tenant_id, kb_id, exclude_rebuild=None):
    graph = nx.Graph()
    flds = ["knowledge_graph_kwd", "content_with_weight", "source_id"]
    bs = 256
    for i in range(0, 1024 * bs, bs):
        es_res = await trio.to_thread.run_sync(lambda: settings.docStoreConn.search(flds, [],
                                                                                    {"kb_id": kb_id, "knowledge_graph_kwd": ["subgraph"]},
                                                                                    [],
                                                                                    OrderByExpr(),
                                                                                    i, bs, search.index_name(tenant_id), [kb_id]
                                                                                    ))
        # tot = settings.docStoreConn.getTotal(es_res)
        es_res = settings.docStoreConn.getFields(es_res, flds)
        if len(es_res) == 0:
            break

        for id, d in es_res.items():
            assert d["knowledge_graph_kwd"] == "subgraph"
            if isinstance(exclude_rebuild, list):
                if sum([n in d["source_id"] for n in exclude_rebuild]):
                    continue
            elif exclude_rebuild in d["source_id"]:
                continue

            next_graph = json_graph.node_link_graph(json.loads(d["content_with_weight"]), edges="edges")
            merged_graph = nx.compose(graph, next_graph)
            merged_source = {
                n: graph.nodes[n]["source_id"] + next_graph.nodes[n]["source_id"]
                for n in graph.nodes & next_graph.nodes
            }
            nx.set_node_attributes(merged_graph, merged_source, "source_id")
            if "source_id" in graph.graph:
                merged_graph.graph["source_id"] = graph.graph["source_id"] + next_graph.graph["source_id"]
            else:
                merged_graph.graph["source_id"] = next_graph.graph["source_id"]
            graph = merged_graph

    if len(graph.nodes) == 0:
        return None
    graph.graph["source_id"] = sorted(graph.graph["source_id"])
    return graph