您最多选择25个主题 主题必须以字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646
  1. # Copyright (c) 2024 Microsoft Corporation.
  2. # Licensed under the MIT License
  3. """
  4. Reference:
  5. - [graphrag](https://github.com/microsoft/graphrag)
  6. - [LightRag](https://github.com/HKUDS/LightRAG)
  7. """
  8. import html
  9. import json
  10. import logging
  11. import re
  12. import time
  13. from collections import defaultdict
  14. from hashlib import md5
  15. from typing import Any, Callable
  16. import os
  17. import trio
  18. from typing import Set, Tuple
  19. import networkx as nx
  20. import numpy as np
  21. import xxhash
  22. from networkx.readwrite import json_graph
  23. import dataclasses
  24. from api.utils.api_utils import timeout
  25. from api import settings
  26. from api.utils import get_uuid
  27. from rag.nlp import search, rag_tokenizer
  28. from rag.utils.doc_store_conn import OrderByExpr
  29. from rag.utils.redis_conn import REDIS_CONN
  30. GRAPH_FIELD_SEP = "<SEP>"
  31. ErrorHandlerFn = Callable[[BaseException | None, str | None, dict | None], None]
  32. chat_limiter = trio.CapacityLimiter(int(os.environ.get('MAX_CONCURRENT_CHATS', 10)))
  33. @dataclasses.dataclass
  34. class GraphChange:
  35. removed_nodes: Set[str] = dataclasses.field(default_factory=set)
  36. added_updated_nodes: Set[str] = dataclasses.field(default_factory=set)
  37. removed_edges: Set[Tuple[str, str]] = dataclasses.field(default_factory=set)
  38. added_updated_edges: Set[Tuple[str, str]] = dataclasses.field(default_factory=set)
  39. def perform_variable_replacements(
  40. input: str, history: list[dict] | None = None, variables: dict | None = None
  41. ) -> str:
  42. """Perform variable replacements on the input string and in a chat log."""
  43. if history is None:
  44. history = []
  45. if variables is None:
  46. variables = {}
  47. result = input
  48. def replace_all(input: str) -> str:
  49. result = input
  50. for k, v in variables.items():
  51. result = result.replace(f"{{{k}}}", str(v))
  52. return result
  53. result = replace_all(result)
  54. for i, entry in enumerate(history):
  55. if entry.get("role") == "system":
  56. entry["content"] = replace_all(entry.get("content") or "")
  57. return result
  58. def clean_str(input: Any) -> str:
  59. """Clean an input string by removing HTML escapes, control characters, and other unwanted characters."""
  60. # If we get non-string input, just give it back
  61. if not isinstance(input, str):
  62. return input
  63. result = html.unescape(input.strip())
  64. # https://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python
  65. return re.sub(r"[\"\x00-\x1f\x7f-\x9f]", "", result)
  66. def dict_has_keys_with_types(
  67. data: dict, expected_fields: list[tuple[str, type]]
  68. ) -> bool:
  69. """Return True if the given dictionary has the given keys with the given types."""
  70. for field, field_type in expected_fields:
  71. if field not in data:
  72. return False
  73. value = data[field]
  74. if not isinstance(value, field_type):
  75. return False
  76. return True
  77. def get_llm_cache(llmnm, txt, history, genconf):
  78. hasher = xxhash.xxh64()
  79. hasher.update(str(llmnm).encode("utf-8"))
  80. hasher.update(str(txt).encode("utf-8"))
  81. hasher.update(str(history).encode("utf-8"))
  82. hasher.update(str(genconf).encode("utf-8"))
  83. k = hasher.hexdigest()
  84. bin = REDIS_CONN.get(k)
  85. if not bin:
  86. return
  87. return bin
  88. def set_llm_cache(llmnm, txt, v, history, genconf):
  89. hasher = xxhash.xxh64()
  90. hasher.update(str(llmnm).encode("utf-8"))
  91. hasher.update(str(txt).encode("utf-8"))
  92. hasher.update(str(history).encode("utf-8"))
  93. hasher.update(str(genconf).encode("utf-8"))
  94. k = hasher.hexdigest()
  95. REDIS_CONN.set(k, v.encode("utf-8"), 24*3600)
  96. def get_embed_cache(llmnm, txt):
  97. hasher = xxhash.xxh64()
  98. hasher.update(str(llmnm).encode("utf-8"))
  99. hasher.update(str(txt).encode("utf-8"))
  100. k = hasher.hexdigest()
  101. bin = REDIS_CONN.get(k)
  102. if not bin:
  103. return
  104. return np.array(json.loads(bin))
  105. def set_embed_cache(llmnm, txt, arr):
  106. hasher = xxhash.xxh64()
  107. hasher.update(str(llmnm).encode("utf-8"))
  108. hasher.update(str(txt).encode("utf-8"))
  109. k = hasher.hexdigest()
  110. arr = json.dumps(arr.tolist() if isinstance(arr, np.ndarray) else arr)
  111. REDIS_CONN.set(k, arr.encode("utf-8"), 24*3600)
  112. def get_tags_from_cache(kb_ids):
  113. hasher = xxhash.xxh64()
  114. hasher.update(str(kb_ids).encode("utf-8"))
  115. k = hasher.hexdigest()
  116. bin = REDIS_CONN.get(k)
  117. if not bin:
  118. return
  119. return bin
  120. def set_tags_to_cache(kb_ids, tags):
  121. hasher = xxhash.xxh64()
  122. hasher.update(str(kb_ids).encode("utf-8"))
  123. k = hasher.hexdigest()
  124. REDIS_CONN.set(k, json.dumps(tags).encode("utf-8"), 600)
  125. def tidy_graph(graph: nx.Graph, callback, check_attribute: bool = True):
  126. """
  127. Ensure all nodes and edges in the graph have some essential attribute.
  128. """
  129. def is_valid_item(node_attrs: dict) -> bool:
  130. valid_node = True
  131. for attr in ["description", "source_id"]:
  132. if attr not in node_attrs:
  133. valid_node = False
  134. break
  135. return valid_node
  136. if check_attribute:
  137. purged_nodes = []
  138. for node, node_attrs in graph.nodes(data=True):
  139. if not is_valid_item(node_attrs):
  140. purged_nodes.append(node)
  141. for node in purged_nodes:
  142. graph.remove_node(node)
  143. if purged_nodes and callback:
  144. callback(msg=f"Purged {len(purged_nodes)} nodes from graph due to missing essential attributes.")
  145. purged_edges = []
  146. for source, target, attr in graph.edges(data=True):
  147. if check_attribute:
  148. if not is_valid_item(attr):
  149. purged_edges.append((source, target))
  150. if "keywords" not in attr:
  151. attr["keywords"] = []
  152. for source, target in purged_edges:
  153. graph.remove_edge(source, target)
  154. if purged_edges and callback:
  155. callback(msg=f"Purged {len(purged_edges)} edges from graph due to missing essential attributes.")
  156. def get_from_to(node1, node2):
  157. if node1 < node2:
  158. return (node1, node2)
  159. else:
  160. return (node2, node1)
def graph_merge(g1: nx.Graph, g2: nx.Graph, change: GraphChange):
    """Merge graph g2 into g1 in place.

    For nodes/edges present in both graphs: descriptions are joined with
    GRAPH_FIELD_SEP, source_id lists are concatenated, edge weights are summed,
    and edge keywords are concatenated. Every node/edge touched is recorded in
    *change*. Node "rank" is recomputed from degree. Returns g1.
    """
    for node_name, attr in g2.nodes(data=True):
        change.added_updated_nodes.add(node_name)
        if not g1.has_node(node_name):
            # New node: copy its attributes wholesale.
            g1.add_node(node_name, **attr)
            continue
        node = g1.nodes[node_name]
        node["description"] += GRAPH_FIELD_SEP + attr["description"]
        # A node's source_id indicates which chunks it came from.
        node["source_id"] += attr["source_id"]
    for source, target, attr in g2.edges(data=True):
        # Edges are keyed by the canonically ordered name pair.
        change.added_updated_edges.add(get_from_to(source, target))
        edge = g1.get_edge_data(source, target)
        if edge is None:
            g1.add_edge(source, target, **attr)
            continue
        edge["weight"] += attr.get("weight", 0)
        edge["description"] += GRAPH_FIELD_SEP + attr["description"]
        edge["keywords"] += attr["keywords"]
        # A edge's source_id indicates which chunks it came from.
        edge["source_id"] += attr["source_id"]

    # Refresh every node's rank from its (merged) degree.
    for node_degree in g1.degree:
        g1.nodes[str(node_degree[0])]["rank"] = int(node_degree[1])
    # A graph's source_id indicates which documents it came from.
    if "source_id" not in g1.graph:
        g1.graph["source_id"] = []
    g1.graph["source_id"] += g2.graph.get("source_id", [])
    return g1
  190. def compute_args_hash(*args):
  191. return md5(str(args).encode()).hexdigest()
  192. def handle_single_entity_extraction(
  193. record_attributes: list[str],
  194. chunk_key: str,
  195. ):
  196. if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
  197. return None
  198. # add this record as a node in the G
  199. entity_name = clean_str(record_attributes[1].upper())
  200. if not entity_name.strip():
  201. return None
  202. entity_type = clean_str(record_attributes[2].upper())
  203. entity_description = clean_str(record_attributes[3])
  204. entity_source_id = chunk_key
  205. return dict(
  206. entity_name=entity_name.upper(),
  207. entity_type=entity_type.upper(),
  208. description=entity_description,
  209. source_id=entity_source_id,
  210. )
  211. def handle_single_relationship_extraction(record_attributes: list[str], chunk_key: str):
  212. if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
  213. return None
  214. # add this record as edge
  215. source = clean_str(record_attributes[1].upper())
  216. target = clean_str(record_attributes[2].upper())
  217. edge_description = clean_str(record_attributes[3])
  218. edge_keywords = clean_str(record_attributes[4])
  219. edge_source_id = chunk_key
  220. weight = (
  221. float(record_attributes[-1]) if is_float_regex(record_attributes[-1]) else 1.0
  222. )
  223. pair = sorted([source.upper(), target.upper()])
  224. return dict(
  225. src_id=pair[0],
  226. tgt_id=pair[1],
  227. weight=weight,
  228. description=edge_description,
  229. keywords=edge_keywords,
  230. source_id=edge_source_id,
  231. metadata={"created_at": time.time()},
  232. )
  233. def pack_user_ass_to_openai_messages(*args: str):
  234. roles = ["user", "assistant"]
  235. return [
  236. {"role": roles[i % 2], "content": content} for i, content in enumerate(args)
  237. ]
  238. def split_string_by_multi_markers(content: str, markers: list[str]) -> list[str]:
  239. """Split a string by multiple markers"""
  240. if not markers:
  241. return [content]
  242. results = re.split("|".join(re.escape(marker) for marker in markers), content)
  243. return [r.strip() for r in results if r.strip()]
  244. def is_float_regex(value):
  245. return bool(re.match(r"^[-+]?[0-9]*\.?[0-9]+$", value))
  246. def chunk_id(chunk):
  247. return xxhash.xxh64((chunk["content_with_weight"] + chunk["kb_id"]).encode("utf-8")).hexdigest()
async def graph_node_to_chunk(kb_id, embd_mdl, ent_name, meta, chunks):
    """Convert one graph entity into an index chunk (with embedding) and append it to *chunks*.

    *meta* must carry "entity_type", "description", and "source_id".
    """
    global chat_limiter
    chunk = {
        "id": get_uuid(),
        "important_kwd": [ent_name],
        "title_tks": rag_tokenizer.tokenize(ent_name),
        "entity_kwd": ent_name,
        "knowledge_graph_kwd": "entity",
        "entity_type_kwd": meta["entity_type"],
        "content_with_weight": json.dumps(meta, ensure_ascii=False),
        "content_ltks": rag_tokenizer.tokenize(meta["description"]),
        "source_id": meta["source_id"],
        "kb_id": kb_id,
        "available_int": 0
    }
    chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
    # Reuse a cached embedding when available; otherwise compute one off-thread
    # under the shared concurrency limiter with a 3-second deadline.
    ebd = get_embed_cache(embd_mdl.llm_name, ent_name)
    if ebd is None:
        async with chat_limiter:
            with trio.fail_after(3):
                ebd, _ = await trio.to_thread.run_sync(lambda: embd_mdl.encode([ent_name]))
        ebd = ebd[0]
        set_embed_cache(embd_mdl.llm_name, ent_name, ebd)
    assert ebd is not None
    # Vector field name encodes its dimensionality, e.g. "q_768_vec".
    chunk["q_%d_vec" % len(ebd)] = ebd
    chunks.append(chunk)
  274. @timeout(3, 3)
  275. def get_relation(tenant_id, kb_id, from_ent_name, to_ent_name, size=1):
  276. ents = from_ent_name
  277. if isinstance(ents, str):
  278. ents = [from_ent_name]
  279. if isinstance(to_ent_name, str):
  280. to_ent_name = [to_ent_name]
  281. ents.extend(to_ent_name)
  282. ents = list(set(ents))
  283. conds = {
  284. "fields": ["content_with_weight"],
  285. "size": size,
  286. "from_entity_kwd": ents,
  287. "to_entity_kwd": ents,
  288. "knowledge_graph_kwd": ["relation"]
  289. }
  290. res = []
  291. es_res = settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id] if isinstance(kb_id, str) else kb_id)
  292. for id in es_res.ids:
  293. try:
  294. if size == 1:
  295. return json.loads(es_res.field[id]["content_with_weight"])
  296. res.append(json.loads(es_res.field[id]["content_with_weight"]))
  297. except Exception:
  298. continue
  299. return res
async def graph_edge_to_chunk(kb_id, embd_mdl, from_ent_name, to_ent_name, meta, chunks):
    """Convert one graph relation into an index chunk (with embedding) and append it to *chunks*.

    *meta* must carry "description", "keywords", "source_id", and "weight".
    """
    chunk = {
        "id": get_uuid(),
        "from_entity_kwd": from_ent_name,
        "to_entity_kwd": to_ent_name,
        "knowledge_graph_kwd": "relation",
        "content_with_weight": json.dumps(meta, ensure_ascii=False),
        "content_ltks": rag_tokenizer.tokenize(meta["description"]),
        "important_kwd": meta["keywords"],
        "source_id": meta["source_id"],
        "weight_int": int(meta["weight"]),
        "kb_id": kb_id,
        "available_int": 0
    }
    chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
    # Cache key is just "from->to"; the embedded text additionally includes the description.
    txt = f"{from_ent_name}->{to_ent_name}"
    ebd = get_embed_cache(embd_mdl.llm_name, txt)
    if ebd is None:
        async with chat_limiter:
            with trio.fail_after(3):
                ebd, _ = await trio.to_thread.run_sync(lambda: embd_mdl.encode([txt+f": {meta['description']}"]))
        ebd = ebd[0]
        set_embed_cache(embd_mdl.llm_name, txt, ebd)
    assert ebd is not None
    # Vector field name encodes its dimensionality, e.g. "q_768_vec".
    chunk["q_%d_vec" % len(ebd)] = ebd
    chunks.append(chunk)
  326. async def does_graph_contains(tenant_id, kb_id, doc_id):
  327. # Get doc_ids of graph
  328. fields = ["source_id"]
  329. condition = {
  330. "knowledge_graph_kwd": ["graph"],
  331. "removed_kwd": "N",
  332. }
  333. res = await trio.to_thread.run_sync(lambda: settings.docStoreConn.search(fields, [], condition, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [kb_id]))
  334. fields2 = settings.docStoreConn.getFields(res, fields)
  335. graph_doc_ids = set()
  336. for chunk_id in fields2.keys():
  337. graph_doc_ids = set(fields2[chunk_id]["source_id"])
  338. return doc_id in graph_doc_ids
  339. async def get_graph_doc_ids(tenant_id, kb_id) -> list[str]:
  340. conds = {
  341. "fields": ["source_id"],
  342. "removed_kwd": "N",
  343. "size": 1,
  344. "knowledge_graph_kwd": ["graph"]
  345. }
  346. res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id]))
  347. doc_ids = []
  348. if res.total == 0:
  349. return doc_ids
  350. for id in res.ids:
  351. doc_ids = res.field[id]["source_id"]
  352. return doc_ids
async def get_graph(tenant_id, kb_id, exclude_rebuild=None):
    """Load the KB's knowledge graph from the doc store.

    Deserializes the stored whole-graph chunk when it is not flagged removed;
    otherwise rebuilds the graph from the per-document subgraph chunks.
    Returns None when no usable graph exists.
    """
    conds = {
        "fields": ["content_with_weight", "removed_kwd", "source_id"],
        "size": 1,
        "knowledge_graph_kwd": ["graph"]
    }
    res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id]))
    if not res.total == 0:
        for id in res.ids:
            try:
                if res.field[id]["removed_kwd"] == "N":
                    # Stored graph is current: deserialize it directly.
                    g = json_graph.node_link_graph(json.loads(res.field[id]["content_with_weight"]), edges="edges")
                    if "source_id" not in g.graph:
                        g.graph["source_id"] = res.field[id]["source_id"]
                else:
                    # Graph flagged removed: reassemble from subgraph chunks.
                    g = await rebuild_graph(tenant_id, kb_id, exclude_rebuild)
                return g
            except Exception:
                # Malformed payload: try the next hit (if any).
                continue
    result = None
    return result
async def set_graph(tenant_id: str, kb_id: str, embd_mdl, graph: nx.Graph, change: GraphChange, callback):
    """Persist *graph* plus the delta described by *change* into the doc store.

    Deletes stale graph/subgraph chunks and any removed nodes/edges, then
    inserts the serialized whole graph, one subgraph chunk per source
    document, and embedding chunks for every added/updated node and edge.
    Progress is reported through *callback* when provided.
    """
    global chat_limiter
    start = trio.current_time()
    # The whole-graph and per-document subgraph chunks are always rewritten.
    await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph"]}, search.index_name(tenant_id), kb_id))
    if change.removed_nodes:
        await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({"knowledge_graph_kwd": ["entity"], "entity_kwd": sorted(change.removed_nodes)}, search.index_name(tenant_id), kb_id))
    if change.removed_edges:
        async def del_edges(from_node, to_node):
            # Each deletion occupies one slot of the shared concurrency limiter.
            async with chat_limiter:
                await trio.to_thread.run_sync(lambda: settings.docStoreConn.delete({"knowledge_graph_kwd": ["relation"], "from_entity_kwd": from_node, "to_entity_kwd": to_node}, search.index_name(tenant_id), kb_id))
        async with trio.open_nursery() as nursery:
            for from_node, to_node in change.removed_edges:
                nursery.start_soon(del_edges, from_node, to_node)
    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph removed {len(change.removed_nodes)} nodes and {len(change.removed_edges)} edges from index in {now - start:.2f}s.")
    start = now
    # First chunk: the serialized whole graph.
    chunks = [{
        "id": get_uuid(),
        "content_with_weight": json.dumps(nx.node_link_data(graph, edges="edges"), ensure_ascii=False),
        "knowledge_graph_kwd": "graph",
        "kb_id": kb_id,
        "source_id": graph.graph.get("source_id", []),
        "available_int": 0,
        "removed_kwd": "N"
    }]
    # generate updated subgraphs
    for source in graph.graph["source_id"]:
        subgraph = graph.subgraph([n for n in graph.nodes if source in graph.nodes[n]["source_id"]]).copy()
        subgraph.graph["source_id"] = [source]
        for n in subgraph.nodes:
            subgraph.nodes[n]["source_id"] = [source]
        chunks.append({
            "id": get_uuid(),
            "content_with_weight": json.dumps(nx.node_link_data(subgraph, edges="edges"), ensure_ascii=False),
            "knowledge_graph_kwd": "subgraph",
            "kb_id": kb_id,
            "source_id": [source],
            "available_int": 0,
            "removed_kwd": "N"
        })
    # Embed added/updated nodes concurrently; each task appends to `chunks`.
    async with trio.open_nursery() as nursery:
        for ii, node in enumerate(change.added_updated_nodes):
            node_attrs = graph.nodes[node]
            nursery.start_soon(graph_node_to_chunk, kb_id, embd_mdl, node, node_attrs, chunks)
            if ii%100 == 9 and callback:
                callback(msg=f"Get embedding of nodes: {ii}/{len(change.added_updated_nodes)}")
    # Embed added/updated edges concurrently.
    async with trio.open_nursery() as nursery:
        for ii, (from_node, to_node) in enumerate(change.added_updated_edges):
            edge_attrs = graph.get_edge_data(from_node, to_node)
            if not edge_attrs:
                # added_updated_edges could record a non-existing edge if both from_node and to_node participate in nodes merging.
                continue
            nursery.start_soon(graph_edge_to_chunk, kb_id, embd_mdl, from_node, to_node, edge_attrs, chunks)
            if ii%100 == 9 and callback:
                callback(msg=f"Get embedding of edges: {ii}/{len(change.added_updated_edges)}")
    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph converted graph change to {len(chunks)} chunks in {now - start:.2f}s.")
    start = now
    # Bulk-insert in small batches, each with a 3-second deadline.
    es_bulk_size = 4
    for b in range(0, len(chunks), es_bulk_size):
        with trio.fail_after(3):
            doc_store_result = await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert(chunks[b:b + es_bulk_size], search.index_name(tenant_id), kb_id))
        if b % 100 == es_bulk_size and callback:
            callback(msg=f"Insert chunks: {b}/{len(chunks)}")
        if doc_store_result:
            error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
            raise Exception(error_message)
    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph added/updated {len(change.added_updated_nodes)} nodes and {len(change.added_updated_edges)} edges from index in {now - start:.2f}s.")
  446. def is_continuous_subsequence(subseq, seq):
  447. def find_all_indexes(tup, value):
  448. indexes = []
  449. start = 0
  450. while True:
  451. try:
  452. index = tup.index(value, start)
  453. indexes.append(index)
  454. start = index + 1
  455. except ValueError:
  456. break
  457. return indexes
  458. index_list = find_all_indexes(seq,subseq[0])
  459. for idx in index_list:
  460. if idx!=len(seq)-1:
  461. if seq[idx+1]==subseq[-1]:
  462. return True
  463. return False
  464. def merge_tuples(list1, list2):
  465. result = []
  466. for tup in list1:
  467. last_element = tup[-1]
  468. if last_element in tup[:-1]:
  469. result.append(tup)
  470. else:
  471. matching_tuples = [t for t in list2 if t[0] == last_element]
  472. already_match_flag = 0
  473. for match in matching_tuples:
  474. matchh = (match[1], match[0])
  475. if is_continuous_subsequence(match, tup) or is_continuous_subsequence(matchh, tup):
  476. continue
  477. already_match_flag = 1
  478. merged_tuple = tup + match[1:]
  479. result.append(merged_tuple)
  480. if not already_match_flag:
  481. result.append(tup)
  482. return result
  483. async def get_entity_type2sampels(idxnms, kb_ids: list):
  484. es_res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search({"knowledge_graph_kwd": "ty2ents", "kb_id": kb_ids,
  485. "size": 10000,
  486. "fields": ["content_with_weight"]},
  487. idxnms, kb_ids))
  488. res = defaultdict(list)
  489. for id in es_res.ids:
  490. smp = es_res.field[id].get("content_with_weight")
  491. if not smp:
  492. continue
  493. try:
  494. smp = json.loads(smp)
  495. except Exception as e:
  496. logging.exception(e)
  497. for ty, ents in smp.items():
  498. res[ty].extend(ents)
  499. return res
  500. def flat_uniq_list(arr, key):
  501. res = []
  502. for a in arr:
  503. a = a[key]
  504. if isinstance(a, list):
  505. res.extend(a)
  506. else:
  507. res.append(a)
  508. return list(set(res))
async def rebuild_graph(tenant_id, kb_id, exclude_rebuild=None):
    """Reassemble the KB graph by composing its stored per-document subgraph chunks.

    Subgraphs whose source_id matches *exclude_rebuild* (a single doc id or a
    list of doc ids) are skipped. Returns the merged graph, or None when no
    nodes were recovered.
    """
    graph = nx.Graph()
    flds = ["knowledge_graph_kwd", "content_with_weight", "source_id"]
    bs = 256
    # Page through subgraph chunks, bs at a time (bounded at 1024 pages).
    for i in range(0, 1024*bs, bs):
        es_res = await trio.to_thread.run_sync(lambda: settings.docStoreConn.search(flds, [],
                                                                                   {"kb_id": kb_id, "knowledge_graph_kwd": ["subgraph"]},
                                                                                   [],
                                                                                   OrderByExpr(),
                                                                                   i, bs, search.index_name(tenant_id), [kb_id]
                                                                                   ))
        # tot = settings.docStoreConn.getTotal(es_res)
        es_res = settings.docStoreConn.getFields(es_res, flds)
        if len(es_res) == 0:
            break
        for id, d in es_res.items():
            assert d["knowledge_graph_kwd"] == "subgraph"
            # Skip subgraphs originating from excluded documents.
            if isinstance(exclude_rebuild, list):
                if sum([n in d["source_id"] for n in exclude_rebuild]):
                    continue
            elif exclude_rebuild in d["source_id"]:
                continue
            next_graph = json_graph.node_link_graph(json.loads(d["content_with_weight"]), edges="edges")
            merged_graph = nx.compose(graph, next_graph)
            # nx.compose keeps only next_graph's attributes on shared nodes,
            # so concatenate source_id lists for nodes present in both graphs.
            merged_source = {
                n: graph.nodes[n]["source_id"] + next_graph.nodes[n]["source_id"]
                for n in graph.nodes & next_graph.nodes
            }
            nx.set_node_attributes(merged_graph, merged_source, "source_id")
            if "source_id" in graph.graph:
                merged_graph.graph["source_id"] = graph.graph["source_id"] + next_graph.graph["source_id"]
            else:
                merged_graph.graph["source_id"] = next_graph.graph["source_id"]
            graph = merged_graph
    if len(graph.nodes) == 0:
        return None
    graph.graph["source_id"] = sorted(graph.graph["source_id"])
    return graph