
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
 - [LightRag](https://github.com/HKUDS/LightRAG)
"""
import html
import json
import logging
import os
import re
import time
from collections import defaultdict
from copy import deepcopy
from hashlib import md5
from typing import Any, Callable

import networkx as nx
import numpy as np
import trio
import xxhash
from networkx.readwrite import json_graph

from api import settings
from rag.nlp import search, rag_tokenizer
from rag.utils.doc_store_conn import OrderByExpr
from rag.utils.redis_conn import REDIS_CONN

ErrorHandlerFn = Callable[[BaseException | None, str | None, dict | None], None]

chat_limiter = trio.CapacityLimiter(int(os.environ.get("MAX_CONCURRENT_CHATS", 10)))

def perform_variable_replacements(
    input: str, history: list[dict] | None = None, variables: dict | None = None
) -> str:
    """Perform variable replacements on the input string and in a chat log."""
    if history is None:
        history = []
    if variables is None:
        variables = {}

    result = input

    def replace_all(input: str) -> str:
        result = input
        for k, v in variables.items():
            result = result.replace(f"{{{k}}}", v)
        return result

    result = replace_all(result)
    for entry in history:
        if entry.get("role") == "system":
            entry["content"] = replace_all(entry.get("content") or "")

    return result

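# Illustrative usage of perform_variable_replacements (not part of the original module).
# The prompt text and the "kb_name" variable below are made-up examples; placeholders are
# written as "{name}" both in the input string and in system messages.
#
#     prompt = perform_variable_replacements(
#         "Answer using only {kb_name}.",
#         history=[{"role": "system", "content": "You search {kb_name}."}],
#         variables={"kb_name": "the demo knowledge base"},
#     )
#     # prompt == "Answer using only the demo knowledge base."
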
def clean_str(input: Any) -> str:
    """Clean an input string by removing HTML escapes, control characters, and other unwanted characters."""
    # If we get non-string input, just give it back
    if not isinstance(input, str):
        return input

    result = html.unescape(input.strip())
    # https://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python
    return re.sub(r"[\"\x00-\x1f\x7f-\x9f]", "", result)

def dict_has_keys_with_types(
    data: dict, expected_fields: list[tuple[str, type]]
) -> bool:
    """Return True if the given dictionary has the given keys with the given types."""
    for field, field_type in expected_fields:
        if field not in data:
            return False

        value = data[field]
        if not isinstance(value, field_type):
            return False
    return True

def get_llm_cache(llmnm, txt, history, genconf):
    hasher = xxhash.xxh64()
    hasher.update(str(llmnm).encode("utf-8"))
    hasher.update(str(txt).encode("utf-8"))
    hasher.update(str(history).encode("utf-8"))
    hasher.update(str(genconf).encode("utf-8"))

    k = hasher.hexdigest()
    bin = REDIS_CONN.get(k)
    if not bin:
        return
    return bin


def set_llm_cache(llmnm, txt, v, history, genconf):
    hasher = xxhash.xxh64()
    hasher.update(str(llmnm).encode("utf-8"))
    hasher.update(str(txt).encode("utf-8"))
    hasher.update(str(history).encode("utf-8"))
    hasher.update(str(genconf).encode("utf-8"))

    k = hasher.hexdigest()
    REDIS_CONN.set(k, v.encode("utf-8"), 24 * 3600)


def get_embed_cache(llmnm, txt):
    hasher = xxhash.xxh64()
    hasher.update(str(llmnm).encode("utf-8"))
    hasher.update(str(txt).encode("utf-8"))

    k = hasher.hexdigest()
    bin = REDIS_CONN.get(k)
    if not bin:
        return
    return np.array(json.loads(bin))


def set_embed_cache(llmnm, txt, arr):
    hasher = xxhash.xxh64()
    hasher.update(str(llmnm).encode("utf-8"))
    hasher.update(str(txt).encode("utf-8"))

    k = hasher.hexdigest()
    arr = json.dumps(arr.tolist() if isinstance(arr, np.ndarray) else arr)
    REDIS_CONN.set(k, arr.encode("utf-8"), 24 * 3600)


def get_tags_from_cache(kb_ids):
    hasher = xxhash.xxh64()
    hasher.update(str(kb_ids).encode("utf-8"))

    k = hasher.hexdigest()
    bin = REDIS_CONN.get(k)
    if not bin:
        return
    return bin


def set_tags_to_cache(kb_ids, tags):
    hasher = xxhash.xxh64()
    hasher.update(str(kb_ids).encode("utf-8"))

    k = hasher.hexdigest()
    REDIS_CONN.set(k, json.dumps(tags).encode("utf-8"), 600)

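# Illustrative read-through pattern for the Redis-backed caches above (not part of the
# original module). `chat_model` and its `.chat()` signature are assumptions for the sketch;
# only the cache helpers themselves are real. LLM and embedding entries expire after
# 24 hours, tag entries after 10 minutes.
#
#     cached = get_llm_cache(chat_model.llm_name, prompt, history, gen_conf)
#     if cached:
#         answer = cached
#     else:
#         answer = chat_model.chat(prompt, history, gen_conf)
#         set_llm_cache(chat_model.llm_name, prompt, answer, history, gen_conf)
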
def graph_merge(g1, g2):
    g = g2.copy()
    for n, attr in g1.nodes(data=True):
        if n not in g2.nodes():
            g.add_node(n, **attr)
            continue

    for source, target, attr in g1.edges(data=True):
        if g.has_edge(source, target):
            g[source][target].update({"weight": attr.get("weight", 0) + 1})
            continue
        g.add_edge(source, target)  # , **attr)

    for node_degree in g.degree:
        g.nodes[str(node_degree[0])]["rank"] = int(node_degree[1])
    return g

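# Illustrative behaviour of graph_merge (not part of the original module): nodes unique to
# g1 are copied into a copy of g2, an edge present in both graphs has its weight set to
# g1's weight + 1, and every node's "rank" attribute is set to its degree in the merged graph.
#
#     g1 = nx.Graph([("A", "B", {"weight": 2})])
#     g2 = nx.Graph([("A", "B", {"weight": 5}), ("B", "C", {})])
#     merged = graph_merge(g1, g2)
#     # merged["A"]["B"]["weight"] == 3, merged.nodes["B"]["rank"] == 2
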
def compute_args_hash(*args):
    return md5(str(args).encode()).hexdigest()


def handle_single_entity_extraction(
    record_attributes: list[str],
    chunk_key: str,
):
    if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
        return None

    # add this record as a node in the G
    entity_name = clean_str(record_attributes[1].upper())
    if not entity_name.strip():
        return None

    entity_type = clean_str(record_attributes[2].upper())
    entity_description = clean_str(record_attributes[3])
    entity_source_id = chunk_key
    return dict(
        entity_name=entity_name.upper(),
        entity_type=entity_type.upper(),
        description=entity_description,
        source_id=entity_source_id,
    )

def handle_single_relationship_extraction(record_attributes: list[str], chunk_key: str):
    if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
        return None

    # add this record as edge
    source = clean_str(record_attributes[1].upper())
    target = clean_str(record_attributes[2].upper())
    edge_description = clean_str(record_attributes[3])
    edge_keywords = clean_str(record_attributes[4])
    edge_source_id = chunk_key
    weight = (
        float(record_attributes[-1]) if is_float_regex(record_attributes[-1]) else 1.0
    )
    pair = sorted([source.upper(), target.upper()])
    return dict(
        src_id=pair[0],
        tgt_id=pair[1],
        weight=weight,
        description=edge_description,
        keywords=edge_keywords,
        source_id=edge_source_id,
        metadata={"created_at": time.time()},
    )

def pack_user_ass_to_openai_messages(*args: str):
    roles = ["user", "assistant"]
    return [
        {"role": roles[i % 2], "content": content} for i, content in enumerate(args)
    ]


def split_string_by_multi_markers(content: str, markers: list[str]) -> list[str]:
    """Split a string by multiple markers"""
    if not markers:
        return [content]
    results = re.split("|".join(re.escape(marker) for marker in markers), content)
    return [r.strip() for r in results if r.strip()]


def is_float_regex(value):
    return bool(re.match(r"^[-+]?[0-9]*\.?[0-9]+$", value))


def chunk_id(chunk):
    return xxhash.xxh64((chunk["content_with_weight"] + chunk["kb_id"]).encode("utf-8")).hexdigest()

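# Illustrative parsing flow for the helpers above (not part of the original module). The
# record type and the "<|>" delimiter are assumptions in the style of graphrag-like
# extraction prompts; the actual delimiters come from the prompts used elsewhere in the pipeline.
#
#     record = '"entity"<|>"NEO4J"<|>"ORGANIZATION"<|>"A graph database vendor"'
#     attrs = split_string_by_multi_markers(record, ["<|>"])
#     ent = handle_single_entity_extraction(attrs, chunk_key="chunk-0")
#     # clean_str drops the surrounding quotes, so:
#     # ent["entity_name"] == "NEO4J", ent["entity_type"] == "ORGANIZATION"
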
def get_entity_cache(tenant_id, kb_id, ent_name) -> str | list[str]:
    hasher = xxhash.xxh64()
    hasher.update(str(tenant_id).encode("utf-8"))
    hasher.update(str(kb_id).encode("utf-8"))
    hasher.update(str(ent_name).encode("utf-8"))

    k = hasher.hexdigest()
    bin = REDIS_CONN.get(k)
    if not bin:
        return
    return json.loads(bin)


def set_entity_cache(tenant_id, kb_id, ent_name, content_with_weight):
    hasher = xxhash.xxh64()
    hasher.update(str(tenant_id).encode("utf-8"))
    hasher.update(str(kb_id).encode("utf-8"))
    hasher.update(str(ent_name).encode("utf-8"))

    k = hasher.hexdigest()
    REDIS_CONN.set(k, content_with_weight.encode("utf-8"), 3600)

def get_entity(tenant_id, kb_id, ent_name):
    cache = get_entity_cache(tenant_id, kb_id, ent_name)
    if cache:
        return cache

    conds = {
        "fields": ["content_with_weight"],
        "entity_kwd": ent_name,
        "size": 10000,
        "knowledge_graph_kwd": ["entity"]
    }
    res = []
    es_res = settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id])
    for id in es_res.ids:
        try:
            if isinstance(ent_name, str):
                set_entity_cache(tenant_id, kb_id, ent_name, es_res.field[id]["content_with_weight"])
                return json.loads(es_res.field[id]["content_with_weight"])
            res.append(json.loads(es_res.field[id]["content_with_weight"]))
        except Exception:
            continue

    return res

def set_entity(tenant_id, kb_id, embd_mdl, ent_name, meta):
    chunk = {
        "important_kwd": [ent_name],
        "title_tks": rag_tokenizer.tokenize(ent_name),
        "entity_kwd": ent_name,
        "knowledge_graph_kwd": "entity",
        "entity_type_kwd": meta["entity_type"],
        "content_with_weight": json.dumps(meta, ensure_ascii=False),
        "content_ltks": rag_tokenizer.tokenize(meta["description"]),
        "source_id": list(set(meta["source_id"])),
        "kb_id": kb_id,
        "available_int": 0
    }
    chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
    set_entity_cache(tenant_id, kb_id, ent_name, chunk["content_with_weight"])
    res = settings.retrievaler.search({"entity_kwd": ent_name, "size": 1, "fields": []},
                                      search.index_name(tenant_id), [kb_id])
    if res.ids:
        settings.docStoreConn.update({"entity_kwd": ent_name}, chunk, search.index_name(tenant_id), kb_id)
    else:
        ebd = get_embed_cache(embd_mdl.llm_name, ent_name)
        if ebd is None:
            try:
                ebd, _ = embd_mdl.encode([ent_name])
                ebd = ebd[0]
                set_embed_cache(embd_mdl.llm_name, ent_name, ebd)
            except Exception as e:
                logging.exception(f"Fail to embed entity: {e}")
        if ebd is not None:
            chunk["q_%d_vec" % len(ebd)] = ebd
        settings.docStoreConn.insert([{"id": chunk_id(chunk), **chunk}], search.index_name(tenant_id), kb_id)

def get_relation(tenant_id, kb_id, from_ent_name, to_ent_name, size=1):
    ents = from_ent_name
    if isinstance(ents, str):
        ents = [from_ent_name]
    if isinstance(to_ent_name, str):
        to_ent_name = [to_ent_name]
    ents.extend(to_ent_name)
    ents = list(set(ents))
    conds = {
        "fields": ["content_with_weight"],
        "size": size,
        "from_entity_kwd": ents,
        "to_entity_kwd": ents,
        "knowledge_graph_kwd": ["relation"]
    }
    res = []
    es_res = settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id] if isinstance(kb_id, str) else kb_id)
    for id in es_res.ids:
        try:
            if size == 1:
                return json.loads(es_res.field[id]["content_with_weight"])
            res.append(json.loads(es_res.field[id]["content_with_weight"]))
        except Exception:
            continue
    return res

def set_relation(tenant_id, kb_id, embd_mdl, from_ent_name, to_ent_name, meta):
    chunk = {
        "from_entity_kwd": from_ent_name,
        "to_entity_kwd": to_ent_name,
        "knowledge_graph_kwd": "relation",
        "content_with_weight": json.dumps(meta, ensure_ascii=False),
        "content_ltks": rag_tokenizer.tokenize(meta["description"]),
        "important_kwd": meta["keywords"],
        "source_id": list(set(meta["source_id"])),
        "weight_int": int(meta["weight"]),
        "kb_id": kb_id,
        "available_int": 0
    }
    chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
    # Check whether a relation chunk for this (from, to) pair already exists.
    res = settings.retrievaler.search({"from_entity_kwd": from_ent_name, "to_entity_kwd": to_ent_name, "size": 1, "fields": []},
                                      search.index_name(tenant_id), [kb_id])
    if res.ids:
        settings.docStoreConn.update({"from_entity_kwd": from_ent_name, "to_entity_kwd": to_ent_name},
                                     chunk,
                                     search.index_name(tenant_id), kb_id)
    else:
        txt = f"{from_ent_name}->{to_ent_name}"
        ebd = get_embed_cache(embd_mdl.llm_name, txt)
        if ebd is None:
            try:
                ebd, _ = embd_mdl.encode([txt + f": {meta['description']}"])
                ebd = ebd[0]
                set_embed_cache(embd_mdl.llm_name, txt, ebd)
            except Exception as e:
                logging.exception(f"Fail to embed entity relation: {e}")
        if ebd is not None:
            chunk["q_%d_vec" % len(ebd)] = ebd
        settings.docStoreConn.insert([{"id": chunk_id(chunk), **chunk}], search.index_name(tenant_id), kb_id)

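# Illustrative shape of what get_entity / get_relation return (not part of the original
# module): the JSON stored in "content_with_weight", i.e. the dicts produced by the
# handle_single_*_extraction helpers, possibly merged upstream. A relation, for example,
# looks roughly like:
#
#     {"src_id": "A", "tgt_id": "B", "weight": 2.0, "description": "...",
#      "keywords": "...", "source_id": ["chunk-0"], "metadata": {"created_at": 1700000000.0}}
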
async def does_graph_contains(tenant_id, kb_id, doc_id):
    # Get doc_ids of graph
    fields = ["source_id"]
    condition = {
        "knowledge_graph_kwd": ["graph"],
        "removed_kwd": "N",
    }
    res = await trio.to_thread.run_sync(lambda: settings.docStoreConn.search(
        fields, [], condition, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [kb_id]))
    fields2 = settings.docStoreConn.getFields(res, fields)
    graph_doc_ids = set()
    for chunk_id in fields2.keys():
        graph_doc_ids = set(fields2[chunk_id]["source_id"])
    return doc_id in graph_doc_ids

async def get_graph_doc_ids(tenant_id, kb_id) -> list[str]:
    conds = {
        "fields": ["source_id"],
        "removed_kwd": "N",
        "size": 1,
        "knowledge_graph_kwd": ["graph"]
    }
    res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id]))
    doc_ids = []
    if res.total == 0:
        return doc_ids

    for id in res.ids:
        doc_ids = res.field[id]["source_id"]
    return doc_ids

async def get_graph(tenant_id, kb_id):
    conds = {
        "fields": ["content_with_weight", "source_id"],
        "removed_kwd": "N",
        "size": 1,
        "knowledge_graph_kwd": ["graph"]
    }
    res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id]))
    if res.total == 0:
        return None, []

    for id in res.ids:
        try:
            return json_graph.node_link_graph(json.loads(res.field[id]["content_with_weight"]), edges="edges"), \
                   res.field[id]["source_id"]
        except Exception:
            continue
    result = await rebuild_graph(tenant_id, kb_id)
    return result

async def set_graph(tenant_id, kb_id, graph, docids):
    chunk = {
        "content_with_weight": json.dumps(nx.node_link_data(graph, edges="edges"), ensure_ascii=False,
                                          indent=2),
        "knowledge_graph_kwd": "graph",
        "kb_id": kb_id,
        "source_id": list(docids),
        "available_int": 0,
        "removed_kwd": "N"
    }
    res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(
        {"knowledge_graph_kwd": "graph", "size": 1, "fields": []}, search.index_name(tenant_id), [kb_id]))
    if res.ids:
        await trio.to_thread.run_sync(lambda: settings.docStoreConn.update({"knowledge_graph_kwd": "graph"}, chunk,
                                                                           search.index_name(tenant_id), kb_id))
    else:
        await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert(
            [{"id": chunk_id(chunk), **chunk}], search.index_name(tenant_id), kb_id))

def is_continuous_subsequence(subseq, seq):
    def find_all_indexes(tup, value):
        indexes = []
        start = 0
        while True:
            try:
                index = tup.index(value, start)
                indexes.append(index)
                start = index + 1
            except ValueError:
                break
        return indexes

    index_list = find_all_indexes(seq, subseq[0])
    for idx in index_list:
        if idx != len(seq) - 1:
            if seq[idx + 1] == subseq[-1]:
                return True
    return False

def merge_tuples(list1, list2):
    result = []
    for tup in list1:
        last_element = tup[-1]
        if last_element in tup[:-1]:
            result.append(tup)
        else:
            matching_tuples = [t for t in list2 if t[0] == last_element]
            already_match_flag = 0
            for match in matching_tuples:
                matchh = (match[1], match[0])
                if is_continuous_subsequence(match, tup) or is_continuous_subsequence(matchh, tup):
                    continue
                already_match_flag = 1
                merged_tuple = tup + match[1:]
                result.append(merged_tuple)
            if not already_match_flag:
                result.append(tup)
    return result

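# Illustrative behaviour of the two path helpers above (not part of the original module):
# merge_tuples extends each path in list1 with any edge in list2 that starts at the path's
# last node, unless that edge already appears as consecutive nodes in the path (in either
# direction, as detected by is_continuous_subsequence).
#
#     merge_tuples([("A", "B")], [("B", "C"), ("B", "A")])
#     # -> [("A", "B", "C")]  (the edge ("B", "A") is rejected: it would walk straight back)
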
async def update_nodes_pagerank_nhop_neighbour(tenant_id, kb_id, graph, n_hop):
    """Write PageRank and n-hop neighbourhood paths onto each entity chunk, and refresh the
    per-type top-entity ("ty2ents") chunk for the knowledge base."""
    def n_neighbor(id):
        nonlocal graph, n_hop
        count = 0
        source_edge = list(graph.edges(id))
        if not source_edge:
            return []
        count = count + 1
        while count < n_hop:
            count = count + 1
            sc_edge = deepcopy(source_edge)
            source_edge = []
            for pair in sc_edge:
                append_edge = list(graph.edges(pair[-1]))
                for tuples in merge_tuples([pair], append_edge):
                    source_edge.append(tuples)
        nbrs = []
        for path in source_edge:
            n = {"path": path, "weights": []}
            wts = nx.get_edge_attributes(graph, "weight")
            for i in range(len(path) - 1):
                f, t = path[i], path[i + 1]
                n["weights"].append(wts.get((f, t), 0))
            nbrs.append(n)
        return nbrs

    pr = nx.pagerank(graph)
    try:
        async with trio.open_nursery() as nursery:
            for n, p in pr.items():
                graph.nodes[n]["pagerank"] = p
                # Bind n and p for this iteration; the update itself runs later in a worker thread.
                nursery.start_soon(lambda n=n, p=p: trio.to_thread.run_sync(
                    lambda: settings.docStoreConn.update(
                        {"entity_kwd": n, "kb_id": kb_id},
                        {"rank_flt": p,
                         "n_hop_with_weight": json.dumps(n_neighbor(n), ensure_ascii=False)},
                        search.index_name(tenant_id), kb_id)))
    except Exception as e:
        logging.exception(e)

    ty2ents = defaultdict(list)
    for p, r in sorted(pr.items(), key=lambda x: x[1], reverse=True):
        ty = graph.nodes[p].get("entity_type")
        if not ty or len(ty2ents[ty]) > 12:
            continue
        ty2ents[ty].append(p)

    chunk = {
        "content_with_weight": json.dumps(ty2ents, ensure_ascii=False),
        "kb_id": kb_id,
        "knowledge_graph_kwd": "ty2ents",
        "available_int": 0
    }
    res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(
        {"knowledge_graph_kwd": "ty2ents", "size": 1, "fields": []},
        search.index_name(tenant_id), [kb_id]))
    if res.ids:
        await trio.to_thread.run_sync(lambda: settings.docStoreConn.update({"knowledge_graph_kwd": "ty2ents"},
                                                                           chunk,
                                                                           search.index_name(tenant_id), kb_id))
    else:
        await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert(
            [{"id": chunk_id(chunk), **chunk}], search.index_name(tenant_id), kb_id))

async def get_entity_type2sampels(idxnms, kb_ids: list):
    es_res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(
        {"knowledge_graph_kwd": "ty2ents", "kb_id": kb_ids,
         "size": 10000,
         "fields": ["content_with_weight"]},
        idxnms, kb_ids))

    res = defaultdict(list)
    for id in es_res.ids:
        smp = es_res.field[id].get("content_with_weight")
        if not smp:
            continue
        try:
            smp = json.loads(smp)
        except Exception as e:
            logging.exception(e)
            continue

        for ty, ents in smp.items():
            res[ty].extend(ents)
    return res

def flat_uniq_list(arr, key):
    res = []
    for a in arr:
        a = a[key]
        if isinstance(a, list):
            res.extend(a)
        else:
            res.append(a)
    return list(set(res))

async def rebuild_graph(tenant_id, kb_id):
    graph = nx.Graph()
    src_ids = []
    flds = ["entity_kwd", "entity_type_kwd", "from_entity_kwd", "to_entity_kwd", "weight_int", "knowledge_graph_kwd", "source_id"]
    bs = 256
    for i in range(0, 39 * bs, bs):
        es_res = await trio.to_thread.run_sync(lambda: settings.docStoreConn.search(
            flds, [],
            {"kb_id": kb_id, "knowledge_graph_kwd": ["entity", "relation"]},
            [],
            OrderByExpr(),
            i, bs, search.index_name(tenant_id), [kb_id]
        ))
        tot = settings.docStoreConn.getTotal(es_res)
        if tot == 0:
            return None, None

        es_res = settings.docStoreConn.getFields(es_res, flds)
        for id, d in es_res.items():
            src_ids.extend(d.get("source_id", []))
            if d["knowledge_graph_kwd"] == "entity":
                graph.add_node(d["entity_kwd"], entity_type=d["entity_type_kwd"])
            elif "from_entity_kwd" in d and "to_entity_kwd" in d:
                graph.add_edge(
                    d["from_entity_kwd"],
                    d["to_entity_kwd"],
                    weight=int(d["weight_int"])
                )

        if len(es_res.keys()) < 128:
            return graph, list(set(src_ids))

    return graph, list(set(src_ids))