You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

index.py 10KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import json
  17. import logging
  18. import os
  19. import networkx as nx
  20. import trio
  21. from api import settings
  22. from api.utils import get_uuid
  23. from api.utils.api_utils import timeout
  24. from graphrag.light.graph_extractor import GraphExtractor as LightKGExt
  25. from graphrag.general.graph_extractor import GraphExtractor as GeneralKGExt
  26. from graphrag.general.community_reports_extractor import CommunityReportsExtractor
  27. from graphrag.entity_resolution import EntityResolution
  28. from graphrag.general.extractor import Extractor
  29. from graphrag.utils import (
  30. graph_merge,
  31. get_graph,
  32. set_graph,
  33. chunk_id,
  34. does_graph_contains,
  35. tidy_graph,
  36. GraphChange,
  37. )
  38. from rag.nlp import rag_tokenizer, search
  39. from rag.utils.redis_conn import RedisDistributedLock
  40. async def run_graphrag(
  41. row: dict,
  42. language,
  43. with_resolution: bool,
  44. with_community: bool,
  45. chat_model,
  46. embedding_model,
  47. callback,
  48. ):
  49. enable_timeout_assertion=os.environ.get("ENABLE_TIMEOUT_ASSERTION")
  50. start = trio.current_time()
  51. tenant_id, kb_id, doc_id = row["tenant_id"], str(row["kb_id"]), row["doc_id"]
  52. chunks = []
  53. for d in settings.retrievaler.chunk_list(
  54. doc_id, tenant_id, [kb_id], fields=["content_with_weight", "doc_id"]
  55. ):
  56. chunks.append(d["content_with_weight"])
  57. with trio.fail_after(max(120, len(chunks)*60*10) if enable_timeout_assertion else 10000000000):
  58. subgraph = await generate_subgraph(
  59. LightKGExt
  60. if "method" not in row["kb_parser_config"].get("graphrag", {}) or row["kb_parser_config"]["graphrag"]["method"] != "general"
  61. else GeneralKGExt,
  62. tenant_id,
  63. kb_id,
  64. doc_id,
  65. chunks,
  66. language,
  67. row["kb_parser_config"]["graphrag"].get("entity_types", []),
  68. chat_model,
  69. embedding_model,
  70. callback,
  71. )
  72. if not subgraph:
  73. return
  74. graphrag_task_lock = RedisDistributedLock(f"graphrag_task_{kb_id}", lock_value=doc_id, timeout=1200)
  75. await graphrag_task_lock.spin_acquire()
  76. callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
  77. try:
  78. subgraph_nodes = set(subgraph.nodes())
  79. new_graph = await merge_subgraph(
  80. tenant_id,
  81. kb_id,
  82. doc_id,
  83. subgraph,
  84. embedding_model,
  85. callback,
  86. )
  87. assert new_graph is not None
  88. if not with_resolution and not with_community:
  89. return
  90. if with_resolution:
  91. await graphrag_task_lock.spin_acquire()
  92. callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
  93. await resolve_entities(
  94. new_graph,
  95. subgraph_nodes,
  96. tenant_id,
  97. kb_id,
  98. doc_id,
  99. chat_model,
  100. embedding_model,
  101. callback,
  102. )
  103. if with_community:
  104. await graphrag_task_lock.spin_acquire()
  105. callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
  106. await extract_community(
  107. new_graph,
  108. tenant_id,
  109. kb_id,
  110. doc_id,
  111. chat_model,
  112. embedding_model,
  113. callback,
  114. )
  115. finally:
  116. graphrag_task_lock.release()
  117. now = trio.current_time()
  118. callback(msg=f"GraphRAG for doc {doc_id} done in {now - start:.2f} seconds.")
  119. return
async def generate_subgraph(
    extractor: Extractor,
    tenant_id: str,
    kb_id: str,
    doc_id: str,
    chunks: list[str],
    language,
    entity_types,
    llm_bdl,
    embed_bdl,
    callback,
):
    """Extract a per-document knowledge subgraph and persist it to the doc store.

    Runs the given LLM ``extractor`` over the document's ``chunks``, builds an
    ``nx.Graph`` of the extracted entities/relations, and stores the whole
    subgraph as a single JSON "subgraph" chunk (replacing any earlier one for
    this document).

    Returns:
        The built ``nx.Graph``, or ``None`` if the global graph already
        contains this document.
    """
    # Idempotency guard: skip documents whose subgraph was already merged.
    contains = await does_graph_contains(tenant_id, kb_id, doc_id)
    if contains:
        callback(msg=f"Graph already contains {doc_id}")
        return None
    start = trio.current_time()
    ext = extractor(
        llm_bdl,
        language=language,
        entity_types=entity_types,
    )
    # ents/rels are lists of attribute dicts produced by the LLM extractor.
    ents, rels = await ext(doc_id, chunks, callback)
    subgraph = nx.Graph()
    for ent in ents:
        assert "description" in ent, f"entity {ent} does not have description"
        # Tag each node with its originating document for later merge/cleanup.
        ent["source_id"] = [doc_id]
        subgraph.add_node(ent["entity_name"], **ent)
    ignored_rels = 0
    for rel in rels:
        assert "description" in rel, f"relation {rel} does not have description"
        # Drop relations whose endpoints were not extracted as entities.
        if not subgraph.has_node(rel["src_id"]) or not subgraph.has_node(rel["tgt_id"]):
            ignored_rels += 1
            continue
        rel["source_id"] = [doc_id]
        subgraph.add_edge(
            rel["src_id"],
            rel["tgt_id"],
            **rel,
        )
    if ignored_rels:
        callback(msg=f"ignored {ignored_rels} relations due to missing entities.")
    tidy_graph(subgraph, callback, check_attribute=False)
    subgraph.graph["source_id"] = [doc_id]
    # Serialize the whole subgraph into one doc-store chunk (node-link JSON).
    # available_int=0: presumably hides it from normal retrieval — confirm.
    chunk = {
        "content_with_weight": json.dumps(
            nx.node_link_data(subgraph, edges="edges"), ensure_ascii=False
        ),
        "knowledge_graph_kwd": "subgraph",
        "kb_id": kb_id,
        "source_id": [doc_id],
        "available_int": 0,
        "removed_kwd": "N",
    }
    cid = chunk_id(chunk)
    # Replace any previous subgraph chunk for this doc: delete, then insert.
    await trio.to_thread.run_sync(
        lambda: settings.docStoreConn.delete(
            {"knowledge_graph_kwd": "subgraph", "source_id": doc_id}, search.index_name(tenant_id), kb_id
        )
    )
    await trio.to_thread.run_sync(
        lambda: settings.docStoreConn.insert(
            [{"id": cid, **chunk}], search.index_name(tenant_id), kb_id
        )
    )
    now = trio.current_time()
    callback(msg=f"generated subgraph for doc {doc_id} in {now - start:.2f} seconds.")
    return subgraph
  188. @timeout(60*3)
  189. async def merge_subgraph(
  190. tenant_id: str,
  191. kb_id: str,
  192. doc_id: str,
  193. subgraph: nx.Graph,
  194. embedding_model,
  195. callback,
  196. ):
  197. start = trio.current_time()
  198. change = GraphChange()
  199. old_graph = await get_graph(tenant_id, kb_id, subgraph.graph["source_id"])
  200. if old_graph is not None:
  201. logging.info("Merge with an exiting graph...................")
  202. tidy_graph(old_graph, callback)
  203. new_graph = graph_merge(old_graph, subgraph, change)
  204. else:
  205. new_graph = subgraph
  206. change.added_updated_nodes = set(new_graph.nodes())
  207. change.added_updated_edges = set(new_graph.edges())
  208. pr = nx.pagerank(new_graph)
  209. for node_name, pagerank in pr.items():
  210. new_graph.nodes[node_name]["pagerank"] = pagerank
  211. await set_graph(tenant_id, kb_id, embedding_model, new_graph, change, callback)
  212. now = trio.current_time()
  213. callback(
  214. msg=f"merging subgraph for doc {doc_id} into the global graph done in {now - start:.2f} seconds."
  215. )
  216. return new_graph
  217. @timeout(60*30, 1)
  218. async def resolve_entities(
  219. graph,
  220. subgraph_nodes: set[str],
  221. tenant_id: str,
  222. kb_id: str,
  223. doc_id: str,
  224. llm_bdl,
  225. embed_bdl,
  226. callback,
  227. ):
  228. start = trio.current_time()
  229. er = EntityResolution(
  230. llm_bdl,
  231. )
  232. reso = await er(graph, subgraph_nodes, callback=callback)
  233. graph = reso.graph
  234. change = reso.change
  235. callback(msg=f"Graph resolution removed {len(change.removed_nodes)} nodes and {len(change.removed_edges)} edges.")
  236. callback(msg="Graph resolution updated pagerank.")
  237. await set_graph(tenant_id, kb_id, embed_bdl, graph, change, callback)
  238. now = trio.current_time()
  239. callback(msg=f"Graph resolution done in {now - start:.2f}s.")
  240. @timeout(60*30, 1)
  241. async def extract_community(
  242. graph,
  243. tenant_id: str,
  244. kb_id: str,
  245. doc_id: str,
  246. llm_bdl,
  247. embed_bdl,
  248. callback,
  249. ):
  250. start = trio.current_time()
  251. ext = CommunityReportsExtractor(
  252. llm_bdl,
  253. )
  254. cr = await ext(graph, callback=callback)
  255. community_structure = cr.structured_output
  256. community_reports = cr.output
  257. doc_ids = graph.graph["source_id"]
  258. now = trio.current_time()
  259. callback(
  260. msg=f"Graph extracted {len(cr.structured_output)} communities in {now - start:.2f}s."
  261. )
  262. start = now
  263. chunks = []
  264. for stru, rep in zip(community_structure, community_reports):
  265. obj = {
  266. "report": rep,
  267. "evidences": "\n".join([f.get("explanation", "") for f in stru["findings"]]),
  268. }
  269. chunk = {
  270. "id": get_uuid(),
  271. "docnm_kwd": stru["title"],
  272. "title_tks": rag_tokenizer.tokenize(stru["title"]),
  273. "content_with_weight": json.dumps(obj, ensure_ascii=False),
  274. "content_ltks": rag_tokenizer.tokenize(
  275. obj["report"] + " " + obj["evidences"]
  276. ),
  277. "knowledge_graph_kwd": "community_report",
  278. "weight_flt": stru["weight"],
  279. "entities_kwd": stru["entities"],
  280. "important_kwd": stru["entities"],
  281. "kb_id": kb_id,
  282. "source_id": list(doc_ids),
  283. "available_int": 0,
  284. }
  285. chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(
  286. chunk["content_ltks"]
  287. )
  288. chunks.append(chunk)
  289. await trio.to_thread.run_sync(
  290. lambda: settings.docStoreConn.delete(
  291. {"knowledge_graph_kwd": "community_report", "kb_id": kb_id},
  292. search.index_name(tenant_id),
  293. kb_id,
  294. )
  295. )
  296. es_bulk_size = 4
  297. for b in range(0, len(chunks), es_bulk_size):
  298. doc_store_result = await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert(chunks[b:b + es_bulk_size], search.index_name(tenant_id), kb_id))
  299. if doc_store_result:
  300. error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
  301. raise Exception(error_message)
  302. now = trio.current_time()
  303. callback(
  304. msg=f"Graph indexed {len(cr.structured_output)} communities in {now - start:.2f}s."
  305. )
  306. return community_structure, community_reports