選択できるのは25トピックまでです。 トピックは、先頭が英数字で、英数字とダッシュ('-')を使用した35文字以内のものにしてください。

index.py 10KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import json
  17. import logging
  18. import networkx as nx
  19. import trio
  20. from api import settings
  21. from api.utils import get_uuid
  22. from graphrag.light.graph_extractor import GraphExtractor as LightKGExt
  23. from graphrag.general.graph_extractor import GraphExtractor as GeneralKGExt
  24. from graphrag.general.community_reports_extractor import CommunityReportsExtractor
  25. from graphrag.entity_resolution import EntityResolution
  26. from graphrag.general.extractor import Extractor
  27. from graphrag.utils import (
  28. graph_merge,
  29. get_graph,
  30. set_graph,
  31. chunk_id,
  32. does_graph_contains,
  33. tidy_graph,
  34. GraphChange,
  35. )
  36. from rag.nlp import rag_tokenizer, search
  37. from rag.utils.redis_conn import RedisDistributedLock
  38. async def run_graphrag(
  39. row: dict,
  40. language,
  41. with_resolution: bool,
  42. with_community: bool,
  43. chat_model,
  44. embedding_model,
  45. callback,
  46. ):
  47. start = trio.current_time()
  48. tenant_id, kb_id, doc_id = row["tenant_id"], str(row["kb_id"]), row["doc_id"]
  49. chunks = []
  50. for d in settings.retrievaler.chunk_list(
  51. doc_id, tenant_id, [kb_id], fields=["content_with_weight", "doc_id"]
  52. ):
  53. chunks.append(d["content_with_weight"])
  54. subgraph = await generate_subgraph(
  55. LightKGExt
  56. if row["parser_config"]["graphrag"]["method"] != "general"
  57. else GeneralKGExt,
  58. tenant_id,
  59. kb_id,
  60. doc_id,
  61. chunks,
  62. language,
  63. row["parser_config"]["graphrag"]["entity_types"],
  64. chat_model,
  65. embedding_model,
  66. callback,
  67. )
  68. if not subgraph:
  69. return
  70. subgraph_nodes = set(subgraph.nodes())
  71. new_graph = await merge_subgraph(
  72. tenant_id,
  73. kb_id,
  74. doc_id,
  75. subgraph,
  76. embedding_model,
  77. callback,
  78. )
  79. assert new_graph is not None
  80. if not with_resolution or not with_community:
  81. return
  82. if with_resolution:
  83. await resolve_entities(
  84. new_graph,
  85. subgraph_nodes,
  86. tenant_id,
  87. kb_id,
  88. doc_id,
  89. chat_model,
  90. embedding_model,
  91. callback,
  92. )
  93. if with_community:
  94. await extract_community(
  95. new_graph,
  96. tenant_id,
  97. kb_id,
  98. doc_id,
  99. chat_model,
  100. embedding_model,
  101. callback,
  102. )
  103. now = trio.current_time()
  104. callback(msg=f"GraphRAG for doc {doc_id} done in {now - start:.2f} seconds.")
  105. return
async def generate_subgraph(
    extractor: Extractor,
    tenant_id: str,
    kb_id: str,
    doc_id: str,
    chunks: list[str],
    language,
    entity_types,
    llm_bdl,
    embed_bdl,
    callback,
):
    """Extract a knowledge subgraph from one document's chunks and persist it.

    Builds a networkx graph from the entities/relations returned by
    `extractor`, drops relations whose endpoints were not extracted, and
    stores the whole subgraph as a single "subgraph" chunk in the doc store
    (replacing any previous subgraph chunk for this doc).

    Returns the subgraph, or None if the global graph already contains this
    document.
    """
    # Skip extraction entirely if this doc was already merged into the graph.
    contains = await does_graph_contains(tenant_id, kb_id, doc_id)
    if contains:
        callback(msg=f"Graph already contains {doc_id}")
        return None
    start = trio.current_time()
    ext = extractor(
        llm_bdl,
        language=language,
        entity_types=entity_types,
    )
    ents, rels = await ext(doc_id, chunks, callback)
    subgraph = nx.Graph()
    for ent in ents:
        assert "description" in ent, f"entity {ent} does not have description"
        ent["source_id"] = [doc_id]
        subgraph.add_node(ent["entity_name"], **ent)

    # Relations referencing entities that were not extracted are dropped
    # rather than creating dangling nodes.
    ignored_rels = 0
    for rel in rels:
        assert "description" in rel, f"relation {rel} does not have description"
        if not subgraph.has_node(rel["src_id"]) or not subgraph.has_node(rel["tgt_id"]):
            ignored_rels += 1
            continue
        rel["source_id"] = [doc_id]
        subgraph.add_edge(
            rel["src_id"],
            rel["tgt_id"],
            **rel,
        )
    if ignored_rels:
        callback(msg=f"ignored {ignored_rels} relations due to missing entities.")
    tidy_graph(subgraph, callback)

    subgraph.graph["source_id"] = [doc_id]
    # Serialize the subgraph as a node-link JSON chunk; available_int=0 keeps
    # it out of normal retrieval.
    chunk = {
        "content_with_weight": json.dumps(
            nx.node_link_data(subgraph, edges="edges"), ensure_ascii=False
        ),
        "knowledge_graph_kwd": "subgraph",
        "kb_id": kb_id,
        "source_id": [doc_id],
        "available_int": 0,
        "removed_kwd": "N",
    }
    cid = chunk_id(chunk)
    # Delete-then-insert so there is at most one subgraph chunk per doc.
    # Doc-store calls are blocking, so they run in worker threads.
    await trio.to_thread.run_sync(
        lambda: settings.docStoreConn.delete(
            {"knowledge_graph_kwd": "subgraph", "source_id": doc_id}, search.index_name(tenant_id), kb_id
        )
    )
    await trio.to_thread.run_sync(
        lambda: settings.docStoreConn.insert(
            [{"id": cid, **chunk}], search.index_name(tenant_id), kb_id
        )
    )
    now = trio.current_time()
    callback(msg=f"generated subgraph for doc {doc_id} in {now - start:.2f} seconds.")
    return subgraph
  174. async def merge_subgraph(
  175. tenant_id: str,
  176. kb_id: str,
  177. doc_id: str,
  178. subgraph: nx.Graph,
  179. embedding_model,
  180. callback,
  181. ):
  182. graphrag_task_lock = RedisDistributedLock(f"graphrag_task_{kb_id}", lock_value=doc_id, timeout=600)
  183. while True:
  184. if graphrag_task_lock.acquire():
  185. break
  186. callback(msg=f"merge_subgraph {doc_id} is waiting graphrag_task_lock")
  187. await trio.sleep(10)
  188. start = trio.current_time()
  189. change = GraphChange()
  190. old_graph = await get_graph(tenant_id, kb_id)
  191. if old_graph is not None:
  192. logging.info("Merge with an exiting graph...................")
  193. tidy_graph(old_graph, callback)
  194. new_graph = graph_merge(old_graph, subgraph, change)
  195. else:
  196. new_graph = subgraph
  197. change.added_updated_nodes = set(new_graph.nodes())
  198. change.added_updated_edges = set(new_graph.edges())
  199. pr = nx.pagerank(new_graph)
  200. for node_name, pagerank in pr.items():
  201. new_graph.nodes[node_name]["pagerank"] = pagerank
  202. await set_graph(tenant_id, kb_id, embedding_model, new_graph, change, callback)
  203. graphrag_task_lock.release()
  204. now = trio.current_time()
  205. callback(
  206. msg=f"merging subgraph for doc {doc_id} into the global graph done in {now - start:.2f} seconds."
  207. )
  208. return new_graph
  209. async def resolve_entities(
  210. graph,
  211. subgraph_nodes: set[str],
  212. tenant_id: str,
  213. kb_id: str,
  214. doc_id: str,
  215. llm_bdl,
  216. embed_bdl,
  217. callback,
  218. ):
  219. graphrag_task_lock = RedisDistributedLock(f"graphrag_task_{kb_id}", lock_value=doc_id, timeout=600)
  220. while True:
  221. if graphrag_task_lock.acquire():
  222. break
  223. callback(msg=f"resolve_entities {doc_id} is waiting graphrag_task_lock")
  224. await trio.sleep(10)
  225. start = trio.current_time()
  226. er = EntityResolution(
  227. llm_bdl,
  228. )
  229. reso = await er(graph, subgraph_nodes, callback=callback)
  230. graph = reso.graph
  231. change = reso.change
  232. callback(msg=f"Graph resolution removed {len(change.removed_nodes)} nodes and {len(change.removed_edges)} edges.")
  233. callback(msg="Graph resolution updated pagerank.")
  234. await set_graph(tenant_id, kb_id, embed_bdl, graph, change, callback)
  235. graphrag_task_lock.release()
  236. now = trio.current_time()
  237. callback(msg=f"Graph resolution done in {now - start:.2f}s.")
  238. async def extract_community(
  239. graph,
  240. tenant_id: str,
  241. kb_id: str,
  242. doc_id: str,
  243. llm_bdl,
  244. embed_bdl,
  245. callback,
  246. ):
  247. graphrag_task_lock = RedisDistributedLock(f"graphrag_task_{kb_id}", lock_value=doc_id, timeout=600)
  248. while True:
  249. if graphrag_task_lock.acquire():
  250. break
  251. callback(msg=f"extract_community {doc_id} is waiting graphrag_task_lock")
  252. await trio.sleep(10)
  253. start = trio.current_time()
  254. ext = CommunityReportsExtractor(
  255. llm_bdl,
  256. )
  257. cr = await ext(graph, callback=callback)
  258. community_structure = cr.structured_output
  259. community_reports = cr.output
  260. doc_ids = graph.graph["source_id"]
  261. now = trio.current_time()
  262. callback(
  263. msg=f"Graph extracted {len(cr.structured_output)} communities in {now - start:.2f}s."
  264. )
  265. start = now
  266. chunks = []
  267. for stru, rep in zip(community_structure, community_reports):
  268. obj = {
  269. "report": rep,
  270. "evidences": "\n".join([f["explanation"] for f in stru["findings"]]),
  271. }
  272. chunk = {
  273. "id": get_uuid(),
  274. "docnm_kwd": stru["title"],
  275. "title_tks": rag_tokenizer.tokenize(stru["title"]),
  276. "content_with_weight": json.dumps(obj, ensure_ascii=False),
  277. "content_ltks": rag_tokenizer.tokenize(
  278. obj["report"] + " " + obj["evidences"]
  279. ),
  280. "knowledge_graph_kwd": "community_report",
  281. "weight_flt": stru["weight"],
  282. "entities_kwd": stru["entities"],
  283. "important_kwd": stru["entities"],
  284. "kb_id": kb_id,
  285. "source_id": list(doc_ids),
  286. "available_int": 0,
  287. }
  288. chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(
  289. chunk["content_ltks"]
  290. )
  291. chunks.append(chunk)
  292. await trio.to_thread.run_sync(
  293. lambda: settings.docStoreConn.delete(
  294. {"knowledge_graph_kwd": "community_report", "kb_id": kb_id},
  295. search.index_name(tenant_id),
  296. kb_id,
  297. )
  298. )
  299. es_bulk_size = 4
  300. for b in range(0, len(chunks), es_bulk_size):
  301. doc_store_result = await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert(chunks[b:b + es_bulk_size], search.index_name(tenant_id), kb_id))
  302. if doc_store_result:
  303. error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
  304. raise Exception(error_message)
  305. graphrag_task_lock.release()
  306. now = trio.current_time()
  307. callback(
  308. msg=f"Graph indexed {len(cr.structured_output)} communities in {now - start:.2f}s."
  309. )
  310. return community_structure, community_reports