# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""
import re
from dataclasses import dataclass
from typing import Any, Callable

import networkx as nx
import trio

from graphrag.general.extractor import Extractor, ENTITY_EXTRACTION_MAX_GLEANINGS
from graphrag.light.graph_prompt import PROMPTS
from graphrag.utils import pack_user_ass_to_openai_messages, split_string_by_multi_markers, chat_limiter
from rag.llm.chat_model import Base as CompletionLLM
from rag.utils import num_tokens_from_string
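
# Entity/relation extraction over text chunks, following the GraphRAG gleaning
# protocol referenced above: one initial extraction pass per chunk, up to
# _max_gleanings follow-up passes, and a model-judged stopping condition.
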
@dataclass
class GraphExtractionResult:
    """Unipartite graph extraction result class definition."""

    output: nx.Graph
    source_docs: dict[Any, Any]


class GraphExtractor(Extractor):
    _max_gleanings: int

    def __init__(
        self,
        llm_invoker: CompletionLLM,
        language: str | None = "English",
        entity_types: list[str] | None = None,
        get_entity: Callable | None = None,
        set_entity: Callable | None = None,
        get_relation: Callable | None = None,
        set_relation: Callable | None = None,
        example_number: int = 2,
        max_gleanings: int | None = None,
    ):
        """Init method definition."""
        super().__init__(llm_invoker, language, entity_types, get_entity, set_entity, get_relation, set_relation)
        self._max_gleanings = (
            max_gleanings if max_gleanings is not None else ENTITY_EXTRACTION_MAX_GLEANINGS
        )
        self._example_number = example_number
        examples = "\n".join(
            PROMPTS["entity_extraction_examples"][: int(self._example_number)]
        )

        example_context_base = dict(
            tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
            record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
            completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
            entity_types=",".join(self._entity_types),
            language=self._language,
        )
        # Fill the delimiter/entity-type/language placeholders in the few-shot examples.
        examples = examples.format(**example_context_base)

        self._entity_extract_prompt = PROMPTS["entity_extraction"]
        self._context_base = dict(
            tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
            record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
            completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
            entity_types=",".join(self._entity_types),
            examples=examples,
            language=self._language,
        )
        self._continue_prompt = PROMPTS["entiti_continue_extraction"]
        self._if_loop_prompt = PROMPTS["entiti_if_loop_extraction"]
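        # Whatever of the model's context window remains after the fully
        # formatted extraction prompt (rendered with an empty input) is the
        # token budget available for the chunk text itself.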
        self._left_token_count = llm_invoker.max_length - num_tokens_from_string(
            self._entity_extract_prompt.format(
                **self._context_base, input_text="{input_text}"
            ).format(**self._context_base, input_text="")
        )
        # Never let prompt overhead squeeze the budget below 60% of the window.
        self._left_token_count = max(llm_invoker.max_length * 0.6, self._left_token_count)

    async def _process_single_content(self, chunk_key_dp: tuple[str, str], chunk_seq: int, num_chunks: int, out_results):
        token_count = 0
        chunk_key = chunk_key_dp[0]
        content = chunk_key_dp[1]
        hint_prompt = self._entity_extract_prompt.format(
            **self._context_base, input_text="{input_text}"
        ).format(**self._context_base, input_text=content)

        gen_conf = {"temperature": 0.8}
        async with chat_limiter:
            final_result = await trio.to_thread.run_sync(lambda: self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], gen_conf))
        token_count += num_tokens_from_string(hint_prompt + final_result)
        history = pack_user_ass_to_openai_messages("Output:", final_result, self._continue_prompt)
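        # Gleaning loop: re-prompt the model up to _max_gleanings times to
        # recover entities missed by the first pass; between passes a yes/no
        # prompt asks the model whether another pass is still worthwhile.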
        for now_glean_index in range(self._max_gleanings):
            async with chat_limiter:
                glean_result = await trio.to_thread.run_sync(lambda: self._chat(hint_prompt, history, gen_conf))
            history.extend([{"role": "assistant", "content": glean_result}, {"role": "user", "content": self._continue_prompt}])
            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + hint_prompt + self._continue_prompt)
            final_result += glean_result
            if now_glean_index == self._max_gleanings - 1:
                break

            async with chat_limiter:
                if_loop_result = await trio.to_thread.run_sync(lambda: self._chat(self._if_loop_prompt, history, gen_conf))
            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + if_loop_result + self._if_loop_prompt)
            if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
            if if_loop_result != "yes":
                break
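        # The model emits one parenthesized tuple per record; split on the
        # record/completion delimiters and keep only the text inside the
        # outermost parentheses of each record.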
        records = split_string_by_multi_markers(
            final_result,
            [self._context_base["record_delimiter"], self._context_base["completion_delimiter"]],
        )
        rcds = []
        for record in records:
            match = re.search(r"\((.*)\)", record)
            if match is None:
                continue
            rcds.append(match.group(1))
        records = rcds

        maybe_nodes, maybe_edges = self._entities_and_relations(chunk_key, records, self._context_base["tuple_delimiter"])
        out_results.append((maybe_nodes, maybe_edges, token_count))
        if self.callback:
            self.callback(0.5 + 0.1 * len(out_results) / num_chunks, msg=f"Entities extraction of chunk {chunk_seq} {len(out_results)}/{num_chunks} done, {len(maybe_nodes)} nodes, {len(maybe_edges)} edges, {token_count} tokens.")
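

if __name__ == "__main__":  # pragma: no cover - illustrative sketch only
    # Minimal smoke test of the record-parsing step above, assuming GraphRAG's
    # default delimiters ("<|>" inside tuples, "##" between records, and a
    # "<|COMPLETE|>" terminator). The sample string is invented for
    # illustration and is not real model output.
    sample = (
        '("entity"<|>"ALICE"<|>"person"<|>"An engineer")##'
        '("relationship"<|>"ALICE"<|>"ACME"<|>"works at"<|>8)<|COMPLETE|>'
    )
    demo_records = split_string_by_multi_markers(sample, ["##", "<|COMPLETE|>"])
    parsed = [m.group(1) for m in (re.search(r"\((.*)\)", r) for r in demo_records) if m]
    print(parsed)  # two tuple bodies: one entity record, one relationship record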