Pārlūkot izejas kodu

Fix graphrag: "role" user (#2273)

### What problem does this PR solve?

#2270 

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
tags/v0.11.0
H pirms 1 gada
vecāks
revīzija
c6e723f2ee
Revīzijas autora e-pasta adrese nav piesaistīta nevienam kontam

+ 2
- 2
agent/component/jin10.py Parādīt failu

@@ -100,8 +100,8 @@ class Jin10(ComponentBase, ABC):
if self._param.symbols_datatype == "quotes":
for i in response['data']:
i['Selling Price'] = i['a']
i['buying price'] = i['b']
i['commodity code'] = i['c']
i['Buying Price'] = i['b']
i['Commodity Code'] = i['c']
i['Stock Exchange'] = i['e']
i['Highest Price'] = i['h']
i['Yesterday’s Closing Price'] = i['hc']

+ 1
- 1
graphrag/claim_extractor.py Parādīt failu

@@ -170,7 +170,7 @@ class ClaimExtractor:
}
text = perform_variable_replacements(self._extraction_prompt, variables=variables)
gen_conf = {"temperature": 0.5}
results = self._llm.chat(text, [], gen_conf)
results = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
claims = results.strip().removesuffix(completion_delimiter)
history = [{"role": "system", "content": text}, {"role": "assistant", "content": results}]


+ 3
- 2
graphrag/community_reports_extractor.py Parādīt failu

@@ -76,7 +76,7 @@ class CommunityReportsExtractor:
text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables)
gen_conf = {"temperature": 0.3}
try:
response = self._llm.chat(text, [], gen_conf)
response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
token_count += num_tokens_from_string(text + response)
response = re.sub(r"^[^\{]*", "", response)
response = re.sub(r"[^\}]*$", "", response)
@@ -125,4 +125,5 @@ class CommunityReportsExtractor:
report_sections = "\n\n".join(
f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings
)
return f"# {title}\n\n{summary}\n\n{report_sections}"
return f"# {title}\n\n{summary}\n\n{report_sections}"

+ 1
- 1
graphrag/entity_resolution.py Parādīt failu

@@ -125,7 +125,7 @@ class EntityResolution:
}
text = perform_variable_replacements(self._resolution_prompt, variables=variables)

response = self._llm.chat(text, [], gen_conf)
response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
result = self._process_results(len(candidate_resolution_i[1]), response,
prompt_variables.get(self._record_delimiter_key,
DEFAULT_RECORD_DELIMITER),

+ 1
- 1
graphrag/graph_extractor.py Parādīt failu

@@ -163,7 +163,7 @@ class GraphExtractor:
token_count = 0
text = perform_variable_replacements(self._extraction_prompt, variables=variables)
gen_conf = {"temperature": 0.3}
response = self._llm.chat(text, [], gen_conf)
response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
token_count = num_tokens_from_string(text + response)

results = response or ""

+ 1
- 1
graphrag/mind_map_extractor.py Parādīt failu

@@ -180,7 +180,7 @@ class MindMapExtractor:
}
text = perform_variable_replacements(self._mind_map_prompt, variables=variables)
gen_conf = {"temperature": 0.5}
response = self._llm.chat(text, [], gen_conf)
response = self._llm.chat(text, [{"role": "user", "content": "Output:"}], gen_conf)
response = re.sub(r"```[^\n]*", "", response)
print(response)
print("---------------------------------------------------\n", self._todict(markdown_to_json.dictify(response)))

Notiek ielāde…
Atcelt
Saglabāt