Browse Source

Fix: miscalculation of token count. (#6401)

### What problem does this PR solve?

#6308

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
tags/v0.18.0
Kevin Hu 7 months ago
parent
commit
8b7e53e643
No account linked to committer's email address
1 changed files with 1 additions and 1 deletions
  1. 1
    1
      graphrag/general/extractor.py

+ 1
- 1
graphrag/general/extractor.py View File

@@ -59,7 +59,7 @@ class Extractor:
response = get_llm_cache(self._llm.llm_name, system, hist, conf)
if response:
return response
_, system_msg = message_fit_in([{"role": "system", "content": system}], int(self._llm.max_length * 0.97))
_, system_msg = message_fit_in([{"role": "system", "content": system}], int(self._llm.max_length * 0.92))
response = self._llm.chat(system_msg[0]["content"], hist, conf)
response = re.sub(r"<think>.*</think>", "", response, flags=re.DOTALL)
if response.find("**ERROR**") >= 0:

Loading…
Cancel
Save