@@ -142,6 +142,7 @@ class GraphExtractor:
                 total_token_count += token_count
                 if callback: callback(msg=f"{doc_index+1}/{total}, elapsed: {timer() - st}s, used tokens: {total_token_count}")
             except Exception as e:
+                if callback: callback("Knowledge graph extraction error:{}".format(str(e)))
                 logging.exception("error extracting graph")
                 self._on_error(
                     e,
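Note on the callback contract in this hunk: the progress call passes `msg` as a keyword while the new error call passes it positionally, so any hook must accept a single leading `msg` parameter. A minimal sketch of a compatible hook (the logging setup and the commented usage are illustrative assumptions, not part of this change):

```python
import logging

def progress(msg: str = ""):
    # Receives both progress updates ("3/10, elapsed: ...s, used tokens: ...")
    # and error reports ("Knowledge graph extraction error:..."), since the
    # extractor passes `msg` positionally at one call site and by keyword at
    # the other.
    logging.info("graph extraction: %s", msg)

# Hypothetical wiring; the real GraphExtractor call signature may differ:
# extractor(texts, callback=progress)
```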
@@ -184,6 +185,7 @@ class GraphExtractor:
             text = perform_variable_replacements(CONTINUE_PROMPT, history=history, variables=variables)
             history.append({"role": "user", "content": text})
             response = self._llm.chat("", history, gen_conf)
+            if response.find("**ERROR**") >=0: raise Exception(response)
             results += response or ""
 
             # if this is the final glean, don't bother updating the continuation flag
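
For context, this hunk sits inside the gleaning loop that GraphRAG-style extractors use: after the first extraction pass, CONTINUE_PROMPT repeatedly asks the model for entities it missed, and each response is appended to `results` and to the chat `history`. A sketch of the surrounding loop under stated assumptions (`self._max_gleanings`, `LOOP_PROMPT`, and the literal "YES" continuation check follow the GraphRAG convention and are not verified against this file):

```python
# Sketch of the gleaning loop around the hunk above; assumes the
# GraphRAG-style members self._max_gleanings and LOOP_PROMPT exist.
for i in range(self._max_gleanings):
    text = perform_variable_replacements(CONTINUE_PROMPT, history=history, variables=variables)
    history.append({"role": "user", "content": text})
    response = self._llm.chat("", history, gen_conf)
    if response.find("**ERROR**") >= 0:
        raise Exception(response)
    results += response or ""

    # if this is the final glean, don't bother updating the continuation flag
    if i >= self._max_gleanings - 1:
        break

    # Otherwise ask the model whether anything was missed; only a literal
    # "YES" triggers another gleaning round.
    history.append({"role": "assistant", "content": response})
    history.append({"role": "user", "content": LOOP_PROMPT})
    if self._llm.chat("", history, gen_conf) != "YES":
        break
```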