
fix: #18132 auto_generate name can't work with DeepSeek LLM model (#18646)

Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Tag: 1.4.1
cooper.wu committed 5 months ago
Parent commit: 2cad98f01f
1 changed file with 7 additions and 3 deletions

api/core/llm_generator/llm_generator.py (+7, -3)
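In short, the commit makes two changes: it raises the name-generation budget from max_tokens=100 to 500, and it wraps the JSON parse in a try/except so that an answer that cannot be parsed falls back to the raw user query instead of raising json.JSONDecodeError.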

@@ -51,15 +51,19 @@ class LLMGenerator:
         response = cast(
             LLMResult,
             model_instance.invoke_llm(
-                prompt_messages=list(prompts), model_parameters={"max_tokens": 100, "temperature": 1}, stream=False
+                prompt_messages=list(prompts), model_parameters={"max_tokens": 500, "temperature": 1}, stream=False
             ),
         )
         answer = cast(str, response.message.content)
         cleaned_answer = re.sub(r"^.*(\{.*\}).*$", r"\1", answer, flags=re.DOTALL)
         if cleaned_answer is None:
             return ""
-        result_dict = json.loads(cleaned_answer)
-        answer = result_dict["Your Output"]
+        try:
+            result_dict = json.loads(cleaned_answer)
+            answer = result_dict["Your Output"]
+        except json.JSONDecodeError as e:
+            logging.exception("Failed to generate name after answer, use query instead")
+            answer = query
         name = answer.strip()
 
         if len(name) > 75:
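For context, below is a minimal, self-contained sketch of the patched parsing path. It assumes the failure mode this commit appears to target: reasoning-style models such as DeepSeek emit chain-of-thought text around (or instead of) the expected JSON object, so the old 100-token budget could truncate the output before the JSON appeared and json.loads would raise. The extract_name helper and the sample strings are hypothetical, for illustration only; in Dify this logic lives inside the method shown in the diff above.

# A minimal, runnable sketch of the patched parsing path (illustrative only;
# `extract_name` and the sample strings are hypothetical, not Dify API).
import json
import logging
import re


def extract_name(answer: str, query: str) -> str:
    # Keep only the outermost {...} block, tolerating prose around the JSON,
    # e.g. the reasoning text that DeepSeek-style models tend to emit.
    cleaned_answer = re.sub(r"^.*(\{.*\}).*$", r"\1", answer, flags=re.DOTALL)
    try:
        result_dict = json.loads(cleaned_answer)
        answer = result_dict["Your Output"]
    except json.JSONDecodeError:
        # Non-JSON or truncated output (e.g. cut off by max_tokens):
        # fall back to the user's query instead of raising.
        logging.exception("Failed to generate name after answer, use query instead")
        answer = query
    return answer.strip()


print(extract_name('Reasoning... {"Your Output": "Trip planning"}', "plan a trip"))
# -> "Trip planning"
print(extract_name("Okay, the user wants", "plan a trip"))  # truncated output
# -> "plan a trip" (fallback, after the exception is logged)

Note that the patch only catches json.JSONDecodeError, so a well-formed JSON object that lacks the "Your Output" key would still raise KeyError; the sketch mirrors that behavior rather than fixing it.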
