Browse Source

fix: xinference chat completion error (#952)

tags/0.3.15
takatost 2 years ago
parent
commit
4f3053a8cc
No account linked to committer's email address
1 changed files with 2 additions and 2 deletions
  1. 2
    2
      api/core/third_party/langchain/llms/xinference_llm.py

+ 2
- 2
api/core/third_party/langchain/llms/xinference_llm.py View File

return combined_text_output
else:
    completion = model.chat(prompt=prompt, generate_config=generate_config)
return completion["choices"][0]["text"]
return completion["choices"][0]["message"]["content"]
elif isinstance(model, RESTfulGenerateModelHandle):
    generate_config: "LlamaCppGenerateConfig" = kwargs.get("generate_config", {})


completion = combined_text_output
else:
    completion = model.chat(prompt=prompt, generate_config=generate_config)
completion = completion["choices"][0]["text"]
completion = completion["choices"][0]["message"]["content"]


if stop is not None:
    completion = enforce_stop_tokens(completion, stop)

Loading…
Cancel
Save