                 return combined_text_output
             else:
                 completion = model.chat(prompt=prompt, generate_config=generate_config)
-                return completion["choices"][0]["text"]
+                return completion["choices"][0]["message"]["content"]
         elif isinstance(model, RESTfulGenerateModelHandle):
             generate_config: "LlamaCppGenerateConfig" = kwargs.get("generate_config", {})
                 completion = combined_text_output
             else:
                 completion = model.chat(prompt=prompt, generate_config=generate_config)
-                completion = completion["choices"][0]["text"]
+                completion = completion["choices"][0]["message"]["content"]
         if stop is not None:
             completion = enforce_stop_tokens(completion, stop)
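For context on why the lookup key changes, here is a minimal sketch of the two response shapes involved, assuming OpenAI-style completion payloads (the example values are illustrative and not taken from this diff):

```python
# Hypothetical payloads, assuming OpenAI-style formats; real Xinference
# responses may carry additional fields.

# Generate-style response: the text sits directly on the choice.
generate_completion = {"choices": [{"text": "Paris is the capital of France."}]}
print(generate_completion["choices"][0]["text"])

# Chat-style response: the text is nested under a message object,
# which is why the parsing switches to ["message"]["content"].
chat_completion = {
    "choices": [
        {"message": {"role": "assistant", "content": "Paris is the capital of France."}}
    ]
}
print(chat_completion["choices"][0]["message"]["content"])
```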