        if user:
            extra_model_kwargs["user"] = user
        # clear illegal prompt messages
        prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)
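        # o1 models are requested without streaming, stream_options, or stop; when the
        # caller asked for a stream, generate a blocking result and replay it as a
        # stream afterwards (block_as_stream)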
        block_as_stream = False
        if model.startswith("o1"):
            if stream:
                block_as_stream = True
                stream = False

            if "stream_options" in extra_model_kwargs:
                del extra_model_kwargs["stream_options"]

            if "stop" in extra_model_kwargs:
                del extra_model_kwargs["stop"]
        # chat model
        response = client.chat.completions.create(
            messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages],
            model=model,
            stream=stream,
            **model_parameters,
            **extra_model_kwargs,
        )
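        # depending on `stream`, `response` is either an iterable of chunk objects or a
        # single completion object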
        if stream:
            return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools)
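        # blocking path: parse the full response, then replay it as a stream if the
        # caller originally requested streaming (o1 case)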
        block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools)

        if block_as_stream:
            return self._handle_chat_block_as_stream_response(block_result, prompt_messages, stop)

        return block_result
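    # Replays a blocking chat result as a simulated stream, used when a caller
    # requested streaming from a model handled in blocking mode (e.g. o1).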
    def _handle_chat_block_as_stream_response(
        self,