### What problem does this PR solve?

https://github.com/infiniflow/ragflow/issues/9177

The root cause is that Gemini's API uses a different parameter name: it expects `max_output_tokens` rather than the `max_tokens` key RAGFlow passes. From the Gemini SDK docstring:

> max_output_tokens (int): Optional. The maximum number of tokens to include in a response candidate. Note: The default value varies by model, see the ``Model.output_token_limit`` attribute of the ``Model`` returned from the ``getModel`` function. This field is a member of `oneof`_ ``_max_output_tokens``.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
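For illustration, here is a minimal standalone sketch of the key rename the patch performs. The `clean_conf` helper and the sample `gen_conf` dict are hypothetical, not RAGFlow code:

```python
# Hypothetical standalone version of the rename done in GeminiChat.
# OpenAI-style configs use "max_tokens", but Gemini's generation config
# expects "max_output_tokens", so the key must be translated.
def clean_conf(gen_conf: dict) -> dict:
    for k in list(gen_conf.keys()):
        # Drop anything Gemini's generation config does not accept.
        if k not in ["temperature", "top_p", "max_tokens"]:
            del gen_conf[k]
    # Rename max_tokens -> max_output_tokens for Gemini.
    if "max_tokens" in gen_conf:
        gen_conf["max_output_tokens"] = gen_conf.pop("max_tokens")
    return gen_conf


print(clean_conf({"temperature": 0.7, "max_tokens": 512, "presence_penalty": 0.4}))
# {'temperature': 0.7, 'max_output_tokens': 512}
```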
```diff
@@ -1075,6 +1075,9 @@ class GeminiChat(Base):
         for k in list(gen_conf.keys()):
             if k not in ["temperature", "top_p", "max_tokens"]:
                 del gen_conf[k]
+            # if max_tokens exists, rename it to max_output_tokens to match Gemini's API
+            if k == "max_tokens":
+                gen_conf["max_output_tokens"] = gen_conf.pop("max_tokens")
         return gen_conf

     def _chat(self, history, gen_conf={}, **kwargs):
```
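With the rename in place, the cleaned dict can be passed straight to the Gemini SDK. A hedged usage sketch, assuming the `google-generativeai` package, a placeholder API key, and an example model name:

```python
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder key
model = genai.GenerativeModel("gemini-1.5-flash")  # example model name

# {"max_output_tokens": 512, ...} is accepted here; a stray "max_tokens"
# key would be rejected when the SDK builds its generation config.
response = model.generate_content(
    "Summarize RAGFlow in one sentence.",
    generation_config={"temperature": 0.7, "max_output_tokens": 512},
)
print(response.text)
```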