### What problem does this PR solve? #5944 ### Type of change - [x] Bug Fix (non-breaking change which fixes an issue) — tags/v0.17.2
| @@ -338,8 +338,6 @@ def list_app(): | |||
| llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms]) | |||
| for o in objs: | |||
| if not o.api_key: | |||
| continue | |||
| if o.llm_name + "@" + o.llm_factory in llm_set: | |||
| continue | |||
| llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True}) | |||
| @@ -287,12 +287,6 @@ def chat_completion_openai_like(tenant_id, chat_id): | |||
| answer = ans["answer"] | |||
| incremental = answer[should_split_index:] | |||
| token_used += len(incremental) | |||
| """ | |||
| bugfix: When calling the Create chat completion API, the response data is incoherent. | |||
| bug code: token_used += len(incremental) | |||
| fix author: 任奇 | |||
| """ | |||
| if incremental.endswith("</think>"): | |||
| response_data_len = len(incremental.rstrip("</think>")) | |||
| else: | |||