### What problem does this PR solve?

Fix streamed TTS so audio is synthesized and flushed per punctuation-delimited segment of the answer instead of only after the whole text is converted; apply a newly set API key to every model of a factory by upserting one `TenantLLM` row per model; and let `OpenAITTS` fall back to the default base URL when an empty one is passed, streaming response bytes as they arrive.

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
In the TTS streaming endpoint, the answer text is now split on sentence punctuation and each segment is synthesized separately, so the first audio chunks reach the client before the full answer has been converted:

```diff
 def stream_audio():
     try:
-        for chunk in tts_mdl.tts(text):
-            yield chunk
+        for txt in re.split(r"[,。/《》?;:!\n\r:;]+", text):
+            for chunk in tts_mdl.tts(txt):
+                yield chunk
     except Exception as e:
         yield ("data:" + json.dumps({"retcode": 500, "retmsg": str(e),
                                      "data": {"answer": "**ERROR**: " + str(e)}},
```
When an API key is set for a factory, the handler previously ran a single `filter_update` scoped only by tenant and factory, and inserted rows for every model only if that one update matched nothing. It now iterates the factory's models and upserts each `TenantLLM` row individually, keyed by model name:

```diff
     if msg:
         return get_data_error_result(retmsg=msg)

-    llm = {
+    llm_config = {
         "api_key": req["api_key"],
         "api_base": req.get("base_url", "")
     }
     for n in ["model_type", "llm_name"]:
         if n in req:
-            llm[n] = req[n]
+            llm_config[n] = req[n]

-    if not TenantLLMService.filter_update(
-            [TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory], llm):
-        for llm in LLMService.query(fid=factory):
+    for llm in LLMService.query(fid=factory):
+        if not TenantLLMService.filter_update(
+                [TenantLLM.tenant_id == current_user.id,
+                 TenantLLM.llm_factory == factory,
+                 TenantLLM.llm_name == llm.llm_name],
+                llm_config):
             TenantLLMService.save(
                 tenant_id=current_user.id,
                 llm_factory=factory,
                 llm_name=llm.llm_name,
                 model_type=llm.model_type,
-                api_key=req["api_key"],
-                api_base=req.get("base_url", "")
+                api_key=llm_config["api_key"],
+                api_base=llm_config["api_base"]
             )

     return get_json_result(data=True)
```
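The loop is a plain update-else-insert upsert. The sketch below shows the same pattern with `sqlite3` standing in for the project's peewee-backed service layer; the table and helper names are mine, not the repo's:

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("""CREATE TABLE tenant_llm (
    tenant_id TEXT, llm_factory TEXT, llm_name TEXT,
    api_key TEXT, api_base TEXT,
    PRIMARY KEY (tenant_id, llm_factory, llm_name))""")

def upsert_key(tenant_id, factory, llm_name, api_key, api_base):
    # Mirror of the PR's filter_update-then-save flow: update the row
    # keyed by (tenant, factory, model); insert only if nothing matched.
    cur = con.execute(
        "UPDATE tenant_llm SET api_key=?, api_base=? "
        "WHERE tenant_id=? AND llm_factory=? AND llm_name=?",
        (api_key, api_base, tenant_id, factory, llm_name))
    if cur.rowcount == 0:
        con.execute("INSERT INTO tenant_llm VALUES (?,?,?,?,?)",
                    (tenant_id, factory, llm_name, api_key, api_base))

for name in ["gpt-4o", "tts-1"]:          # stand-in for LLMService.query(fid=factory)
    upsert_key("tenant-1", "OpenAI", name, "sk-old", "")
upsert_key("tenant-1", "OpenAI", "tts-1", "sk-new", "")  # second call updates in place
print(con.execute("SELECT llm_name, api_key FROM tenant_llm").fetchall())
# [('gpt-4o', 'sk-old'), ('tts-1', 'sk-new')]
```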
`OpenAITTS` gains a guard so that an explicitly empty `base_url` still falls back to the public endpoint:

```diff
 class OpenAITTS(Base):
     def __init__(self, key, model_name="tts-1", base_url="https://api.openai.com/v1"):
+        if not base_url: base_url="https://api.openai.com/v1"
         self.api_key = key
         self.model_name = model_name
         self.base_url = base_url
```

Its response loop also drops the fixed 1 KB buffer, yielding data without waiting for a full kilobyte to accumulate:

```diff
         if response.status_code != 200:
             raise Exception(f"**Error**: {response.status_code}, {response.text}")
-        for chunk in response.iter_content(chunk_size=1024):
+        for chunk in response.iter_content():
             if chunk:
                 yield chunk
```
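For context, here is a standalone sketch of the request path such a class takes, assuming the standard OpenAI `/audio/speech` endpoint and the `requests` library; the function and the voice name are illustrative, not lifted from the repo:

```python
import requests

def openai_tts_stream(key, text, model_name="tts-1",
                      base_url="https://api.openai.com/v1"):
    # Mirror the guard from the PR: treat an empty base_url as "use default".
    if not base_url:
        base_url = "https://api.openai.com/v1"
    response = requests.post(
        f"{base_url}/audio/speech",
        headers={"Authorization": f"Bearer {key}"},
        json={"model": model_name, "input": text, "voice": "alloy"},  # voice is an assumption
        stream=True,
    )
    if response.status_code != 200:
        raise Exception(f"**Error**: {response.status_code}, {response.text}")
    # iter_content() with its default chunk_size yields bytes as soon as
    # they are read, instead of buffering 1024 bytes per chunk.
    for chunk in response.iter_content():
        if chunk:
            yield chunk
```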