您最多选择25个主题 主题必须以字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符

tts_model.py 6.0KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import requests
  17. from typing import Annotated, Literal
  18. from abc import ABC
  19. import httpx
  20. import ormsgpack
  21. from pydantic import BaseModel, conint
  22. from rag.utils import num_tokens_from_string
  23. import json
  24. import re
  25. import time
class ServeReferenceAudio(BaseModel):
    # One reference audio sample plus its transcript, sent to Fish Audio
    # for in-context learning (voice cloning) — see ServeTTSRequest.references.
    audio: bytes
    text: str
class ServeTTSRequest(BaseModel):
    # Request payload for the Fish Audio /v1/tts endpoint; serialized with
    # msgpack (ormsgpack.OPT_SERIALIZE_PYDANTIC) in FishAudioTTS.tts().
    text: str
    # Streaming chunk length accepted by the API: 100–300, default 200.
    chunk_length: Annotated[int, conint(ge=100, le=300, strict=True)] = 200
    # Audio format
    format: Literal["wav", "pcm", "mp3"] = "mp3"
    mp3_bitrate: Literal[64, 128, 192] = 128
    # Reference audios for in-context learning
    references: list[ServeReferenceAudio] = []
    # Reference id
    # For example, if you want to use https://fish.audio/m/7f92f8afb8ec43bf81429cc1c9199cb1/
    # just pass 7f92f8afb8ec43bf81429cc1c9199cb1
    reference_id: str | None = None
    # Normalize text for en & zh; this increases stability for numbers
    normalize: bool = True
    # Balanced mode reduces latency to ~300ms, but may decrease stability
    latency: Literal["normal", "balanced"] = "normal"
  45. class Base(ABC):
  46. def __init__(self, key, model_name, base_url):
  47. pass
  48. def tts(self, audio):
  49. pass
  50. def normalize_text(self, text):
  51. return re.sub(r'(\*\*|##\d+\$\$|#)', '', text)
class FishAudioTTS(Base):
    # TTS provider backed by the Fish Audio streaming API.

    def __init__(self, key, model_name, base_url="https://api.fish.audio/v1/tts"):
        if not base_url:
            base_url = "https://api.fish.audio/v1/tts"
        # `key` is a JSON string carrying the access key and an optional
        # reference voice id, e.g. {"fish_audio_ak": ..., "fish_audio_refid": ...}.
        key = json.loads(key)
        self.headers = {
            "api-key": key.get("fish_audio_ak"),
            # Request bodies are msgpack-encoded (see tts()).
            "content-type": "application/msgpack",
        }
        self.ref_id = key.get("fish_audio_refid")
        self.base_url = base_url

    def tts(self, text):
        """Stream synthesized audio for *text*.

        Yields raw audio byte chunks as they arrive from the API, then —
        after the stream completes — the token count of the input text
        (an int), so callers can account usage from the same generator.
        Raises RuntimeError on an HTTP error status.
        """
        from http import HTTPStatus

        text = self.normalize_text(text)
        request = ServeTTSRequest(text=text, reference_id=self.ref_id)

        with httpx.Client() as client:
            try:
                with client.stream(
                    method="POST",
                    url=self.base_url,
                    content=ormsgpack.packb(
                        request, option=ormsgpack.OPT_SERIALIZE_PYDANTIC
                    ),
                    headers=self.headers,
                    # No timeout: synthesis of long texts can stream for a while.
                    timeout=None,
                ) as response:
                    if response.status_code == HTTPStatus.OK:
                        for chunk in response.iter_bytes():
                            yield chunk
                    else:
                        # Converts the non-OK status into httpx.HTTPStatusError,
                        # caught below and re-raised as RuntimeError.
                        response.raise_for_status()

                yield num_tokens_from_string(text)

            except httpx.HTTPStatusError as e:
                raise RuntimeError(f"**ERROR**: {e}")
class QwenTTS(Base):
    # TTS provider backed by Alibaba DashScope (Qwen) speech synthesis.

    def __init__(self, key, model_name, base_url=""):
        import dashscope

        self.model_name = model_name
        # dashscope uses a module-global API key.
        dashscope.api_key = key

    def tts(self, text):
        """Stream synthesized audio for *text*.

        Yields raw audio frames pushed by DashScope's callback, then the
        token count of the input text (an int). Raises RuntimeError on any
        synthesis error.
        """
        from dashscope.api_entities.dashscope_response import SpeechSynthesisResponse
        from dashscope.audio.tts import ResultCallback, SpeechSynthesizer, SpeechSynthesisResult
        from collections import deque

        class Callback(ResultCallback):
            # Bridges DashScope's push-style callbacks to a pull-style
            # generator: frames are appended to a deque by on_event() and
            # drained by _run(); None marks end-of-stream.
            def __init__(self) -> None:
                self.dque = deque()

            def _run(self):
                # Busy-wait until a frame (or the None sentinel) arrives.
                # sleep(0) only yields the GIL between polls — this spins a
                # full core while idle; NOTE(review): a small positive sleep
                # or a threading.Event would be gentler — confirm intent.
                while True:
                    if not self.dque:
                        time.sleep(0)
                        continue
                    val = self.dque.popleft()
                    if val:
                        yield val
                    else:
                        # None sentinel from on_complete(): stream finished.
                        break

            def on_open(self):
                pass

            def on_complete(self):
                # End-of-stream sentinel consumed by _run().
                self.dque.append(None)

            def on_error(self, response: SpeechSynthesisResponse):
                raise RuntimeError(str(response))

            def on_close(self):
                pass

            def on_event(self, result: SpeechSynthesisResult):
                if result.get_audio_frame() is not None:
                    self.dque.append(result.get_audio_frame())

        text = self.normalize_text(text)
        callback = Callback()
        SpeechSynthesizer.call(model=self.model_name,
                               text=text,
                               callback=callback,
                               format="mp3")
        try:
            for data in callback._run():
                yield data
            yield num_tokens_from_string(text)

        except Exception as e:
            raise RuntimeError(f"**ERROR**: {e}")
  131. class OpenAITTS(Base):
  132. def __init__(self, key, model_name="tts-1", base_url="https://api.openai.com/v1"):
  133. if not base_url: base_url="https://api.openai.com/v1"
  134. self.api_key = key
  135. self.model_name = model_name
  136. self.base_url = base_url
  137. self.headers = {
  138. "Authorization": f"Bearer {self.api_key}",
  139. "Content-Type": "application/json"
  140. }
  141. def tts(self, text, voice="alloy"):
  142. text = self.normalize_text(text)
  143. payload = {
  144. "model": self.model_name,
  145. "voice": voice,
  146. "input": text
  147. }
  148. response = requests.post(f"{self.base_url}/audio/speech", headers=self.headers, json=payload, stream=True)
  149. if response.status_code != 200:
  150. raise Exception(f"**Error**: {response.status_code}, {response.text}")
  151. for chunk in response.iter_content():
  152. if chunk:
  153. yield chunk