
init_data.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import uuid

from api.db import LLMType, UserTenantRole
from api.db.db_models import init_database_tables as init_web_db
from api.db.services import UserService
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY
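

# Create the default admin account, its tenant, and one credential row per model of the
# configured default factory, then run a quick sanity check against the chat and embedding models.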
def init_superuser():
    user_info = {
        "id": uuid.uuid1().hex,
        "password": "admin",
        "nickname": "admin",
        "is_superuser": True,
        "email": "kai.hu@infiniflow.org",
        "creator": "system",
        "status": "1",
    }
    tenant = {
        "id": user_info["id"],
        "name": user_info["nickname"] + "'s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL
    }
    usr_tenant = {
        "tenant_id": user_info["id"],
        "user_id": user_info["id"],
        "invited_by": user_info["id"],
        "role": UserTenantRole.OWNER
    }
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
        tenant_llm.append(
            {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name,
             "model_type": llm.model_type, "api_key": API_KEY})

    if not UserService.save(**user_info):
        print("【ERROR】can't init admin.")
        return
    TenantService.save(**tenant)
    UserTenantService.save(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)

    # Smoke-test the configured chat model with a trivial prompt.
    chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
    msg = chat_mdl.chat(system="", history=[{"role": "user", "content": "Hello!"}], gen_conf={})
    if msg.find("ERROR: ") == 0:
        print("【ERROR】: '{}' doesn't work. {}".format(tenant["llm_id"], msg))

    # Smoke-test the configured embedding model; a zero token count means the call failed.
    embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
    v, c = embd_mdl.encode(["Hello!"])
    if c == 0:
        print("【ERROR】: '{}' doesn't work...".format(tenant["embd_id"]))
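

# Register the built-in model suppliers (OpenAI, 通义千问/Qwen, 智普AI/ZhipuAI, 文心一言)
# and the models each of them exposes.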
def init_llm_factory():
    factory_infos = [{
        "name": "OpenAI",
        "logo": "",
        "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
        "status": "1",
    }, {
        "name": "通义千问",
        "logo": "",
        "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
        "status": "1",
    }, {
        "name": "智普AI",
        "logo": "",
        "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
        "status": "1",
    }, {
        "name": "文心一言",
        "logo": "",
        "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
        "status": "1",
    },
    ]
    llm_infos = [
        # ---------------------- OpenAI ------------------------
        {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo",
            "tags": "LLM,CHAT,4K",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo-16k-0613",
            "tags": "LLM,CHAT,16K",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-ada-002",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "whisper-1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-32k",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-vision-preview",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ----------------------- Qwen -----------------------
        {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-plus",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "text-embedding-v2",
            "tags": "TEXT EMBEDDING,2K",
            "max_tokens": 2048,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "paraformer-realtime-8k-v1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-vl-max",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ---------------------- ZhipuAI ----------------------
        {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-3-turbo",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4v",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 2000,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        {
            "fid": factory_infos[2]["name"],
            "llm_name": "embedding-2",
            "tags": "TEXT EMBEDDING",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
    ]
    for info in factory_infos:
        LLMFactoriesService.save(**info)
    for info in llm_infos:
        LLMService.save(**info)
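

# Seed the web database: model factories/models are inserted only when the LLM table
# is empty, and the superuser only when no user exists yet.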
def init_web_data():
    start_time = time.time()
    if not LLMService.get_all().count():
        init_llm_factory()
    if not UserService.get_all().count():
        init_superuser()
    print("init web data success:{}".format(time.time() - start_time))
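

# Entry point: create the database tables first, then populate them.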
if __name__ == '__main__':
    init_web_db()
    init_web_data()