init_data.py (9.9 KB)
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
  16. import time
  17. import uuid
  18. from api.db import LLMType, UserTenantRole
  19. from api.db.db_models import init_database_tables as init_web_db
  20. from api.db.services import UserService
  21. from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
  22. from api.db.services.user_service import TenantService, UserTenantService
  23. from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY
  24. def init_superuser():
  25. user_info = {
  26. "id": uuid.uuid1().hex,
  27. "password": "admin",
  28. "nickname": "admin",
  29. "is_superuser": True,
  30. "email": "admin@ragflow.io",
  31. "creator": "system",
  32. "status": "1",
  33. }
  34. tenant = {
  35. "id": user_info["id"],
  36. "name": user_info["nickname"] + "‘s Kingdom",
  37. "llm_id": CHAT_MDL,
  38. "embd_id": EMBEDDING_MDL,
  39. "asr_id": ASR_MDL,
  40. "parser_ids": PARSERS,
  41. "img2txt_id": IMAGE2TEXT_MDL
  42. }
  43. usr_tenant = {
  44. "tenant_id": user_info["id"],
  45. "user_id": user_info["id"],
  46. "invited_by": user_info["id"],
  47. "role": UserTenantRole.OWNER
  48. }
  49. tenant_llm = []
  50. for llm in LLMService.query(fid=LLM_FACTORY):
  51. tenant_llm.append(
  52. {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name, "model_type": llm.model_type,
  53. "api_key": API_KEY})
  54. if not UserService.save(**user_info):
  55. print("\033[93m【ERROR】\033[0mcan't init admin.")
  56. return
  57. TenantService.insert(**tenant)
  58. UserTenantService.insert(**usr_tenant)
  59. TenantLLMService.insert_many(tenant_llm)
  60. print("【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. Changing the password after logining is strongly recomanded.")
  61. chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
  62. msg = chat_mdl.chat(system="", history=[{"role": "user", "content": "Hello!"}], gen_conf={})
  63. if msg.find("ERROR: ") == 0:
  64. print("\33[91m【ERROR】\33[0m: ", "'{}' dosen't work. {}".format(tenant["llm_id"], msg))
  65. embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
  66. v, c = embd_mdl.encode(["Hello!"])
  67. if c == 0:
  68. print("\33[91m【ERROR】\33[0m:", " '{}' dosen't work!".format(tenant["embd_id"]))
  69. factory_infos = [{
  70. "name": "OpenAI",
  71. "logo": "",
  72. "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
  73. "status": "1",
  74. },{
  75. "name": "通义千问",
  76. "logo": "",
  77. "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
  78. "status": "1",
  79. },{
  80. "name": "智谱AI",
  81. "logo": "",
  82. "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
  83. "status": "1",
  84. },
  85. {
  86. "name": "Local",
  87. "logo": "",
  88. "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
  89. "status": "1",
  90. },{
  91. "name": "Moonshot",
  92. "logo": "",
  93. "tags": "LLM,TEXT EMBEDDING",
  94. "status": "1",
  95. }
  96. # {
  97. # "name": "文心一言",
  98. # "logo": "",
  99. # "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
  100. # "status": "1",
  101. # },
  102. ]
  103. def init_llm_factory():
  104. llm_infos = [
  105. # ---------------------- OpenAI ------------------------
  106. {
  107. "fid": factory_infos[0]["name"],
  108. "llm_name": "gpt-3.5-turbo",
  109. "tags": "LLM,CHAT,4K",
  110. "max_tokens": 4096,
  111. "model_type": LLMType.CHAT.value
  112. },{
  113. "fid": factory_infos[0]["name"],
  114. "llm_name": "gpt-3.5-turbo-16k-0613",
  115. "tags": "LLM,CHAT,16k",
  116. "max_tokens": 16385,
  117. "model_type": LLMType.CHAT.value
  118. },{
  119. "fid": factory_infos[0]["name"],
  120. "llm_name": "text-embedding-ada-002",
  121. "tags": "TEXT EMBEDDING,8K",
  122. "max_tokens": 8191,
  123. "model_type": LLMType.EMBEDDING.value
  124. },{
  125. "fid": factory_infos[0]["name"],
  126. "llm_name": "whisper-1",
  127. "tags": "SPEECH2TEXT",
  128. "max_tokens": 25*1024*1024,
  129. "model_type": LLMType.SPEECH2TEXT.value
  130. },{
  131. "fid": factory_infos[0]["name"],
  132. "llm_name": "gpt-4",
  133. "tags": "LLM,CHAT,8K",
  134. "max_tokens": 8191,
  135. "model_type": LLMType.CHAT.value
  136. },{
  137. "fid": factory_infos[0]["name"],
  138. "llm_name": "gpt-4-32k",
  139. "tags": "LLM,CHAT,32K",
  140. "max_tokens": 32768,
  141. "model_type": LLMType.CHAT.value
  142. },{
  143. "fid": factory_infos[0]["name"],
  144. "llm_name": "gpt-4-vision-preview",
  145. "tags": "LLM,CHAT,IMAGE2TEXT",
  146. "max_tokens": 765,
  147. "model_type": LLMType.IMAGE2TEXT.value
  148. },
  149. # ----------------------- Qwen -----------------------
  150. {
  151. "fid": factory_infos[1]["name"],
  152. "llm_name": "qwen-turbo",
  153. "tags": "LLM,CHAT,8K",
  154. "max_tokens": 8191,
  155. "model_type": LLMType.CHAT.value
  156. },{
  157. "fid": factory_infos[1]["name"],
  158. "llm_name": "qwen-plus",
  159. "tags": "LLM,CHAT,32K",
  160. "max_tokens": 32768,
  161. "model_type": LLMType.CHAT.value
  162. },{
  163. "fid": factory_infos[1]["name"],
  164. "llm_name": "qwen-max-1201",
  165. "tags": "LLM,CHAT,6K",
  166. "max_tokens": 5899,
  167. "model_type": LLMType.CHAT.value
  168. },{
  169. "fid": factory_infos[1]["name"],
  170. "llm_name": "text-embedding-v2",
  171. "tags": "TEXT EMBEDDING,2K",
  172. "max_tokens": 2048,
  173. "model_type": LLMType.EMBEDDING.value
  174. },{
  175. "fid": factory_infos[1]["name"],
  176. "llm_name": "paraformer-realtime-8k-v1",
  177. "tags": "SPEECH2TEXT",
  178. "max_tokens": 25*1024*1024,
  179. "model_type": LLMType.SPEECH2TEXT.value
  180. },{
  181. "fid": factory_infos[1]["name"],
  182. "llm_name": "qwen-vl-max",
  183. "tags": "LLM,CHAT,IMAGE2TEXT",
  184. "max_tokens": 765,
  185. "model_type": LLMType.IMAGE2TEXT.value
  186. },
  187. # ---------------------- ZhipuAI ----------------------
  188. {
  189. "fid": factory_infos[2]["name"],
  190. "llm_name": "glm-3-turbo",
  191. "tags": "LLM,CHAT,",
  192. "max_tokens": 128 * 1000,
  193. "model_type": LLMType.CHAT.value
  194. }, {
  195. "fid": factory_infos[2]["name"],
  196. "llm_name": "glm-4",
  197. "tags": "LLM,CHAT,",
  198. "max_tokens": 128 * 1000,
  199. "model_type": LLMType.CHAT.value
  200. }, {
  201. "fid": factory_infos[2]["name"],
  202. "llm_name": "glm-4v",
  203. "tags": "LLM,CHAT,IMAGE2TEXT",
  204. "max_tokens": 2000,
  205. "model_type": LLMType.IMAGE2TEXT.value
  206. },
  207. {
  208. "fid": factory_infos[2]["name"],
  209. "llm_name": "embedding-2",
  210. "tags": "TEXT EMBEDDING",
  211. "max_tokens": 512,
  212. "model_type": LLMType.EMBEDDING.value
  213. },
  214. # ---------------------- 本地 ----------------------
  215. {
  216. "fid": factory_infos[3]["name"],
  217. "llm_name": "qwen-14B-chat",
  218. "tags": "LLM,CHAT,",
  219. "max_tokens": 8191,
  220. "model_type": LLMType.CHAT.value
  221. }, {
  222. "fid": factory_infos[3]["name"],
  223. "llm_name": "flag-embedding",
  224. "tags": "TEXT EMBEDDING,",
  225. "max_tokens": 128 * 1000,
  226. "model_type": LLMType.EMBEDDING.value
  227. },
  228. # ------------------------ Moonshot -----------------------
  229. {
  230. "fid": factory_infos[4]["name"],
  231. "llm_name": "moonshot-v1-8k",
  232. "tags": "LLM,CHAT,",
  233. "max_tokens": 7900,
  234. "model_type": LLMType.CHAT.value
  235. }, {
  236. "fid": factory_infos[4]["name"],
  237. "llm_name": "flag-embedding",
  238. "tags": "TEXT EMBEDDING,",
  239. "max_tokens": 128 * 1000,
  240. "model_type": LLMType.EMBEDDING.value
  241. },{
  242. "fid": factory_infos[4]["name"],
  243. "llm_name": "moonshot-v1-32k",
  244. "tags": "LLM,CHAT,",
  245. "max_tokens": 32768,
  246. "model_type": LLMType.CHAT.value
  247. },{
  248. "fid": factory_infos[4]["name"],
  249. "llm_name": "moonshot-v1-128k",
  250. "tags": "LLM,CHAT",
  251. "max_tokens": 128 * 1000,
  252. "model_type": LLMType.CHAT.value
  253. },
  254. ]
  255. for info in factory_infos:
  256. try:
  257. LLMFactoriesService.save(**info)
  258. except Exception as e:
  259. pass
  260. for info in llm_infos:
  261. try:
  262. LLMService.save(**info)
  263. except Exception as e:
  264. pass
  265. def init_web_data():
  266. start_time = time.time()
  267. if LLMFactoriesService.get_all().count() != len(factory_infos):
  268. init_llm_factory()
  269. if not UserService.get_all().count():
  270. init_superuser()
  271. print("init web data success:{}".format(time.time() - start_time))
# Script entry point: create the tables, then seed them.
if __name__ == '__main__':
    init_web_db()
    init_web_data()
    # NOTE(review): add_tenant_llm() is not defined anywhere in the visible
    # part of this file or its imports — this call will raise NameError
    # unless it is defined elsewhere; confirm and implement or remove it.
    add_tenant_llm()