
init_data.py 15KB

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import uuid
from copy import deepcopy

from api.db import LLMType, UserTenantRole
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
from api.db.services import UserService
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL

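# Seed a default "admin" account: the user row, a tenant reusing the same id,
# the owner membership, and one TenantLLM row per model of the configured
# default factory. The configured chat and embedding models are then smoke-tested.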
def init_superuser():
    user_info = {
        "id": uuid.uuid1().hex,
        "password": "admin",
        "nickname": "admin",
        "is_superuser": True,
        "email": "admin@ragflow.io",
        "creator": "system",
        "status": "1",
    }
    tenant = {
        "id": user_info["id"],
        "name": user_info["nickname"] + "'s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL
    }
    usr_tenant = {
        "tenant_id": user_info["id"],
        "user_id": user_info["id"],
        "invited_by": user_info["id"],
        "role": UserTenantRole.OWNER
    }
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
        tenant_llm.append(
            {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name,
             "model_type": llm.model_type, "api_key": API_KEY, "api_base": LLM_BASE_URL})

    if not UserService.save(**user_info):
        print("\033[93m【ERROR】\033[0m can't init admin.")
        return
    TenantService.insert(**tenant)
    UserTenantService.insert(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)
    print(
        "【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. "
        "Changing the password after logging in is strongly recommended.")

    # Smoke-test the default chat model.
    chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
    msg = chat_mdl.chat(system="", history=[
        {"role": "user", "content": "Hello!"}], gen_conf={})
    if msg.find("ERROR: ") == 0:
        print(
            "\33[91m【ERROR】\33[0m: ",
            "'{}' doesn't work. {}".format(
                tenant["llm_id"],
                msg))

    # Smoke-test the default embedding model.
    embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
    v, c = embd_mdl.encode(["Hello!"])
    if c == 0:
        print(
            "\33[91m【ERROR】\33[0m:",
            " '{}' doesn't work!".format(
                tenant["embd_id"]))

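# Built-in model providers ("factories") registered at startup. The "status"
# value "1" presumably marks the factory as enabled; the commented-out entry at
# the end is kept for reference but not registered.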
factory_infos = [{
    "name": "OpenAI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Tongyi-Qianwen",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "ZHIPU-AI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Ollama",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Moonshot",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING",
    "status": "1",
}, {
    "name": "FastEmbed",
    "logo": "",
    "tags": "TEXT EMBEDDING",
    "status": "1",
}, {
    "name": "Xinference",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Youdao",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "DeepSeek",
    "logo": "",
    "tags": "LLM",
    "status": "1",
},
    # {
    #     "name": "文心一言",
    #     "logo": "",
    #     "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    #     "status": "1",
    # },
]

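# Register the factories above plus the individual models they serve, then clean
# up records left behind by earlier releases (the obsolete "Local" factory, the
# misplaced Moonshot "flag-embedding" model, and the "QAnything" factory that was
# renamed to "Youdao"), and finally add the OpenAI embedding models for tenants
# that already use OpenAI.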
def init_llm_factory():
    llm_infos = [
        # ---------------------- OpenAI ------------------------
        {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4o",
            "tags": "LLM,CHAT,128K",
            "max_tokens": 128000,
            "model_type": LLMType.CHAT.value + "," + LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo",
            "tags": "LLM,CHAT,4K",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo-16k-0613",
            "tags": "LLM,CHAT,16k",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-ada-002",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-3-small",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-3-large",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "whisper-1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-32k",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-vision-preview",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ----------------------- Qwen -----------------------
        {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-plus",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-max-1201",
            "tags": "LLM,CHAT,6K",
            "max_tokens": 5899,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "text-embedding-v2",
            "tags": "TEXT EMBEDDING,2K",
            "max_tokens": 2048,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "paraformer-realtime-8k-v1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-vl-max",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ---------------------- ZhipuAI ----------------------
        {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-3-turbo",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4v",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 2000,
            "model_type": LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "embedding-2",
            "tags": "TEXT EMBEDDING",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Moonshot -----------------------
        {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-8k",
            "tags": "LLM,CHAT,",
            "max_tokens": 7900,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-32k",
            "tags": "LLM,CHAT,",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-128k",
            "tags": "LLM,CHAT",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        },
        # ------------------------ FastEmbed -----------------------
        {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-small-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-small-zh-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-base-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-large-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "sentence-transformers/all-MiniLM-L6-v2",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "nomic-ai/nomic-embed-text-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 8192,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "jinaai/jina-embeddings-v2-small-en",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 2147483648,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "jinaai/jina-embeddings-v2-base-en",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 2147483648,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Youdao -----------------------
        {
            "fid": factory_infos[7]["name"],
            "llm_name": "maidalun1020/bce-embedding-base_v1",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ DeepSeek -----------------------
        {
            "fid": factory_infos[8]["name"],
            "llm_name": "deepseek-chat",
            "tags": "LLM,CHAT,",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[8]["name"],
            "llm_name": "deepseek-coder",
            "tags": "LLM,CHAT,",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        },
    ]
    # Best-effort inserts: failures (e.g. a record that already exists) are
    # silently ignored, so re-running the script is safe.
    for info in factory_infos:
        try:
            LLMFactoriesService.save(**info)
        except Exception:
            pass
    for info in llm_infos:
        try:
            LLMService.save(**info)
        except Exception:
            pass
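    # Housekeeping for databases created by earlier releases: drop the obsolete
    # "Local" factory and its models, remove the "flag-embedding" model that was
    # filed under Moonshot, and migrate "QAnything" records to "Youdao".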
    LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
    LLMService.filter_delete([LLM.fid == "Local"])
    LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
    TenantLLMService.filter_delete([TenantLLM.llm_factory == "Moonshot", TenantLLM.llm_name == "flag-embedding"])
    LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
    LLMService.filter_delete([LLMService.model.fid == "QAnything"])
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})

    # Add the two OpenAI embedding models for every tenant that already has an
    # OpenAI configuration, reusing that tenant's first OpenAI row as a template.
    print("Start to insert 2 OpenAI embedding models...")
    tenant_ids = set([row.tenant_id for row in TenantLLMService.get_openai_models()])
    for tid in tenant_ids:
        for row in TenantLLMService.get_openai_models(llm_factory="OpenAI", tenant_id=tid):
            row = row.to_dict()
            row["model_type"] = LLMType.EMBEDDING.value
            row["llm_name"] = "text-embedding-3-small"
            row["used_tokens"] = 0
            try:
                TenantLLMService.save(**row)
                row = deepcopy(row)
                row["llm_name"] = "text-embedding-3-large"
                TenantLLMService.save(**row)
            except Exception:
                pass
            break
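
# Manual SQL kept below for reference when migrating older deployments; the
# string is inert and is not executed by this script.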
  399. """
  400. drop table llm;
  401. drop table llm_factories;
  402. update tenant set parser_ids='naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One';
  403. alter table knowledgebase modify avatar longtext;
  404. alter table user modify avatar longtext;
  405. alter table dialog modify icon longtext;
  406. """
def init_web_data():
    start_time = time.time()

    init_llm_factory()
    if not UserService.get_all().count():
        init_superuser()

    print("init web data success:{}".format(time.time() - start_time))


if __name__ == '__main__':
    init_web_db()
    init_web_data()
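# Note: running this file directly first creates the tables (init_web_db) and then
# seeds the data. The exact invocation depends on the deployment; from the
# repository root something like "python -m api.db.init_data" would be the usual
# way to execute it as part of the "api" package (an assumption, not spelled out
# in this file).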