
init_data.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
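"""Bootstrap data for the RAGFlow web service.

Seeds the LLM factory/model catalog and, when no user exists yet, creates a
default superuser together with its tenant and per-tenant LLM records.
"""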
import os
import time
import uuid
from copy import deepcopy

from api.db import LLMType, UserTenantRole
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
from api.db.services import UserService
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL


def init_superuser():
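    """Create the default admin account (admin@ragflow.io) and its tenant,
    then smoke-test the configured chat and embedding models."""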
    user_info = {
        "id": uuid.uuid1().hex,
        "password": "admin",
        "nickname": "admin",
        "is_superuser": True,
        "email": "admin@ragflow.io",
        "creator": "system",
        "status": "1",
    }
    tenant = {
        "id": user_info["id"],
        "name": user_info["nickname"] + "'s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL
    }
    usr_tenant = {
        "tenant_id": user_info["id"],
        "user_id": user_info["id"],
        "invited_by": user_info["id"],
        "role": UserTenantRole.OWNER
    }
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
        tenant_llm.append(
            {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name,
             "model_type": llm.model_type, "api_key": API_KEY, "api_base": LLM_BASE_URL})

    if not UserService.save(**user_info):
        print("\033[93m【ERROR】\033[0m can't init admin.")
        return
    TenantService.insert(**tenant)
    UserTenantService.insert(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)
    print(
        "【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. "
        "Changing the password after logging in is strongly recommended.")

    # Verify that the configured chat model responds.
    chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
    msg = chat_mdl.chat(system="", history=[
        {"role": "user", "content": "Hello!"}], gen_conf={})
    if msg.find("ERROR: ") == 0:
        print(
            "\33[91m【ERROR】\33[0m: ",
            "'{}' doesn't work. {}".format(
                tenant["llm_id"],
                msg))

    # Verify that the configured embedding model returns vectors.
    embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
    v, c = embd_mdl.encode(["Hello!"])
    if c == 0:
        print(
            "\33[91m【ERROR】\33[0m:",
            " '{}' doesn't work!".format(
                tenant["embd_id"]))


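# Catalog of supported LLM providers. The entries in llm_infos (see
# init_llm_factory below) refer to these factories by list index
# (factory_infos[0] == "OpenAI", etc.), so the order of this list matters.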
factory_infos = [{
    "name": "OpenAI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Tongyi-Qianwen",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "ZHIPU-AI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Ollama",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Moonshot",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING",
    "status": "1",
}, {
    "name": "FastEmbed",
    "logo": "",
    "tags": "TEXT EMBEDDING",
    "status": "1",
}, {
    "name": "Xinference",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Youdao",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "DeepSeek",
    "logo": "",
    "tags": "LLM",
    "status": "1",
},
    # {
    #     "name": "文心一言",
    #     "logo": "",
    #     "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    #     "status": "1",
    # },
]


def init_llm_factory():
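    """Seed the LLM factory and model catalog, clean up legacy factory
    records, and backfill OpenAI embedding models for existing tenants."""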
    llm_infos = [
        # ---------------------- OpenAI ------------------------
        {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4o",
            "tags": "LLM,CHAT,128K",
            "max_tokens": 128000,
            "model_type": LLMType.CHAT.value + "," + LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo",
            "tags": "LLM,CHAT,4K",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo-16k-0613",
            "tags": "LLM,CHAT,16k",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-ada-002",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-3-small",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-3-large",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "whisper-1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-32k",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-vision-preview",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ----------------------- Qwen -----------------------
        {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-plus",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-max-1201",
            "tags": "LLM,CHAT,6K",
            "max_tokens": 5899,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "text-embedding-v2",
            "tags": "TEXT EMBEDDING,2K",
            "max_tokens": 2048,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "paraformer-realtime-8k-v1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-vl-max",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ---------------------- ZhipuAI ----------------------
        {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-3-turbo",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4v",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 2000,
            "model_type": LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "embedding-2",
            "tags": "TEXT EMBEDDING",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Moonshot -----------------------
        {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-8k",
            "tags": "LLM,CHAT,",
            "max_tokens": 7900,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-32k",
            "tags": "LLM,CHAT,",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-128k",
            "tags": "LLM,CHAT",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        },
        # ------------------------ FastEmbed -----------------------
        {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-small-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-small-zh-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-base-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-large-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "sentence-transformers/all-MiniLM-L6-v2",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "nomic-ai/nomic-embed-text-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 8192,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "jinaai/jina-embeddings-v2-small-en",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 2147483648,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "jinaai/jina-embeddings-v2-base-en",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 2147483648,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Youdao -----------------------
        {
            "fid": factory_infos[7]["name"],
            "llm_name": "maidalun1020/bce-embedding-base_v1",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ DeepSeek -----------------------
        {
            "fid": factory_infos[8]["name"],
            "llm_name": "deepseek-chat",
            "tags": "LLM,CHAT,",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[8]["name"],
            "llm_name": "deepseek-coder",
            "tags": "LLM,CHAT,",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        },
    ]

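    # Seed the catalog; save errors (e.g. rows that already exist from a
    # previous run) are silently ignored so the script can be re-run.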
    for info in factory_infos:
        try:
            LLMFactoriesService.save(**info)
        except Exception:
            pass
    for info in llm_infos:
        try:
            LLMService.save(**info)
        except Exception:
            pass

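    # Remove legacy factory/model records ("Local", Moonshot's "flag-embedding",
    # "QAnything") and migrate existing "QAnything" tenant settings to "Youdao".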
    LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
    LLMService.filter_delete([LLM.fid == "Local"])
    LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
    TenantLLMService.filter_delete([TenantLLM.llm_factory == "Moonshot", TenantLLM.llm_name == "flag-embedding"])
    LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
    LLMService.filter_delete([LLMService.model.fid == "QAnything"])
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})

    # Insert the two newer OpenAI embedding models for every tenant that
    # already has OpenAI configured.
    print("Start to insert 2 OpenAI embedding models...")
    tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
    for tid in tenant_ids:
        for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
            row = row.to_dict()
            row["model_type"] = LLMType.EMBEDDING.value
            row["llm_name"] = "text-embedding-3-small"
            row["used_tokens"] = 0
            try:
                TenantLLMService.save(**row)
                row = deepcopy(row)
                row["llm_name"] = "text-embedding-3-large"
                TenantLLMService.save(**row)
            except Exception:
                pass
            break

    # Recompute the document count of every knowledge base.
    for kb_id in KnowledgebaseService.get_all_ids():
        KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})


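# Manual schema-migration SQL, kept below for reference only; this string is
# never executed by the script.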
  403. """
  404. drop table llm;
  405. drop table llm_factories;
  406. update tenant set parser_ids='naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One';
  407. alter table knowledgebase modify avatar longtext;
  408. alter table user modify avatar longtext;
  409. alter table dialog modify icon longtext;
  410. """
def init_web_data():
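    """Seed the LLM catalog and, if no user exists yet, the default superuser."""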
    start_time = time.time()

    init_llm_factory()
    if not UserService.get_all().count():
        init_superuser()

    print("init web data success:{}".format(time.time() - start_time))


if __name__ == '__main__':
    init_web_db()
    init_web_data()