#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import uuid
from copy import deepcopy

from api.db import LLMType, UserTenantRole
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
from api.db.services import UserService
from api.db.services.document_service import DocumentService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL


def init_superuser():
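    """Create the default admin account (admin@ragflow.io / admin) together with its
    tenant and per-tenant LLM records for the configured LLM_FACTORY, then smoke-test
    the configured chat and embedding models."""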
    user_info = {
        "id": uuid.uuid1().hex,
        "password": "admin",
        "nickname": "admin",
        "is_superuser": True,
        "email": "admin@ragflow.io",
        "creator": "system",
        "status": "1",
    }
    tenant = {
        "id": user_info["id"],
        "name": user_info["nickname"] + "'s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL
    }
    usr_tenant = {
        "tenant_id": user_info["id"],
        "user_id": user_info["id"],
        "invited_by": user_info["id"],
        "role": UserTenantRole.OWNER
    }
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
        tenant_llm.append(
            {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name, "model_type": llm.model_type,
             "api_key": API_KEY, "api_base": LLM_BASE_URL})

    if not UserService.save(**user_info):
        print("\033[93m【ERROR】\033[0m can't init admin.")
        return
    TenantService.insert(**tenant)
    UserTenantService.insert(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)
    print(
        "【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. "
        "Changing the password after logging in is strongly recommended.")

    chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
    msg = chat_mdl.chat(system="", history=[
        {"role": "user", "content": "Hello!"}], gen_conf={})
    if msg.find("ERROR: ") == 0:
        print(
            "\33[91m【ERROR】\33[0m: ",
            "'{}' doesn't work. {}".format(
                tenant["llm_id"],
                msg))
    embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
    v, c = embd_mdl.encode(["Hello!"])
    if c == 0:
        print(
            "\33[91m【ERROR】\33[0m:",
            " '{}' doesn't work!".format(
                tenant["embd_id"]))


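# Built-in LLM providers seeded into the database. init_llm_factory() below refers
# to these entries by index (factory_infos[0] is OpenAI, factory_infos[4] Moonshot, ...).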
factory_infos = [{
    "name": "OpenAI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Tongyi-Qianwen",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "ZHIPU-AI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Ollama",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Moonshot",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING",
    "status": "1",
}, {
    "name": "FastEmbed",
    "logo": "",
    "tags": "TEXT EMBEDDING",
    "status": "1",
}, {
    "name": "Xinference",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Youdao",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "DeepSeek",
    "logo": "",
    "tags": "LLM",
    "status": "1",
}, {
    "name": "VolcEngine",
    "logo": "",
    "tags": "LLM, TEXT EMBEDDING",
    "status": "1",
},
    # {
    #     "name": "文心一言",
    #     "logo": "",
    #     "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    #     "status": "1",
    # },
]


def init_llm_factory():
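    """Register the built-in LLM factories and their models, remove obsolete entries
    (the "Local" factory, Moonshot's flag-embedding, the retired "QAnything" factory,
    which is renamed to "Youdao" for existing tenants), backfill the two OpenAI
    embedding models for OpenAI tenants, and refresh per-knowledgebase document counts."""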
    llm_infos = [
        # ---------------------- OpenAI ------------------------
        {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4o",
            "tags": "LLM,CHAT,128K",
            "max_tokens": 128000,
            "model_type": LLMType.CHAT.value + "," + LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo",
            "tags": "LLM,CHAT,4K",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo-16k-0613",
            "tags": "LLM,CHAT,16k",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-ada-002",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-3-small",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-3-large",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "whisper-1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-32k",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-vision-preview",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ----------------------- Qwen -----------------------
        {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-plus",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-max-1201",
            "tags": "LLM,CHAT,6K",
            "max_tokens": 5899,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "text-embedding-v2",
            "tags": "TEXT EMBEDDING,2K",
            "max_tokens": 2048,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "paraformer-realtime-8k-v1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-vl-max",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ---------------------- ZhipuAI ----------------------
        {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-3-turbo",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4v",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 2000,
            "model_type": LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "embedding-2",
            "tags": "TEXT EMBEDDING",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Moonshot -----------------------
        {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-8k",
            "tags": "LLM,CHAT,",
            "max_tokens": 7900,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-32k",
            "tags": "LLM,CHAT,",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-128k",
            "tags": "LLM,CHAT",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        },
        # ------------------------ FastEmbed -----------------------
        {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-small-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-small-zh-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-base-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-large-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "sentence-transformers/all-MiniLM-L6-v2",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "nomic-ai/nomic-embed-text-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 8192,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "jinaai/jina-embeddings-v2-small-en",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 2147483648,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "jinaai/jina-embeddings-v2-base-en",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 2147483648,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Youdao -----------------------
        {
            "fid": factory_infos[7]["name"],
            "llm_name": "maidalun1020/bce-embedding-base_v1",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ DeepSeek -----------------------
        {
            "fid": factory_infos[8]["name"],
            "llm_name": "deepseek-chat",
            "tags": "LLM,CHAT,",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[8]["name"],
            "llm_name": "deepseek-coder",
            "tags": "LLM,CHAT,",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        },
        # ------------------------ VolcEngine -----------------------
        {
            "fid": factory_infos[9]["name"],
            "llm_name": "Skylark2-pro-32k",
            "tags": "LLM,CHAT,32k",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[9]["name"],
            "llm_name": "Skylark2-pro-4k",
            "tags": "LLM,CHAT,4k",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        },
    ]
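    # Persist the factory and model catalogs. save() failures (e.g. records that
    # already exist from a previous run) are deliberately swallowed, so re-running
    # initialization is harmless.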
    for info in factory_infos:
        try:
            LLMFactoriesService.save(**info)
        except Exception as e:
            pass
    for info in llm_infos:
        try:
            LLMService.save(**info)
        except Exception as e:
            pass

    LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
    LLMService.filter_delete([LLM.fid == "Local"])
    LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
    TenantLLMService.filter_delete([TenantLLM.llm_factory == "Moonshot", TenantLLM.llm_name == "flag-embedding"])
    LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
    LLMService.filter_delete([LLMService.model.fid == "QAnything"])
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
    # Insert the two OpenAI embedding models for every tenant that already has OpenAI configured.
    print("Start to insert 2 OpenAI embedding models...")
    tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
    for tid in tenant_ids:
        for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
            row = row.to_dict()
            row["model_type"] = LLMType.EMBEDDING.value
            row["llm_name"] = "text-embedding-3-small"
            row["used_tokens"] = 0
            try:
                TenantLLMService.save(**row)
                row = deepcopy(row)
                row["llm_name"] = "text-embedding-3-large"
                TenantLLMService.save(**row)
            except Exception as e:
                pass
            break

    for kb_id in KnowledgebaseService.get_all_ids():
        KnowledgebaseService.update_by_id(kb_id, {"doc_num": DocumentService.get_kb_doc_count(kb_id)})


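# Manual migration notes kept for reference only; this bare string literal is never
# executed by the script.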
"""
drop table llm;
drop table llm_factories;
update tenant set parser_ids='naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One';
alter table knowledgebase modify avatar longtext;
alter table user modify avatar longtext;
alter table dialog modify icon longtext;
"""


def init_web_data():
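    """Seed web data: refresh the LLM factory/model catalogs and, if no user exists
    yet, create the default superuser."""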
    start_time = time.time()

    init_llm_factory()
    if not UserService.get_all().count():
        init_superuser()

    print("init web data success:{}".format(time.time() - start_time))


if __name__ == '__main__':
    init_web_db()
    init_web_data()