
init_data.py

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import os
import time
import uuid

from api.db import LLMType, UserTenantRole
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
from api.db.services import UserService
from api.db.services.llm_service import LLMFactoriesService, LLMService, TenantLLMService, LLMBundle
from api.db.services.user_service import TenantService, UserTenantService
from api.settings import CHAT_MDL, EMBEDDING_MDL, ASR_MDL, IMAGE2TEXT_MDL, PARSERS, LLM_FACTORY, API_KEY, LLM_BASE_URL
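

# Seed data for a fresh RAGFlow deployment: the helpers below register the
# built-in LLM factories and their models and, when no user exists yet, create
# a default "admin" account with its own tenant (see init_web_data() at the
# bottom of the file).
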
def init_superuser():
    """Create the default admin user and tenant, then smoke-test the configured chat and embedding models."""
    user_info = {
        "id": uuid.uuid1().hex,
        "password": "admin",
        "nickname": "admin",
        "is_superuser": True,
        "email": "admin@ragflow.io",
        "creator": "system",
        "status": "1",
    }
    tenant = {
        "id": user_info["id"],
        "name": user_info["nickname"] + "'s Kingdom",
        "llm_id": CHAT_MDL,
        "embd_id": EMBEDDING_MDL,
        "asr_id": ASR_MDL,
        "parser_ids": PARSERS,
        "img2txt_id": IMAGE2TEXT_MDL
    }
    usr_tenant = {
        "tenant_id": user_info["id"],
        "user_id": user_info["id"],
        "invited_by": user_info["id"],
        "role": UserTenantRole.OWNER
    }
    # Grant the admin tenant access to every model of the configured default factory.
    tenant_llm = []
    for llm in LLMService.query(fid=LLM_FACTORY):
        tenant_llm.append(
            {"tenant_id": user_info["id"], "llm_factory": LLM_FACTORY, "llm_name": llm.llm_name,
             "model_type": llm.model_type, "api_key": API_KEY, "api_base": LLM_BASE_URL})

    if not UserService.save(**user_info):
        print("\033[93m【ERROR】\033[0m can't init admin.")
        return
    TenantService.insert(**tenant)
    UserTenantService.insert(**usr_tenant)
    TenantLLMService.insert_many(tenant_llm)
    print(
        "【INFO】Super user initialized. \033[93memail: admin@ragflow.io, password: admin\033[0m. "
        "Changing the password after logging in is strongly recommended.")

    # Smoke-test the configured chat and embedding models so a bad API key or
    # base URL shows up at startup rather than at first use.
    chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
    msg = chat_mdl.chat(system="", history=[
        {"role": "user", "content": "Hello!"}], gen_conf={})
    if msg.find("ERROR: ") == 0:
        print(
            "\33[91m【ERROR】\33[0m: ",
            "'{}' doesn't work. {}".format(
                tenant["llm_id"],
                msg))
    embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
    v, c = embd_mdl.encode(["Hello!"])
    if c == 0:
        print(
            "\33[91m【ERROR】\33[0m:",
            " '{}' doesn't work!".format(
                tenant["embd_id"]))
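

# Built-in model suppliers. llm_infos in init_llm_factory() refers to these
# entries by list index (factory_infos[0] is OpenAI, factory_infos[1] is
# Tongyi-Qianwen, and so on), so the order here matters.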
factory_infos = [{
    "name": "OpenAI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Tongyi-Qianwen",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "ZHIPU-AI",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Ollama",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Moonshot",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING",
    "status": "1",
}, {
    "name": "FastEmbed",
    "logo": "",
    "tags": "TEXT EMBEDDING",
    "status": "1",
}, {
    "name": "Xinference",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
}, {
    "name": "Youdao",
    "logo": "",
    "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    "status": "1",
},
    # {
    #     "name": "文心一言",
    #     "logo": "",
    #     "tags": "LLM,TEXT EMBEDDING,SPEECH2TEXT,MODERATION",
    #     "status": "1",
    # },
]
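

# init_llm_factory() is safe to re-run: duplicate factory/model rows are simply
# skipped by the try/except around each save() call, and a few legacy entries
# (the "Local" factory, Moonshot's "flag-embedding" model, the old "QAnything"
# factory name) are removed or renamed to "Youdao".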
def init_llm_factory():
    llm_infos = [
        # ---------------------- OpenAI ------------------------
        {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo",
            "tags": "LLM,CHAT,4K",
            "max_tokens": 4096,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-3.5-turbo-16k-0613",
            "tags": "LLM,CHAT,16k",
            "max_tokens": 16385,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "text-embedding-ada-002",
            "tags": "TEXT EMBEDDING,8K",
            "max_tokens": 8191,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "whisper-1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-32k",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[0]["name"],
            "llm_name": "gpt-4-vision-preview",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ----------------------- Qwen -----------------------
        {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-turbo",
            "tags": "LLM,CHAT,8K",
            "max_tokens": 8191,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-plus",
            "tags": "LLM,CHAT,32K",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-max-1201",
            "tags": "LLM,CHAT,6K",
            "max_tokens": 5899,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "text-embedding-v2",
            "tags": "TEXT EMBEDDING,2K",
            "max_tokens": 2048,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "paraformer-realtime-8k-v1",
            "tags": "SPEECH2TEXT",
            "max_tokens": 25 * 1024 * 1024,
            "model_type": LLMType.SPEECH2TEXT.value
        }, {
            "fid": factory_infos[1]["name"],
            "llm_name": "qwen-vl-max",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 765,
            "model_type": LLMType.IMAGE2TEXT.value
        },
        # ---------------------- ZhipuAI ----------------------
        {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-3-turbo",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4",
            "tags": "LLM,CHAT,",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "glm-4v",
            "tags": "LLM,CHAT,IMAGE2TEXT",
            "max_tokens": 2000,
            "model_type": LLMType.IMAGE2TEXT.value
        }, {
            "fid": factory_infos[2]["name"],
            "llm_name": "embedding-2",
            "tags": "TEXT EMBEDDING",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Moonshot -----------------------
        {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-8k",
            "tags": "LLM,CHAT,",
            "max_tokens": 7900,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-32k",
            "tags": "LLM,CHAT,",
            "max_tokens": 32768,
            "model_type": LLMType.CHAT.value
        }, {
            "fid": factory_infos[4]["name"],
            "llm_name": "moonshot-v1-128k",
            "tags": "LLM,CHAT",
            "max_tokens": 128 * 1000,
            "model_type": LLMType.CHAT.value
        },
        # ------------------------ FastEmbed -----------------------
        {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-small-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-small-zh-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-base-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "BAAI/bge-large-en-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "sentence-transformers/all-MiniLM-L6-v2",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "nomic-ai/nomic-embed-text-v1.5",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 8192,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "jinaai/jina-embeddings-v2-small-en",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 2147483648,
            "model_type": LLMType.EMBEDDING.value
        }, {
            "fid": factory_infos[5]["name"],
            "llm_name": "jinaai/jina-embeddings-v2-base-en",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 2147483648,
            "model_type": LLMType.EMBEDDING.value
        },
        # ------------------------ Youdao -----------------------
        {
            "fid": factory_infos[7]["name"],
            "llm_name": "maidalun1020/bce-embedding-base_v1",
            "tags": "TEXT EMBEDDING,",
            "max_tokens": 512,
            "model_type": LLMType.EMBEDDING.value
        },
    ]
    for info in factory_infos:
        try:
            LLMFactoriesService.save(**info)
        except Exception as e:
            pass
    for info in llm_infos:
        try:
            LLMService.save(**info)
        except Exception as e:
            pass

    LLMFactoriesService.filter_delete([LLMFactories.name == "Local"])
    LLMService.filter_delete([LLM.fid == "Local"])
    LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
    TenantLLMService.filter_delete([TenantLLM.llm_factory == "Moonshot", TenantLLM.llm_name == "flag-embedding"])
    LLMFactoriesService.filter_update([LLMFactoriesService.model.name == "QAnything"], {"name": "Youdao"})
    LLMService.filter_update([LLMService.model.fid == "QAnything"], {"fid": "Youdao"})
    TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
    """
    drop table llm;
    drop table llm_factories;
    update tenant set parser_ids='naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One';
    alter table knowledgebase modify avatar longtext;
    alter table user modify avatar longtext;
    alter table dialog modify icon longtext;
    """
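

# Refresh the factory/model seed data on every run, but only create the
# default superuser when the user table is completely empty.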
def init_web_data():
    start_time = time.time()

    init_llm_factory()
    if not UserService.get_all().count():
        init_superuser()

    print("init web data success:{}".format(time.time() - start_time))
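

# Running this file directly (the __main__ block below) creates the database
# tables and seeds the data; launch it from a context where the `api` package
# is importable (for example, the project root).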
if __name__ == '__main__':
    init_web_db()
    init_web_data()