
dataset.py

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from flask import request

from api.db import StatusEnum, FileSource
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService, LLMService
from api.db.services.user_service import TenantService
from api import settings
from api.utils import get_uuid
from api.utils.api_utils import (
    get_result,
    token_required,
    get_error_data_result,
    valid,
    get_parser_config,
)


@manager.route("/datasets", methods=["POST"])
@token_required
def create(tenant_id):
    """
    Create a new dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset creation parameters.
        required: true
        schema:
          type: object
          required:
            - name
          properties:
            name:
              type: string
              description: Name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Dataset permission.
            language:
              type: string
              enum: ['Chinese', 'English']
              description: Language of the dataset.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email"]
              description: Chunking method.
            parser_config:
              type: object
              description: Parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
          properties:
            data:
              type: object
    """
    req = request.json
    e, t = TenantService.get_by_id(tenant_id)
    permission = req.get("permission")
    language = req.get("language")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_permission = ["me", "team"]
    valid_language = ["Chinese", "English"]
    valid_chunk_method = [
        "naive",
        "manual",
        "qa",
        "table",
        "paper",
        "book",
        "laws",
        "presentation",
        "picture",
        "one",
        "knowledge_graph",
        "email",
    ]
    check_validation = valid(
        permission,
        valid_permission,
        language,
        valid_language,
        chunk_method,
        valid_chunk_method,
    )
    if check_validation:
        return check_validation
    req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "tenant_id" in req:
        return get_error_data_result(message="`tenant_id` must not be provided")
    if "chunk_count" in req or "document_count" in req:
        return get_error_data_result(
            message="`chunk_count` or `document_count` must not be provided"
        )
    if "name" not in req:
        return get_error_data_result(message="`name` is required!")
    req["id"] = get_uuid()
    req["name"] = req["name"].strip()
    if req["name"] == "":
        return get_error_data_result(message="`name` can't be an empty string!")
    if KnowledgebaseService.query(
        name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
    ):
        return get_error_data_result(
            message="Duplicated dataset name in creating dataset."
        )
    req["tenant_id"] = req["created_by"] = tenant_id
    if not req.get("embedding_model"):
        req["embedding_model"] = t.embd_id
    else:
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5",
            "BAAI/bge-base-en-v1.5",
            "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5",
            "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en",
            "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5",
            "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2",
            "text-embedding-v3",
            "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(
                tenant_id=tenant_id,
                model_type="embedding",
                llm_name=req.get("embedding_model"),
            ):
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
        if not embd_model:
            embd_model = TenantLLMService.query(
                tenant_id=tenant_id,
                model_type="embedding",
                llm_name=req.get("embedding_model"),
            )
            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    mapped_keys = {
        new_key: req[old_key]
        for new_key, old_key in key_mapping.items()
        if old_key in req
    }
    req.update(mapped_keys)
    if not KnowledgebaseService.save(**req):
        return get_error_data_result(message="Create dataset error.(Database error)")
    renamed_data = {}
    e, k = KnowledgebaseService.get_by_id(req["id"])
    for key, value in k.to_dict().items():
        new_key = key_mapping.get(key, key)
        renamed_data[new_key] = value
    return get_result(data=renamed_data)
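
# Usage sketch (illustrative, not part of the module): creating a dataset over
# HTTP. The base URL, port, and API key below are assumptions -- adjust them to
# wherever this blueprint is actually mounted in your deployment.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:9380/api/v1/datasets",  # assumed mount point
#       headers={"Authorization": "Bearer <YOUR_API_KEY>"},
#       json={"name": "my_dataset", "chunk_method": "naive", "permission": "me"},
#   )
#   print(resp.json())  # expected: {"code": 0, "data": {...}} on success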


@manager.route("/datasets", methods=["DELETE"])
@token_required
def delete(tenant_id):
    """
    Delete datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset deletion parameters.
        required: true
        schema:
          type: object
          properties:
            ids:
              type: array
              items:
                type: string
              description: List of dataset IDs to delete.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    req = request.json
    if not req:
        ids = None
    else:
        ids = req.get("ids")
    if not ids:
        id_list = []
        kbs = KnowledgebaseService.query(tenant_id=tenant_id)
        for kb in kbs:
            id_list.append(kb.id)
    else:
        id_list = ids
    for id in id_list:
        kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
        if not kbs:
            return get_error_data_result(message=f"You don't own the dataset {id}")
        for doc in DocumentService.query(kb_id=id):
            if not DocumentService.remove_document(doc, tenant_id):
                return get_error_data_result(
                    message="Remove document error.(Database error)"
                )
            f2d = File2DocumentService.get_by_document_id(doc.id)
            FileService.filter_delete(
                [
                    File.source_type == FileSource.KNOWLEDGEBASE,
                    File.id == f2d[0].file_id,
                ]
            )
            File2DocumentService.delete_by_document_id(doc.id)
        if not KnowledgebaseService.delete_by_id(id):
            return get_error_data_result(message="Delete dataset error.(Database error)")
    return get_result(code=settings.RetCode.SUCCESS)
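
# Usage sketch (illustrative, not part of the module): deleting datasets by ID.
# Per the handler above, omitting the body or the "ids" field deletes every
# dataset owned by the tenant, so pass explicit IDs unless that is intended.
# The base URL and API key are assumptions.
#
#   import requests
#
#   resp = requests.delete(
#       "http://localhost:9380/api/v1/datasets",  # assumed mount point
#       headers={"Authorization": "Bearer <YOUR_API_KEY>"},
#       json={"ids": ["<dataset_id_1>", "<dataset_id_2>"]},
#   )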


@manager.route("/datasets/<dataset_id>", methods=["PUT"])
@token_required
def update(tenant_id, dataset_id):
    """
    Update a dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset to update.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset update parameters.
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: New name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Updated permission.
            language:
              type: string
              enum: ['Chinese', 'English']
              description: Updated language.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email"]
              description: Updated chunking method.
            parser_config:
              type: object
              description: Updated parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message="You don't own the dataset")
    req = request.json
    e, t = TenantService.get_by_id(tenant_id)
    invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id"}
    if any(key in req for key in invalid_keys):
        return get_error_data_result(message="The input parameters are invalid.")
    permission = req.get("permission")
    language = req.get("language")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_permission = ["me", "team"]
    valid_language = ["Chinese", "English"]
    valid_chunk_method = [
        "naive",
        "manual",
        "qa",
        "table",
        "paper",
        "book",
        "laws",
        "presentation",
        "picture",
        "one",
        "knowledge_graph",
        "email",
    ]
    check_validation = valid(
        permission,
        valid_permission,
        language,
        valid_language,
        chunk_method,
        valid_chunk_method,
    )
    if check_validation:
        return check_validation
    if "tenant_id" in req:
        if req["tenant_id"] != tenant_id:
            return get_error_data_result(message="Can't change `tenant_id`.")
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if "parser_config" in req:
        temp_dict = kb.parser_config
        temp_dict.update(req["parser_config"])
        req["parser_config"] = temp_dict
    if "chunk_count" in req:
        if req["chunk_count"] != kb.chunk_num:
            return get_error_data_result(message="Can't change `chunk_count`.")
        req.pop("chunk_count")
    if "document_count" in req:
        if req["document_count"] != kb.doc_num:
            return get_error_data_result(message="Can't change `document_count`.")
        req.pop("document_count")
    if "chunk_method" in req:
        if kb.chunk_num != 0 and req["chunk_method"] != kb.parser_id:
            return get_error_data_result(
                message="If `chunk_count` is not 0, `chunk_method` is not changeable."
            )
        req["parser_id"] = req.pop("chunk_method")
        if req["parser_id"] != kb.parser_id:
            if not req.get("parser_config"):
                req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "embedding_model" in req:
        if kb.chunk_num != 0 and req["embedding_model"] != kb.embd_id:
            return get_error_data_result(
                message="If `chunk_count` is not 0, `embedding_model` is not changeable."
            )
        if not req.get("embedding_model"):
            return get_error_data_result("`embedding_model` can't be empty")
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5",
            "BAAI/bge-base-en-v1.5",
            "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5",
            "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en",
            "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5",
            "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2",
            "text-embedding-v3",
            "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(
                tenant_id=tenant_id,
                model_type="embedding",
                llm_name=req.get("embedding_model"),
            ):
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
        if not embd_model:
            embd_model = TenantLLMService.query(
                tenant_id=tenant_id,
                model_type="embedding",
                llm_name=req.get("embedding_model"),
            )
            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
        req["embd_id"] = req.pop("embedding_model")
    if "name" in req:
        req["name"] = req["name"].strip()
        if (
            req["name"].lower() != kb.name.lower()
            and len(
                KnowledgebaseService.query(
                    name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
                )
            )
            > 0
        ):
            return get_error_data_result(
                message="Duplicated dataset name in updating dataset."
            )
    if not KnowledgebaseService.update_by_id(kb.id, req):
        return get_error_data_result(message="Update dataset error.(Database error)")
    return get_result(code=settings.RetCode.SUCCESS)
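
# Usage sketch (illustrative, not part of the module): renaming a dataset.
# Note from the handler above that `chunk_method` and `embedding_model` are
# only changeable while the dataset's chunk count is 0. The base URL and API
# key are assumptions.
#
#   import requests
#
#   resp = requests.put(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>",  # assumed mount point
#       headers={"Authorization": "Bearer <YOUR_API_KEY>"},
#       json={"name": "renamed_dataset"},
#   )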


@manager.route("/datasets", methods=["GET"])
@token_required
def list(tenant_id):
    """
    List datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: id
        type: string
        required: false
        description: Dataset ID to filter.
      - in: query
        name: name
        type: string
        required: false
        description: Dataset name to filter.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 30
        description: Number of items per page.
      - in: query
        name: orderby
        type: string
        required: false
        default: "create_time"
        description: Field to order by.
      - in: query
        name: desc
        type: boolean
        required: false
        default: true
        description: Whether to order in descending order.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Successful operation.
        schema:
          type: array
          items:
            type: object
    """
    id = request.args.get("id")
    name = request.args.get("name")
    if id:
        kbs = KnowledgebaseService.get_kb_by_id(id, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {id}")
    if name:
        kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {name}")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False" or request.args.get("desc") == "false":
        desc = False
    else:
        desc = True
    tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
    kbs = KnowledgebaseService.get_list(
        [m["tenant_id"] for m in tenants],
        tenant_id,
        page_number,
        items_per_page,
        orderby,
        desc,
        id,
        name,
    )
    renamed_list = []
    for kb in kbs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "doc_num": "document_count",
            "parser_id": "chunk_method",
            "embd_id": "embedding_model",
        }
        renamed_data = {}
        for key, value in kb.items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        renamed_list.append(renamed_data)
    return get_result(data=renamed_list)
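
# Usage sketch (illustrative, not part of the module): listing datasets with
# pagination. The base URL and API key are assumptions.
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:9380/api/v1/datasets",  # assumed mount point
#       headers={"Authorization": "Bearer <YOUR_API_KEY>"},
#       params={"page": 1, "page_size": 30, "orderby": "create_time", "desc": "true"},
#   )
#   for ds in resp.json().get("data", []):
#       print(ds["id"], ds["name"], ds["chunk_count"])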