
dataset.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import request

from api.db import StatusEnum, FileSource
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService, LLMService
from api.db.services.user_service import TenantService
from api import settings
from api.utils import get_uuid
from api.utils.api_utils import (
    get_result,
    token_required,
    get_error_data_result,
    valid,
    get_parser_config,
    valid_parser_config,
    dataset_readonly_fields,
    check_duplicate_ids,
)


@manager.route("/datasets", methods=["POST"])  # noqa: F821
@token_required
def create(tenant_id):
    """
    Create a new dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset creation parameters.
        required: true
        schema:
          type: object
          required:
            - name
          properties:
            name:
              type: string
              description: Name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Dataset permission.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email", "tag"]
              description: Chunking method.
            parser_config:
              type: object
              description: Parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
          properties:
            data:
              type: object
    """
    req = request.json
    for k in req.keys():
        if dataset_readonly_fields(k):
            return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"'{k}' is readonly.")
    e, t = TenantService.get_by_id(tenant_id)
    permission = req.get("permission")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_parser_config(parser_config)
    valid_permission = ["me", "team"]
    valid_chunk_method = [
        "naive",
        "manual",
        "qa",
        "table",
        "paper",
        "book",
        "laws",
        "presentation",
        "picture",
        "one",
        "knowledge_graph",
        "email",
        "tag",
    ]
    check_validation = valid(
        permission,
        valid_permission,
        chunk_method,
        valid_chunk_method,
    )
    if check_validation:
        return check_validation
    req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "tenant_id" in req:
        return get_error_data_result(message="`tenant_id` must not be provided")
    if "chunk_count" in req or "document_count" in req:
        return get_error_data_result(
            message="`chunk_count` or `document_count` must not be provided"
        )
    if "name" not in req:
        return get_error_data_result(message="`name` is not empty!")
    req["id"] = get_uuid()
    req["name"] = req["name"].strip()
    if req["name"] == "":
        return get_error_data_result(message="`name` is not empty string!")
    if len(req["name"]) >= 128:
        return get_error_data_result(
            message="Dataset name should not be longer than 128 characters."
        )
    if KnowledgebaseService.query(
        name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
    ):
        return get_error_data_result(
            message="Duplicated dataset name in creating dataset."
        )
    req["tenant_id"] = tenant_id
    req["created_by"] = tenant_id
    if not req.get("embedding_model"):
        req["embedding_model"] = t.embd_id
    else:
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5",
            "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(
                tenant_id=tenant_id,
                model_type="embedding",
                llm_name=req.get("embedding_model"),
            ):
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        if not embd_model:
            embd_model = TenantLLMService.query(
                tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model")
            )
            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    mapped_keys = {
        new_key: req[old_key]
        for new_key, old_key in key_mapping.items()
        if old_key in req
    }
    req.update(mapped_keys)
    flds = list(req.keys())
    for f in flds:
        if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
            del req[f]
    if not KnowledgebaseService.save(**req):
        return get_error_data_result(message="Create dataset error.(Database error)")
    renamed_data = {}
    e, k = KnowledgebaseService.get_by_id(req["id"])
    for key, value in k.to_dict().items():
        new_key = key_mapping.get(key, key)
        renamed_data[new_key] = value
    return get_result(data=renamed_data)
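
# Illustrative sketch only: a minimal client-side call to the POST /datasets
# route above, using the `requests` library. BASE_URL (including any blueprint
# prefix) and API_KEY are assumptions, not values defined in this file.
#
#   import requests
#   resp = requests.post(
#       f"{BASE_URL}/datasets",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={"name": "my_dataset", "chunk_method": "naive", "permission": "me"},
#   )
#   print(resp.json())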


@manager.route("/datasets", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id):
    """
    Delete datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset deletion parameters.
        required: true
        schema:
          type: object
          properties:
            ids:
              type: array
              items:
                type: string
              description: List of dataset IDs to delete.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    errors = []
    success_count = 0
    req = request.json
    if not req:
        ids = None
    else:
        ids = req.get("ids")
    if not ids:
        id_list = []
        kbs = KnowledgebaseService.query(tenant_id=tenant_id)
        for kb in kbs:
            id_list.append(kb.id)
    else:
        id_list = ids
    unique_id_list, duplicate_messages = check_duplicate_ids(id_list, "dataset")
    id_list = unique_id_list
    for id in id_list:
        kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
        if not kbs:
            errors.append(f"You don't own the dataset {id}")
            continue
        for doc in DocumentService.query(kb_id=id):
            if not DocumentService.remove_document(doc, tenant_id):
                errors.append(f"Remove document error for dataset {id}")
                continue
            f2d = File2DocumentService.get_by_document_id(doc.id)
            FileService.filter_delete(
                [
                    File.source_type == FileSource.KNOWLEDGEBASE,
                    File.id == f2d[0].file_id,
                ]
            )
            File2DocumentService.delete_by_document_id(doc.id)
        FileService.filter_delete(
            [File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name]
        )
        if not KnowledgebaseService.delete_by_id(id):
            errors.append(f"Delete dataset error for {id}")
            continue
        success_count += 1
    if errors:
        if success_count > 0:
            return get_result(
                data={"success_count": success_count, "errors": errors},
                message=f"Partially deleted {success_count} datasets with {len(errors)} errors",
            )
        else:
            return get_error_data_result(message="; ".join(errors))
    if duplicate_messages:
        if success_count > 0:
            return get_result(
                message=f"Partially deleted {success_count} datasets with {len(duplicate_messages)} errors",
                data={"success_count": success_count, "errors": duplicate_messages},
            )
        else:
            return get_error_data_result(message=";".join(duplicate_messages))
    return get_result(code=settings.RetCode.SUCCESS)
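
# Illustrative sketch only: deleting two datasets by ID through the DELETE
# /datasets route above; sending an empty body makes the handler collect and
# delete every dataset owned by the tenant. BASE_URL, API_KEY and the dataset
# IDs are assumptions, not values defined in this file.
#
#   import requests
#   resp = requests.delete(
#       f"{BASE_URL}/datasets",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={"ids": ["<dataset_id_1>", "<dataset_id_2>"]},
#   )
#   print(resp.json())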


@manager.route("/datasets/<dataset_id>", methods=["PUT"])  # noqa: F821
@token_required
def update(tenant_id, dataset_id):
    """
    Update a dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset to update.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset update parameters.
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: New name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Updated permission.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "knowledge_graph", "email", "tag"]
              description: Updated chunking method.
            parser_config:
              type: object
              description: Updated parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message="You don't own the dataset")
    req = request.json
    for k in req.keys():
        if dataset_readonly_fields(k):
            return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"'{k}' is readonly.")
    e, t = TenantService.get_by_id(tenant_id)
    invalid_keys = {
        "id", "embd_id", "chunk_num", "doc_num", "parser_id", "create_date", "create_time",
        "created_by", "status", "token_num", "update_date", "update_time",
    }
    if any(key in req for key in invalid_keys):
        return get_error_data_result(message="The input parameters are invalid.")
    permission = req.get("permission")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_parser_config(parser_config)
    valid_permission = ["me", "team"]
    valid_chunk_method = [
        "naive",
        "manual",
        "qa",
        "table",
        "paper",
        "book",
        "laws",
        "presentation",
        "picture",
        "one",
        "knowledge_graph",
        "email",
        "tag",
    ]
    check_validation = valid(
        permission,
        valid_permission,
        chunk_method,
        valid_chunk_method,
    )
    if check_validation:
        return check_validation
    if "tenant_id" in req:
        if req["tenant_id"] != tenant_id:
            return get_error_data_result(message="Can't change `tenant_id`.")
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if "parser_config" in req:
        temp_dict = kb.parser_config
        temp_dict.update(req["parser_config"])
        req["parser_config"] = temp_dict
    if "chunk_count" in req:
        if req["chunk_count"] != kb.chunk_num:
            return get_error_data_result(message="Can't change `chunk_count`.")
        req.pop("chunk_count")
    if "document_count" in req:
        if req["document_count"] != kb.doc_num:
            return get_error_data_result(message="Can't change `document_count`.")
        req.pop("document_count")
    if req.get("chunk_method"):
        if kb.chunk_num != 0 and req["chunk_method"] != kb.parser_id:
            return get_error_data_result(
                message="If `chunk_count` is not 0, `chunk_method` is not changeable."
            )
        req["parser_id"] = req.pop("chunk_method")
        if req["parser_id"] != kb.parser_id:
            if not req.get("parser_config"):
                req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "embedding_model" in req:
        if kb.chunk_num != 0 and req["embedding_model"] != kb.embd_id:
            return get_error_data_result(
                message="If `chunk_count` is not 0, `embedding_model` is not changeable."
            )
        if not req.get("embedding_model"):
            return get_error_data_result("`embedding_model` can't be empty")
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5",
            "BAAI/bge-base-en-v1.5",
            "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5",
            "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en",
            "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5",
            "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2",
            "text-embedding-v3",
            "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(
            llm_name=req["embedding_model"], model_type="embedding"
        )
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(
                tenant_id=tenant_id,
                model_type="embedding",
                llm_name=req.get("embedding_model"),
            ):
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        if not embd_model:
            embd_model = TenantLLMService.query(
                tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model")
            )
            if not embd_model:
                return get_error_data_result(
                    f"`embedding_model` {req.get('embedding_model')} doesn't exist"
                )
        req["embd_id"] = req.pop("embedding_model")
    if "name" in req:
        req["name"] = req["name"].strip()
        if len(req["name"]) >= 128:
            return get_error_data_result(
                message="Dataset name should not be longer than 128 characters."
            )
        if (
            req["name"].lower() != kb.name.lower()
            and len(
                KnowledgebaseService.query(
                    name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value
                )
            )
            > 0
        ):
            return get_error_data_result(
                message="Duplicated dataset name in updating dataset."
            )
    flds = list(req.keys())
    for f in flds:
        if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
            del req[f]
    if not KnowledgebaseService.update_by_id(kb.id, req):
        return get_error_data_result(message="Update dataset error.(Database error)")
    return get_result(code=settings.RetCode.SUCCESS)
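
# Illustrative sketch only: renaming a dataset via the PUT /datasets/<dataset_id>
# route above. Per the checks in this handler, `chunk_method` and `embedding_model`
# cannot be switched once the dataset already holds chunks (chunk_num != 0).
# BASE_URL, API_KEY and DATASET_ID are assumptions, not values defined in this file.
#
#   import requests
#   resp = requests.put(
#       f"{BASE_URL}/datasets/{DATASET_ID}",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={"name": "renamed_dataset"},
#   )
#   print(resp.json())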


@manager.route("/datasets", methods=["GET"])  # noqa: F821
@token_required
def list_datasets(tenant_id):
    """
    List datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: id
        type: string
        required: false
        description: Dataset ID to filter.
      - in: query
        name: name
        type: string
        required: false
        description: Dataset name to filter.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 1024
        description: Number of items per page.
      - in: query
        name: orderby
        type: string
        required: false
        default: "create_time"
        description: Field to order by.
      - in: query
        name: desc
        type: boolean
        required: false
        default: true
        description: Order in descending.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Successful operation.
        schema:
          type: array
          items:
            type: object
    """
    id = request.args.get("id")
    name = request.args.get("name")
    if id:
        kbs = KnowledgebaseService.get_kb_by_id(id, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {id}")
    if name:
        kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {name}")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc", "false").lower() not in ["true", "false"]:
        return get_error_data_result("desc should be true or false")
    if request.args.get("desc", "true").lower() == "false":
        desc = False
    else:
        desc = True
    tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
    kbs = KnowledgebaseService.get_list(
        [m["tenant_id"] for m in tenants],
        tenant_id,
        page_number,
        items_per_page,
        orderby,
        desc,
        id,
        name,
    )
    renamed_list = []
    for kb in kbs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "doc_num": "document_count",
            "parser_id": "chunk_method",
            "embd_id": "embedding_model",
        }
        renamed_data = {}
        for key, value in kb.items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        renamed_list.append(renamed_data)
    return get_result(data=renamed_list)
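
# Illustrative sketch only: listing datasets, newest first, through the GET
# /datasets route above. The query parameters mirror the Swagger docstring;
# BASE_URL and API_KEY are assumptions, not values defined in this file.
#
#   import requests
#   resp = requests.get(
#       f"{BASE_URL}/datasets",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       params={"page": 1, "page_size": 30, "orderby": "create_time", "desc": "true"},
#   )
#   print(resp.json())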