
dataset.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging

from flask import request
from peewee import OperationalError

from api import settings
from api.db import FileSource, StatusEnum
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMService, TenantLLMService
from api.db.services.user_service import TenantService
from api.utils import get_uuid
from api.utils.api_utils import (
    check_duplicate_ids,
    dataset_readonly_fields,
    get_error_argument_result,
    get_error_data_result,
    get_parser_config,
    get_result,
    token_required,
    valid,
    valid_parser_config,
    verify_embedding_availability,
)
from api.utils.validation_utils import CreateDatasetReq, validate_and_parse_json_request


@manager.route("/datasets", methods=["POST"])  # noqa: F821
@token_required
def create(tenant_id):
    """
    Create a new dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset creation parameters.
        required: true
        schema:
          type: object
          required:
            - name
          properties:
            name:
              type: string
              description: Name of the dataset.
            avatar:
              type: string
              description: Base64 encoding of the avatar.
            description:
              type: string
              description: Description of the dataset.
            embedding_model:
              type: string
              description: Embedding model name.
            permission:
              type: string
              enum: ['me', 'team']
              description: Dataset permission.
            chunk_method:
              type: string
              enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
                     "picture", "presentation", "qa", "table", "tag"]
              description: Chunking method.
            pagerank:
              type: integer
              description: Set page rank.
            parser_config:
              type: object
              description: Parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
          properties:
            data:
              type: object
    """
    # Field name transformations during model dump:
    # | Original        | Dump Output |
    # |-----------------|-------------|
    # | embedding_model | embd_id     |
    # | chunk_method    | parser_id   |
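    # For example (illustrative values only): a validated request like
    # {"name": "kb1", "embedding_model": "BAAI/bge-large-zh-v1.5", "chunk_method": "naive"}
    # is dumped as {"name": "kb1", "embd_id": "BAAI/bge-large-zh-v1.5", "parser_id": "naive"}.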
    req, err = validate_and_parse_json_request(request, CreateDatasetReq)
    if err is not None:
        return get_error_argument_result(err)

    try:
        if KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
            return get_error_argument_result(message=f"Dataset name '{req['name']}' already exists")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    req["parser_config"] = get_parser_config(req["parser_id"], req["parser_config"])
    req["id"] = get_uuid()
    req["tenant_id"] = tenant_id
    req["created_by"] = tenant_id

    try:
        ok, t = TenantService.get_by_id(tenant_id)
        if not ok:
            return get_error_data_result(message="Tenant not found")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")

    if not req.get("embd_id"):
        req["embd_id"] = t.embd_id
    else:
        ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
        if not ok:
            return err

    try:
        if not KnowledgebaseService.save(**req):
            return get_error_data_result(message="Database operation failed")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")
    try:
        ok, k = KnowledgebaseService.get_by_id(req["id"])
        if not ok:
            return get_error_data_result(message="Dataset creation failed")
    except OperationalError as e:
        logging.exception(e)
        return get_error_data_result(message="Database operation failed")
    response_data = {}
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    for key, value in k.to_dict().items():
        new_key = key_mapping.get(key, key)
        response_data[new_key] = value
    return get_result(data=response_data)
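
# Client-side usage sketch for the endpoint above (kept in comments so the
# module stays import-safe). The host, port, and /api/v1 prefix are deployment
# assumptions, not guaranteed by this file; the payload follows the Swagger
# docstring, and the response is read assuming the standard get_result envelope.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:9380/api/v1/datasets",          # assumed base URL
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={"name": "my_dataset", "chunk_method": "naive"},
#   )
#   dataset_id = resp.json()["data"]["id"]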


@manager.route("/datasets", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id):
    """
    Delete datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset deletion parameters.
        required: true
        schema:
          type: object
          properties:
            ids:
              type: array
              items:
                type: string
              description: List of dataset IDs to delete.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    errors = []
    success_count = 0
    req = request.json
    if not req:
        ids = None
    else:
        ids = req.get("ids")
    if not ids:
        id_list = []
        kbs = KnowledgebaseService.query(tenant_id=tenant_id)
        for kb in kbs:
            id_list.append(kb.id)
    else:
        id_list = ids
    unique_id_list, duplicate_messages = check_duplicate_ids(id_list, "dataset")
    id_list = unique_id_list

    for id in id_list:
        kbs = KnowledgebaseService.query(id=id, tenant_id=tenant_id)
        if not kbs:
            errors.append(f"You don't own the dataset {id}")
            continue
        for doc in DocumentService.query(kb_id=id):
            if not DocumentService.remove_document(doc, tenant_id):
                errors.append(f"Remove document error for dataset {id}")
                continue
            f2d = File2DocumentService.get_by_document_id(doc.id)
            FileService.filter_delete(
                [
                    File.source_type == FileSource.KNOWLEDGEBASE,
                    File.id == f2d[0].file_id,
                ]
            )
            File2DocumentService.delete_by_document_id(doc.id)
        FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
        if not KnowledgebaseService.delete_by_id(id):
            errors.append(f"Delete dataset error for {id}")
            continue
        success_count += 1

    if errors:
        if success_count > 0:
            return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} datasets with {len(errors)} errors")
        else:
            return get_error_data_result(message="; ".join(errors))
    if duplicate_messages:
        if success_count > 0:
            return get_result(
                message=f"Partially deleted {success_count} datasets with {len(duplicate_messages)} errors",
                data={"success_count": success_count, "errors": duplicate_messages},
            )
        else:
            return get_error_data_result(message=";".join(duplicate_messages))
    return get_result(code=settings.RetCode.SUCCESS)
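
# Usage sketch (same base-URL and API-key assumptions as the create() example
# above). A JSON body with "ids" deletes those datasets; a missing or empty
# "ids" deletes every dataset the caller owns, as implemented above.
#
#   resp = requests.delete(
#       "http://localhost:9380/api/v1/datasets",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={"ids": [dataset_id]},
#   )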


@manager.route("/datasets/<dataset_id>", methods=["PUT"])  # noqa: F821
@token_required
def update(tenant_id, dataset_id):
    """
    Update a dataset.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset to update.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Dataset update parameters.
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: New name of the dataset.
            permission:
              type: string
              enum: ['me', 'team']
              description: Updated permission.
            chunk_method:
              type: string
              enum: ["naive", "manual", "qa", "table", "paper", "book", "laws",
                     "presentation", "picture", "one", "email", "tag"]
              description: Updated chunking method.
            parser_config:
              type: object
              description: Updated parser configuration.
    responses:
      200:
        description: Successful operation.
        schema:
          type: object
    """
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message="You don't own the dataset")
    req = request.json
    for k in req.keys():
        if dataset_readonly_fields(k):
            return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=f"'{k}' is readonly.")
    e, t = TenantService.get_by_id(tenant_id)
    invalid_keys = {"id", "embd_id", "chunk_num", "doc_num", "parser_id", "create_date", "create_time", "created_by", "status", "token_num", "update_date", "update_time"}
    if any(key in req for key in invalid_keys):
        return get_error_data_result(message="The input parameters are invalid.")
    permission = req.get("permission")
    chunk_method = req.get("chunk_method")
    parser_config = req.get("parser_config")
    valid_parser_config(parser_config)
    valid_permission = ["me", "team"]
    valid_chunk_method = ["naive", "manual", "qa", "table", "paper", "book", "laws", "presentation", "picture", "one", "email", "tag"]
    check_validation = valid(
        permission,
        valid_permission,
        chunk_method,
        valid_chunk_method,
    )
    if check_validation:
        return check_validation
    if "tenant_id" in req:
        if req["tenant_id"] != tenant_id:
            return get_error_data_result(message="Can't change `tenant_id`.")
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if "parser_config" in req:
        temp_dict = kb.parser_config
        temp_dict.update(req["parser_config"])
        req["parser_config"] = temp_dict
    if "chunk_count" in req:
        if req["chunk_count"] != kb.chunk_num:
            return get_error_data_result(message="Can't change `chunk_count`.")
        req.pop("chunk_count")
    if "document_count" in req:
        if req["document_count"] != kb.doc_num:
            return get_error_data_result(message="Can't change `document_count`.")
        req.pop("document_count")
    if req.get("chunk_method"):
        if kb.chunk_num != 0 and req["chunk_method"] != kb.parser_id:
            return get_error_data_result(message="If `chunk_count` is not 0, `chunk_method` is not changeable.")
        req["parser_id"] = req.pop("chunk_method")
        if req["parser_id"] != kb.parser_id:
            if not req.get("parser_config"):
                req["parser_config"] = get_parser_config(chunk_method, parser_config)
    if "embedding_model" in req:
        if kb.chunk_num != 0 and req["embedding_model"] != kb.embd_id:
            return get_error_data_result(message="If `chunk_count` is not 0, `embedding_model` is not changeable.")
        if not req.get("embedding_model"):
            return get_error_data_result("`embedding_model` can't be empty")
        valid_embedding_models = [
            "BAAI/bge-large-zh-v1.5",
            "BAAI/bge-base-en-v1.5",
            "BAAI/bge-large-en-v1.5",
            "BAAI/bge-small-en-v1.5",
            "BAAI/bge-small-zh-v1.5",
            "jinaai/jina-embeddings-v2-base-en",
            "jinaai/jina-embeddings-v2-small-en",
            "nomic-ai/nomic-embed-text-v1.5",
            "sentence-transformers/all-MiniLM-L6-v2",
            "text-embedding-v2",
            "text-embedding-v3",
            "maidalun1020/bce-embedding-base_v1",
        ]
        embd_model = LLMService.query(llm_name=req["embedding_model"], model_type="embedding")
        if embd_model:
            if req["embedding_model"] not in valid_embedding_models and not TenantLLMService.query(
                tenant_id=tenant_id,
                model_type="embedding",
                llm_name=req.get("embedding_model"),
            ):
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        if not embd_model:
            embd_model = TenantLLMService.query(tenant_id=tenant_id, model_type="embedding", llm_name=req.get("embedding_model"))
            if not embd_model:
                return get_error_data_result(f"`embedding_model` {req.get('embedding_model')} doesn't exist")
        req["embd_id"] = req.pop("embedding_model")
    if "name" in req:
        req["name"] = req["name"].strip()
        if len(req["name"]) > 128:
            return get_error_data_result(message="Dataset name should not be longer than 128 characters.")
        if req["name"].lower() != kb.name.lower() and len(KnowledgebaseService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
            return get_error_data_result(message="Duplicated dataset name when updating dataset.")
    flds = list(req.keys())
    for f in flds:
        if req[f] == "" and f in ["permission", "parser_id", "chunk_method"]:
            del req[f]
    if not KnowledgebaseService.update_by_id(kb.id, req):
        return get_error_data_result(message="Update dataset error. (Database error)")
    return get_result(code=settings.RetCode.SUCCESS)
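
# Usage sketch (same assumptions as the create() example above). Note the
# handler's rules: `chunk_method` and `embedding_model` are only changeable
# while the dataset's chunk count is 0, and renames are checked for duplicates.
#
#   resp = requests.put(
#       f"http://localhost:9380/api/v1/datasets/{dataset_id}",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={"name": "renamed_dataset"},
#   )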


@manager.route("/datasets", methods=["GET"])  # noqa: F821
@token_required
def list_datasets(tenant_id):
    """
    List datasets.
    ---
    tags:
      - Datasets
    security:
      - ApiKeyAuth: []
    parameters:
      - in: query
        name: id
        type: string
        required: false
        description: Dataset ID to filter by.
      - in: query
        name: name
        type: string
        required: false
        description: Dataset name to filter by.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 30
        description: Number of items per page.
      - in: query
        name: orderby
        type: string
        required: false
        default: "create_time"
        description: Field to order by.
      - in: query
        name: desc
        type: boolean
        required: false
        default: true
        description: Whether to sort in descending order.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Successful operation.
        schema:
          type: array
          items:
            type: object
    """
    id = request.args.get("id")
    name = request.args.get("name")
    if id:
        kbs = KnowledgebaseService.get_kb_by_id(id, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {id}")
    if name:
        kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
        if not kbs:
            return get_error_data_result(f"You don't own the dataset {name}")
    page_number = int(request.args.get("page", 1))
    items_per_page = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    # Read `desc` once so the validation and the branch share the same default
    # (true, matching the docstring), instead of two reads with different defaults.
    desc_arg = request.args.get("desc", "true").lower()
    if desc_arg not in ["true", "false"]:
        return get_error_data_result("desc should be true or false")
    desc = desc_arg == "true"
    tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
    kbs = KnowledgebaseService.get_list(
        [m["tenant_id"] for m in tenants],
        tenant_id,
        page_number,
        items_per_page,
        orderby,
        desc,
        id,
        name,
    )
    key_mapping = {
        "chunk_num": "chunk_count",
        "doc_num": "document_count",
        "parser_id": "chunk_method",
        "embd_id": "embedding_model",
    }
    renamed_list = []
    for kb in kbs:
        renamed_data = {}
        for key, value in kb.items():
            new_key = key_mapping.get(key, key)
            renamed_data[new_key] = value
        renamed_list.append(renamed_data)
    return get_result(data=renamed_list)
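
# Usage sketch (same assumptions as the create() example above). Query
# parameters mirror the Swagger docstring; keys in each returned record are
# already renamed by the handler (e.g. chunk_num -> chunk_count).
#
#   resp = requests.get(
#       "http://localhost:9380/api/v1/datasets",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       params={"page": 1, "page_size": 30, "orderby": "create_time", "desc": "true"},
#   )
#   for ds in resp.json()["data"]:
#       print(ds["id"], ds["name"], ds["chunk_count"])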