
doc.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pathlib
import datetime
from api.db.services.dialog_service import keyword_extraction
from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import rag_tokenizer
from api.db import LLMType, ParserType
from api.db.services.llm_service import TenantLLMService
from api import settings
import hashlib
import re
from api.utils.api_utils import token_required
from api.db.db_models import Task
from api.db.services.task_service import TaskService, queue_tasks
from api.utils.api_utils import server_error_response
from api.utils.api_utils import get_result, get_error_data_result
from io import BytesIO
from flask import request, send_file
from api.db import FileSource, TaskStatus, FileType
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import construct_json_result, get_parser_config
from rag.nlp import search
from rag.utils import rmSpace
from rag.utils.storage_factory import STORAGE_IMPL
import os

MAXIMUM_OF_UPLOADING_FILES = 256


# `manager` is provided by the API app loader at import time; it exposes
# Flask-style route registration for this module's endpoints.
@manager.route("/datasets/<dataset_id>/documents", methods=["POST"])
@token_required
def upload(dataset_id, tenant_id):
    """
    Upload documents to a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: formData
        name: file
        type: file
        required: true
        description: Document files to upload.
    responses:
      200:
        description: Successfully uploaded documents.
        schema:
          type: object
          properties:
            data:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: Document ID.
                  name:
                    type: string
                    description: Document name.
                  chunk_count:
                    type: integer
                    description: Number of chunks.
                  token_count:
                    type: integer
                    description: Number of tokens.
                  dataset_id:
                    type: string
                    description: ID of the dataset.
                  chunk_method:
                    type: string
                    description: Chunking method used.
                  run:
                    type: string
                    description: Processing status.
    """
    if "file" not in request.files:
        return get_error_data_result(
            message="No file part!", code=settings.RetCode.ARGUMENT_ERROR
        )
    file_objs = request.files.getlist("file")
    for file_obj in file_objs:
        if file_obj.filename == "":
            return get_result(
                message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR
            )
    '''
    # total size
    total_size = 0
    for file_obj in file_objs:
        file_obj.seek(0, os.SEEK_END)
        total_size += file_obj.tell()
        file_obj.seek(0)
    MAX_TOTAL_FILE_SIZE = 10 * 1024 * 1024
    if total_size > MAX_TOTAL_FILE_SIZE:
        return get_result(
            message=f"Total file size exceeds 10MB limit! ({total_size / (1024 * 1024):.2f} MB)",
            code=settings.RetCode.ARGUMENT_ERROR,
        )
    '''
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if not e:
        raise LookupError(f"Can't find the dataset with ID {dataset_id}!")
    err, files = FileService.upload_document(kb, file_objs, tenant_id)
    if err:
        return get_result(message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
    # rename key's name
    renamed_doc_list = []
    for file in files:
        doc = file[0]
        key_mapping = {
            "chunk_num": "chunk_count",
            "kb_id": "dataset_id",
            "token_num": "token_count",
            "parser_id": "chunk_method",
        }
        renamed_doc = {}
        for key, value in doc.items():
            new_key = key_mapping.get(key, key)
            renamed_doc[new_key] = value
        renamed_doc["run"] = "UNSTART"
        renamed_doc_list.append(renamed_doc)
    return get_result(data=renamed_doc_list)
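
# Illustrative client call for the upload endpoint above (a sketch, not part of this
# module). It assumes the API is mounted under /api/v1 on http://localhost:9380 and
# that API_KEY holds a valid token; adjust both for your deployment.
#
#   import requests
#   resp = requests.post(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents",
#       headers={"Authorization": "Bearer " + API_KEY},
#       files=[("file", ("example.pdf", open("example.pdf", "rb")))],
#   )
#   print(resp.json())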


@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["PUT"])
@token_required
def update_doc(tenant_id, dataset_id, document_id):
    """
    Update a document within a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document to update.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Document update parameters.
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: New name of the document.
            parser_config:
              type: object
              description: Parser configuration.
            chunk_method:
              type: string
              description: Chunking method.
    responses:
      200:
        description: Document updated successfully.
        schema:
          type: object
    """
    req = request.json
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message="You don't own the dataset.")
    doc = DocumentService.query(kb_id=dataset_id, id=document_id)
    if not doc:
        return get_error_data_result(message="The dataset doesn't own the document.")
    doc = doc[0]
    if "chunk_count" in req:
        if req["chunk_count"] != doc.chunk_num:
            return get_error_data_result(message="Can't change `chunk_count`.")
    if "token_count" in req:
        if req["token_count"] != doc.token_num:
            return get_error_data_result(message="Can't change `token_count`.")
    if "progress" in req:
        if req["progress"] != doc.progress:
            return get_error_data_result(message="Can't change `progress`.")
    if "name" in req and req["name"] != doc.name:
        if (
            pathlib.Path(req["name"].lower()).suffix
            != pathlib.Path(doc.name.lower()).suffix
        ):
            return get_result(
                message="The extension of file can't be changed",
                code=settings.RetCode.ARGUMENT_ERROR,
            )
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
                return get_error_data_result(
                    message="Duplicated document name in the same dataset."
                )
        if not DocumentService.update_by_id(document_id, {"name": req["name"]}):
            return get_error_data_result(message="Database error (Document rename)!")
        informs = File2DocumentService.get_by_document_id(document_id)
        if informs:
            e, file = FileService.get_by_id(informs[0].file_id)
            FileService.update_by_id(file.id, {"name": req["name"]})
    if "parser_config" in req:
        DocumentService.update_parser_config(doc.id, req["parser_config"])
    if "chunk_method" in req:
        valid_chunk_method = {
            "naive",
            "manual",
            "qa",
            "table",
            "paper",
            "book",
            "laws",
            "presentation",
            "picture",
            "one",
            "knowledge_graph",
            "email",
        }
        if req.get("chunk_method") not in valid_chunk_method:
            return get_error_data_result(
                f"`chunk_method` {req['chunk_method']} doesn't exist"
            )
        if doc.parser_id.lower() == req["chunk_method"].lower():
            return get_result()
        if doc.type == FileType.VISUAL or re.search(r"\.(ppt|pptx|pages)$", doc.name):
            return get_error_data_result(message="Not supported yet!")
        e = DocumentService.update_by_id(
            doc.id,
            {
                "parser_id": req["chunk_method"],
                "progress": 0,
                "progress_msg": "",
                "run": TaskStatus.UNSTART.value,
            },
        )
        if not e:
            return get_error_data_result(message="Document not found!")
        req["parser_config"] = get_parser_config(
            req["chunk_method"], req.get("parser_config")
        )
        DocumentService.update_parser_config(doc.id, req["parser_config"])
        if doc.token_num > 0:
            e = DocumentService.increment_chunk_num(
                doc.id,
                doc.kb_id,
                doc.token_num * -1,
                doc.chunk_num * -1,
                doc.process_duation * -1,  # spelling follows the Document model's column name
            )
            if not e:
                return get_error_data_result(message="Document not found!")
            settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
    return get_result()
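
# Illustrative client call for the update endpoint above (a sketch, not part of this
# module); same base-URL and API_KEY assumptions as the upload example.
#
#   import requests
#   resp = requests.put(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents/<document_id>",
#       headers={"Authorization": "Bearer " + API_KEY},
#       json={"name": "renamed.pdf", "chunk_method": "naive"},
#   )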


@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["GET"])
@token_required
def download(tenant_id, dataset_id, document_id):
    """
    Download a document from a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    produces:
      - application/octet-stream
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document to download.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Document file stream.
        schema:
          type: file
      400:
        description: Error message.
        schema:
          type: object
    """
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message=f"You do not own the dataset {dataset_id}.")
    doc = DocumentService.query(kb_id=dataset_id, id=document_id)
    if not doc:
        return get_error_data_result(
            message=f"The dataset doesn't own the document {document_id}."
        )
    # The process of downloading
    doc_id, doc_location = File2DocumentService.get_storage_address(
        doc_id=document_id
    )  # minio address
    file_stream = STORAGE_IMPL.get(doc_id, doc_location)
    if not file_stream:
        return construct_json_result(
            message="This file is empty.", code=settings.RetCode.DATA_ERROR
        )
    file = BytesIO(file_stream)
    # Use send_file with a proper filename and MIME type
    return send_file(
        file,
        as_attachment=True,
        download_name=doc[0].name,
        mimetype="application/octet-stream",  # Set a default MIME type
    )
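
# Illustrative client call for the download endpoint above (a sketch, not part of this
# module); the response body is the raw file stream.
#
#   import requests
#   resp = requests.get(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents/<document_id>",
#       headers={"Authorization": "Bearer " + API_KEY},
#   )
#   with open("downloaded_file", "wb") as f:
#       f.write(resp.content)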


@manager.route("/datasets/<dataset_id>/documents", methods=["GET"])
@token_required
def list_docs(dataset_id, tenant_id):
    """
    List documents in a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: query
        name: id
        type: string
        required: false
        description: Filter by document ID.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 30
        description: Number of items per page.
      - in: query
        name: orderby
        type: string
        required: false
        default: "create_time"
        description: Field to order by.
      - in: query
        name: desc
        type: boolean
        required: false
        default: true
        description: Order in descending.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: List of documents.
        schema:
          type: object
          properties:
            total:
              type: integer
              description: Total number of documents.
            docs:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: Document ID.
                  name:
                    type: string
                    description: Document name.
                  chunk_count:
                    type: integer
                    description: Number of chunks.
                  token_count:
                    type: integer
                    description: Number of tokens.
                  dataset_id:
                    type: string
                    description: ID of the dataset.
                  chunk_method:
                    type: string
                    description: Chunking method used.
                  run:
                    type: string
                    description: Processing status.
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
    id = request.args.get("id")
    name = request.args.get("name")
    if id and not DocumentService.query(id=id, kb_id=dataset_id):
        return get_error_data_result(message=f"You don't own the document {id}.")
    if name and not DocumentService.query(name=name, kb_id=dataset_id):
        return get_error_data_result(message=f"You don't own the document {name}.")
    page = int(request.args.get("page", 1))
    keywords = request.args.get("keywords", "")
    page_size = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False":
        desc = False
    else:
        desc = True
    docs, tol = DocumentService.get_list(
        dataset_id, page, page_size, orderby, desc, keywords, id, name
    )
    # rename key's name
    renamed_doc_list = []
    for doc in docs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "kb_id": "dataset_id",
            "token_num": "token_count",
            "parser_id": "chunk_method",
        }
        run_mapping = {
            "0": "UNSTART",
            "1": "RUNNING",
            "2": "CANCEL",
            "3": "DONE",
            "4": "FAIL",
        }
        renamed_doc = {}
        for key, value in doc.items():
            new_key = key_mapping.get(key, key)
            renamed_doc[new_key] = value
            if key == "run":
                renamed_doc["run"] = run_mapping.get(str(value))
        renamed_doc_list.append(renamed_doc)
    return get_result(data={"total": tol, "docs": renamed_doc_list})
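
# Illustrative client call for the listing endpoint above (a sketch, not part of this
# module); pagination and ordering are passed as query parameters, and the payload is
# assumed to come back under the "data" key produced by get_result().
#
#   import requests
#   resp = requests.get(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents",
#       headers={"Authorization": "Bearer " + API_KEY},
#       params={"page": 1, "page_size": 30, "orderby": "create_time", "desc": "True"},
#   )
#   print(resp.json()["data"]["total"])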


@manager.route("/datasets/<dataset_id>/documents", methods=["DELETE"])
@token_required
def delete(tenant_id, dataset_id):
    """
    Delete documents from a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: body
        name: body
        description: Document deletion parameters.
        required: true
        schema:
          type: object
          properties:
            ids:
              type: array
              items:
                type: string
              description: List of document IDs to delete.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Documents deleted successfully.
        schema:
          type: object
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
    req = request.json
    if not req:
        doc_ids = None
    else:
        doc_ids = req.get("ids")
    if not doc_ids:
        doc_list = []
        docs = DocumentService.query(kb_id=dataset_id)
        for doc in docs:
            doc_list.append(doc.id)
    else:
        doc_list = doc_ids
    root_folder = FileService.get_root_folder(tenant_id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, tenant_id)
    errors = ""
    for doc_id in doc_list:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
                return get_error_data_result(message="Document not found!")
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_error_data_result(message="Tenant not found!")
            b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
            if not DocumentService.remove_document(doc, tenant_id):
                return get_error_data_result(
                    message="Database error (Document removal)!"
                )
            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete(
                [
                    File.source_type == FileSource.KNOWLEDGEBASE,
                    File.id == f2d[0].file_id,
                ]
            )
            File2DocumentService.delete_by_document_id(doc_id)
            STORAGE_IMPL.rm(b, n)
        except Exception as e:
            errors += str(e)
    if errors:
        return get_result(message=errors, code=settings.RetCode.SERVER_ERROR)
    return get_result()


@manager.route("/datasets/<dataset_id>/chunks", methods=["POST"])
@token_required
def parse(tenant_id, dataset_id):
    """
    Start parsing documents into chunks.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: body
        name: body
        description: Parsing parameters.
        required: true
        schema:
          type: object
          properties:
            document_ids:
              type: array
              items:
                type: string
              description: List of document IDs to parse.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Parsing started successfully.
        schema:
          type: object
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("document_ids"):
        return get_error_data_result("`document_ids` is required")
    for id in req["document_ids"]:
        doc = DocumentService.query(id=id, kb_id=dataset_id)
        if not doc:
            return get_error_data_result(message=f"You don't own the document {id}.")
        if doc[0].progress != 0.0:
            return get_error_data_result(
                "Can't parse a document whose progress is not 0; it is already parsing or parsed."
            )
        info = {"run": "1", "progress": 0}
        info["progress_msg"] = ""
        info["chunk_num"] = 0
        info["token_num"] = 0
        DocumentService.update_by_id(id, info)
        settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), dataset_id)
        TaskService.filter_delete([Task.doc_id == id])
        e, doc = DocumentService.get_by_id(id)
        doc = doc.to_dict()
        doc["tenant_id"] = tenant_id
        bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
        queue_tasks(doc, bucket, name)
    return get_result()
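
# Illustrative client call that triggers chunking for two documents (a sketch, not part
# of this module); the body mirrors the `document_ids` field documented above.
#
#   import requests
#   resp = requests.post(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/chunks",
#       headers={"Authorization": "Bearer " + API_KEY},
#       json={"document_ids": ["<doc_id_1>", "<doc_id_2>"]},
#   )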


@manager.route("/datasets/<dataset_id>/chunks", methods=["DELETE"])
@token_required
def stop_parsing(tenant_id, dataset_id):
    """
    Stop parsing documents into chunks.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: body
        name: body
        description: Stop parsing parameters.
        required: true
        schema:
          type: object
          properties:
            document_ids:
              type: array
              items:
                type: string
              description: List of document IDs to stop parsing.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Parsing stopped successfully.
        schema:
          type: object
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("document_ids"):
        return get_error_data_result("`document_ids` is required")
    for id in req["document_ids"]:
        doc = DocumentService.query(id=id, kb_id=dataset_id)
        if not doc:
            return get_error_data_result(message=f"You don't own the document {id}.")
        if int(doc[0].progress) == 1 or int(doc[0].progress) == 0:
            return get_error_data_result(
                "Can't stop parsing document with progress at 0 or 1"
            )
        info = {"run": "2", "progress": 0, "chunk_num": 0}
        DocumentService.update_by_id(id, info)
        settings.docStoreConn.delete({"doc_id": doc[0].id}, search.index_name(tenant_id), dataset_id)
    return get_result()


@manager.route("/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["GET"])
@token_required
def list_chunks(tenant_id, dataset_id, document_id):
    """
    List chunks of a document.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 30
        description: Number of items per page.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: List of chunks.
        schema:
          type: object
          properties:
            total:
              type: integer
              description: Total number of chunks.
            chunks:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: Chunk ID.
                  content:
                    type: string
                    description: Chunk content.
                  document_id:
                    type: string
                    description: ID of the document.
                  important_keywords:
                    type: array
                    items:
                      type: string
                    description: Important keywords.
                  image_id:
                    type: string
                    description: Image ID associated with the chunk.
            doc:
              type: object
              description: Document details.
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(
            message=f"You don't own the document {document_id}."
        )
    doc = doc[0]
    req = request.args
    doc_id = document_id
    page = int(req.get("page", 1))
    size = int(req.get("page_size", 30))
    question = req.get("keywords", "")
    query = {
        "doc_ids": [doc_id],
        "page": page,
        "size": size,
        "question": question,
        "sort": True,
    }
    key_mapping = {
        "chunk_num": "chunk_count",
        "kb_id": "dataset_id",
        "token_num": "token_count",
        "parser_id": "chunk_method",
    }
    run_mapping = {
        "0": "UNSTART",
        "1": "RUNNING",
        "2": "CANCEL",
        "3": "DONE",
        "4": "FAIL",
    }
    doc = doc.to_dict()
    renamed_doc = {}
    for key, value in doc.items():
        new_key = key_mapping.get(key, key)
        renamed_doc[new_key] = value
        if key == "run":
            renamed_doc["run"] = run_mapping.get(str(value))
    res = {"total": 0, "chunks": [], "doc": renamed_doc}
    origin_chunks = []
    if settings.docStoreConn.indexExist(search.index_name(tenant_id), dataset_id):
        sres = settings.retrievaler.search(query, search.index_name(tenant_id), [dataset_id], emb_mdl=None,
                                           highlight=True)
        res["total"] = sres.total
        sign = 0
        for id in sres.ids:
            d = {
                "id": id,
                "content_with_weight": (
                    rmSpace(sres.highlight[id])
                    if question and id in sres.highlight
                    else sres.field[id].get("content_with_weight", "")
                ),
                "doc_id": sres.field[id]["doc_id"],
                "docnm_kwd": sres.field[id]["docnm_kwd"],
                "important_kwd": sres.field[id].get("important_kwd", []),
                "img_id": sres.field[id].get("img_id", ""),
                "available_int": sres.field[id].get("available_int", 1),
                "positions": sres.field[id].get("position_int", "").split("\t"),
            }
            if len(d["positions"]) % 5 == 0:
                poss = []
                for i in range(0, len(d["positions"]), 5):
                    poss.append(
                        [
                            float(d["positions"][i]),
                            float(d["positions"][i + 1]),
                            float(d["positions"][i + 2]),
                            float(d["positions"][i + 3]),
                            float(d["positions"][i + 4]),
                        ]
                    )
                d["positions"] = poss
            origin_chunks.append(d)
            if req.get("id"):
                if req.get("id") == id:
                    origin_chunks.clear()
                    origin_chunks.append(d)
                    sign = 1
                    break
        if req.get("id"):
            if sign == 0:
                return get_error_data_result(f"Can't find this chunk {req.get('id')}")
    for chunk in origin_chunks:
        key_mapping = {
            "id": "id",
            "content_with_weight": "content",
            "doc_id": "document_id",
            "important_kwd": "important_keywords",
            "img_id": "image_id",
            "available_int": "available",
        }
        renamed_chunk = {}
        for key, value in chunk.items():
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
        if renamed_chunk["available"] == 0:
            renamed_chunk["available"] = False
        if renamed_chunk["available"] == 1:
            renamed_chunk["available"] = True
        res["chunks"].append(renamed_chunk)
    return get_result(data=res)


@manager.route(
    "/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["POST"]
)
@token_required
def add_chunk(tenant_id, dataset_id, document_id):
    """
    Add a chunk to a document.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document.
      - in: body
        name: body
        description: Chunk data.
        required: true
        schema:
          type: object
          properties:
            content:
              type: string
              required: true
              description: Content of the chunk.
            important_keywords:
              type: array
              items:
                type: string
              description: Important keywords.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Chunk added successfully.
        schema:
          type: object
          properties:
            chunk:
              type: object
              properties:
                id:
                  type: string
                  description: Chunk ID.
                content:
                  type: string
                  description: Chunk content.
                document_id:
                  type: string
                  description: ID of the document.
                important_keywords:
                  type: array
                  items:
                    type: string
                  description: Important keywords.
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(
            message=f"You don't own the document {document_id}."
        )
    doc = doc[0]
    req = request.json
    if not req.get("content"):
        return get_error_data_result(message="`content` is required")
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result(
                "`important_keywords` is required to be a list"
            )
    md5 = hashlib.md5()
    md5.update((req["content"] + document_id).encode("utf-8"))
    chunk_id = md5.hexdigest()
    d = {
        "id": chunk_id,
        "content_ltks": rag_tokenizer.tokenize(req["content"]),
        "content_with_weight": req["content"],
    }
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_keywords", [])
    d["important_tks"] = rag_tokenizer.tokenize(
        " ".join(req.get("important_keywords", []))
    )
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
    d["kb_id"] = dataset_id
    d["docnm_kwd"] = doc.name
    d["doc_id"] = document_id
    embd_id = DocumentService.get_embd_id(document_id)
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id
    )
    v, c = embd_mdl.encode([doc.name, req["content"]])
    v = 0.1 * v[0] + 0.9 * v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.insert([d], search.index_name(tenant_id), dataset_id)
    DocumentService.increment_chunk_num(doc.id, doc.kb_id, c, 1, 0)
    # rename keys
    key_mapping = {
        "id": "id",
        "content_with_weight": "content",
        "doc_id": "document_id",
        "important_kwd": "important_keywords",
        "kb_id": "dataset_id",
        "create_timestamp_flt": "create_timestamp",
        "create_time": "create_time",
        "document_keyword": "document",
    }
    renamed_chunk = {}
    for key, value in d.items():
        if key in key_mapping:
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
    return get_result(data={"chunk": renamed_chunk})
    # return get_result(data={"chunk_id": chunk_id})


@manager.route(
    "/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["DELETE"]
)
@token_required
def rm_chunk(tenant_id, dataset_id, document_id):
    """
    Remove chunks from a document.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document.
      - in: body
        name: body
        description: Chunk removal parameters.
        required: true
        schema:
          type: object
          properties:
            chunk_ids:
              type: array
              items:
                type: string
              description: List of chunk IDs to remove.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Chunks removed successfully.
        schema:
          type: object
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    req = request.json
    condition = {"doc_id": document_id}
    if "chunk_ids" in req:
        condition["id"] = req["chunk_ids"]
    chunk_number = settings.docStoreConn.delete(condition, search.index_name(tenant_id), dataset_id)
    if chunk_number != 0:
        DocumentService.decrement_chunk_num(document_id, dataset_id, 1, chunk_number, 0)
    if "chunk_ids" in req and chunk_number != len(req["chunk_ids"]):
        return get_error_data_result(message=f"rm_chunk deleted chunks {chunk_number}, expect {len(req['chunk_ids'])}")
    return get_result(message=f"deleted {chunk_number} chunks")


@manager.route(
    "/datasets/<dataset_id>/documents/<document_id>/chunks/<chunk_id>", methods=["PUT"]
)
@token_required
def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
    """
    Update a chunk within a document.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document.
      - in: path
        name: chunk_id
        type: string
        required: true
        description: ID of the chunk to update.
      - in: body
        name: body
        description: Chunk update parameters.
        required: true
        schema:
          type: object
          properties:
            content:
              type: string
              description: Updated content of the chunk.
            important_keywords:
              type: array
              items:
                type: string
              description: Updated important keywords.
            available:
              type: boolean
              description: Availability status of the chunk.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Chunk updated successfully.
        schema:
          type: object
    """
    chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), [dataset_id])
    if chunk is None:
        return get_error_data_result(f"Can't find this chunk {chunk_id}")
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(
            message=f"You don't own the document {document_id}."
        )
    doc = doc[0]
    req = request.json
    if "content" in req:
        content = req["content"]
    else:
        content = chunk.get("content_with_weight", "")
    d = {"id": chunk_id, "content_with_weight": content}
    d["content_ltks"] = rag_tokenizer.tokenize(d["content_with_weight"])
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result("`important_keywords` should be a list")
        d["important_kwd"] = req.get("important_keywords")
        d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
    if "available" in req:
        d["available_int"] = int(req["available"])
    embd_id = DocumentService.get_embd_id(document_id)
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id
    )
    if doc.parser_id == ParserType.QA:
        arr = [t for t in re.split(r"[\n\t]", d["content_with_weight"]) if len(t) > 1]
        if len(arr) != 2:
            return get_error_data_result(
                message="Q&A must be separated by TAB/ENTER key."
            )
        q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
        d = beAdoc(
            d, arr[0], arr[1], not any([rag_tokenizer.is_chinese(t) for t in q + a])
        )
    v, c = embd_mdl.encode([doc.name, d["content_with_weight"]])
    v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.update({"id": chunk_id}, d, search.index_name(tenant_id), dataset_id)
    return get_result()


@manager.route("/retrieval", methods=["POST"])
@token_required
def retrieval_test(tenant_id):
    """
    Retrieve chunks based on a query.
    ---
    tags:
      - Retrieval
    security:
      - ApiKeyAuth: []
    parameters:
      - in: body
        name: body
        description: Retrieval parameters.
        required: true
        schema:
          type: object
          properties:
            dataset_ids:
              type: array
              items:
                type: string
              required: true
              description: List of dataset IDs to search in.
            question:
              type: string
              required: true
              description: Query string.
            document_ids:
              type: array
              items:
                type: string
              description: List of document IDs to filter.
            similarity_threshold:
              type: number
              format: float
              description: Similarity threshold.
            vector_similarity_weight:
              type: number
              format: float
              description: Vector similarity weight.
            top_k:
              type: integer
              description: Maximum number of chunks to return.
            highlight:
              type: boolean
              description: Whether to highlight matched content.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Retrieval results.
        schema:
          type: object
          properties:
            chunks:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: Chunk ID.
                  content:
                    type: string
                    description: Chunk content.
                  document_id:
                    type: string
                    description: ID of the document.
                  dataset_id:
                    type: string
                    description: ID of the dataset.
                  similarity:
                    type: number
                    format: float
                    description: Similarity score.
    """
    req = request.json
    if not req.get("dataset_ids"):
        return get_error_data_result("`dataset_ids` is required.")
    kb_ids = req["dataset_ids"]
    if not isinstance(kb_ids, list):
        return get_error_data_result("`dataset_ids` should be a list")
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    for id in kb_ids:
        if not KnowledgebaseService.accessible(kb_id=id, user_id=tenant_id):
            return get_error_data_result(f"You don't own the dataset {id}.")
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
        return get_result(
            message="Datasets use different embedding models.",
            code=settings.RetCode.AUTHENTICATION_ERROR,
        )
    if "question" not in req:
        return get_error_data_result("`question` is required.")
    page = int(req.get("page", 1))
    size = int(req.get("page_size", 30))
    question = req["question"]
    doc_ids = req.get("document_ids", [])
    if not isinstance(doc_ids, list):
        return get_error_data_result("`document_ids` should be a list")
    doc_ids_list = KnowledgebaseService.list_documents_by_ids(kb_ids)
    for doc_id in doc_ids:
        if doc_id not in doc_ids_list:
            return get_error_data_result(
                f"The datasets don't own the document {doc_id}"
            )
    similarity_threshold = float(req.get("similarity_threshold", 0.2))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    top = int(req.get("top_k", 1024))
    if req.get("highlight") == "False" or req.get("highlight") == "false":
        highlight = False
    else:
        highlight = True
    try:
        e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
        if not e:
            return get_error_data_result(message="Dataset not found!")
        embd_mdl = TenantLLMService.model_instance(
            kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id
        )
        rerank_mdl = None
        if req.get("rerank_id"):
            rerank_mdl = TenantLLMService.model_instance(
                kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"]
            )
        if req.get("keyword", False):
            chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)
        retr = settings.retrievaler if kb.parser_id != ParserType.KG else settings.kg_retrievaler
        ranks = retr.retrieval(
            question,
            embd_mdl,
            kb.tenant_id,
            kb_ids,
            page,
            size,
            similarity_threshold,
            vector_similarity_weight,
            top,
            doc_ids,
            rerank_mdl=rerank_mdl,
            highlight=highlight,
        )
        for c in ranks["chunks"]:
            c.pop("vector", None)
        # rename keys
        renamed_chunks = []
        for chunk in ranks["chunks"]:
            key_mapping = {
                "chunk_id": "id",
                "content_with_weight": "content",
                "doc_id": "document_id",
                "important_kwd": "important_keywords",
                "docnm_kwd": "document_keyword",
            }
            rename_chunk = {}
            for key, value in chunk.items():
                new_key = key_mapping.get(key, key)
                rename_chunk[new_key] = value
            renamed_chunks.append(rename_chunk)
        ranks["chunks"] = renamed_chunks
        return get_result(data=ranks)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_result(
                message="No chunk found! Check the chunk status please!",
                code=settings.RetCode.DATA_ERROR,
            )
        return server_error_response(e)
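
# Illustrative client call for the retrieval endpoint above (a sketch, not part of this
# module); only `dataset_ids` and `question` are required, the rest fall back to the
# defaults handled in retrieval_test(). The response is assumed to arrive under the
# "data" key produced by get_result().
#
#   import requests
#   resp = requests.post(
#       "http://localhost:9380/api/v1/retrieval",
#       headers={"Authorization": "Bearer " + API_KEY},
#       json={
#           "dataset_ids": ["<dataset_id>"],
#           "question": "What is RAGFlow?",
#           "similarity_threshold": 0.2,
#           "vector_similarity_weight": 0.3,
#           "top_k": 1024,
#       },
#   )
#   for chunk in resp.json()["data"]["chunks"]:
#       print(chunk["similarity"], chunk["content"][:80])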