#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pathlib
import datetime
from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import rag_tokenizer
from api.db import LLMType, ParserType
from api.db.services.llm_service import TenantLLMService, LLMBundle
from api import settings
import xxhash
import re
from api.utils.api_utils import token_required
from api.db.db_models import Task
from api.db.services.task_service import TaskService, queue_tasks
from api.utils.api_utils import server_error_response
from api.utils.api_utils import get_result, get_error_data_result
from io import BytesIO
from flask import request, send_file
from api.db import FileSource, TaskStatus, FileType
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.utils.api_utils import construct_json_result, get_parser_config
from rag.nlp import search
from rag.prompts import keyword_extraction
from rag.app.tag import label_question
from rag.utils import rmSpace
from rag.utils.storage_factory import STORAGE_IMPL
from pydantic import BaseModel, Field, validator

MAXIMUM_OF_UPLOADING_FILES = 256


class Chunk(BaseModel):
    id: str = ""
    content: str = ""
    document_id: str = ""
    docnm_kwd: str = ""
    important_keywords: list = Field(default_factory=list)
    questions: list = Field(default_factory=list)
    question_tks: str = ""
    image_id: str = ""
    available: bool = True
    positions: list[list[int]] = Field(default_factory=list)

    @validator('positions')
    def validate_positions(cls, value):
        for sublist in value:
            if len(sublist) != 5:
                raise ValueError("Each sublist in positions must have a length of 5")
        return value
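
# A short illustration (not part of the original file) of how the model above
# validates chunk payloads: `positions` must be a list of 5-element lists, and
# every other field falls back to a permissive default.
#
#   Chunk(id="abc", content="hi", positions=[[1, 0, 10, 0, 5]])  # validates
#   Chunk(positions=[[1, 2]])  # raises ValueError: length must be 5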


@manager.route("/datasets/<dataset_id>/documents", methods=["POST"])  # noqa: F821
@token_required
def upload(dataset_id, tenant_id):
    """
    Upload documents to a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: formData
        name: file
        type: file
        required: true
        description: Document files to upload.
    responses:
      200:
        description: Successfully uploaded documents.
        schema:
          type: object
          properties:
            data:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: Document ID.
                  name:
                    type: string
                    description: Document name.
                  chunk_count:
                    type: integer
                    description: Number of chunks.
                  token_count:
                    type: integer
                    description: Number of tokens.
                  dataset_id:
                    type: string
                    description: ID of the dataset.
                  chunk_method:
                    type: string
                    description: Chunking method used.
                  run:
                    type: string
                    description: Processing status.
    """
    if "file" not in request.files:
        return get_error_data_result(
            message="No file part!", code=settings.RetCode.ARGUMENT_ERROR
        )
    file_objs = request.files.getlist("file")
    for file_obj in file_objs:
        if file_obj.filename == "":
            return get_result(
                message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR
            )
    '''
    # total size
    total_size = 0
    for file_obj in file_objs:
        file_obj.seek(0, os.SEEK_END)
        total_size += file_obj.tell()
        file_obj.seek(0)
    MAX_TOTAL_FILE_SIZE = 10 * 1024 * 1024
    if total_size > MAX_TOTAL_FILE_SIZE:
        return get_result(
            message=f"Total file size exceeds 10MB limit! ({total_size / (1024 * 1024):.2f} MB)",
            code=settings.RetCode.ARGUMENT_ERROR,
        )
    '''
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if not e:
        raise LookupError(f"Can't find the dataset with ID {dataset_id}!")
    err, files = FileService.upload_document(kb, file_objs, tenant_id)
    if err:
        return get_result(message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
    # rename key's name
    renamed_doc_list = []
    for file in files:
        doc = file[0]
        key_mapping = {
            "chunk_num": "chunk_count",
            "kb_id": "dataset_id",
            "token_num": "token_count",
            "parser_id": "chunk_method",
        }
        renamed_doc = {}
        for key, value in doc.items():
            new_key = key_mapping.get(key, key)
            renamed_doc[new_key] = value
        renamed_doc["run"] = "UNSTART"
        renamed_doc_list.append(renamed_doc)
    return get_result(data=renamed_doc_list)
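
# Hedged usage sketch (not from the original source): uploading files to the
# route above with the `requests` library. The base URL, API key, and file name
# are hypothetical placeholders; adjust them to your deployment.
#
#   import requests
#   resp = requests.post(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       files=[("file", open("example.pdf", "rb"))],
#   )
#   print(resp.json())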


@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["PUT"])  # noqa: F821
@token_required
def update_doc(tenant_id, dataset_id, document_id):
    """
    Update a document within a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document to update.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
      - in: body
        name: body
        description: Document update parameters.
        required: true
        schema:
          type: object
          properties:
            name:
              type: string
              description: New name of the document.
            parser_config:
              type: object
              description: Parser configuration.
            chunk_method:
              type: string
              description: Chunking method.
    responses:
      200:
        description: Document updated successfully.
        schema:
          type: object
    """
    req = request.json
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message="You don't own the dataset.")
    doc = DocumentService.query(kb_id=dataset_id, id=document_id)
    if not doc:
        return get_error_data_result(message="The dataset doesn't own the document.")
    doc = doc[0]
    if "chunk_count" in req:
        if req["chunk_count"] != doc.chunk_num:
            return get_error_data_result(message="Can't change `chunk_count`.")
    if "token_count" in req:
        if req["token_count"] != doc.token_num:
            return get_error_data_result(message="Can't change `token_count`.")
    if "progress" in req:
        if req["progress"] != doc.progress:
            return get_error_data_result(message="Can't change `progress`.")
    if "meta_fields" in req:
        if not isinstance(req["meta_fields"], dict):
            return get_error_data_result(message="meta_fields must be a dictionary")
        DocumentService.update_meta_fields(document_id, req["meta_fields"])
    if "name" in req and req["name"] != doc.name:
        if (
            pathlib.Path(req["name"].lower()).suffix
            != pathlib.Path(doc.name.lower()).suffix
        ):
            return get_result(
                message="The extension of file can't be changed",
                code=settings.RetCode.ARGUMENT_ERROR,
            )
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
                return get_error_data_result(
                    message="Duplicated document name in the same dataset."
                )
        if not DocumentService.update_by_id(document_id, {"name": req["name"]}):
            return get_error_data_result(message="Database error (Document rename)!")
        informs = File2DocumentService.get_by_document_id(document_id)
        if informs:
            e, file = FileService.get_by_id(informs[0].file_id)
            FileService.update_by_id(file.id, {"name": req["name"]})
    if "parser_config" in req:
        DocumentService.update_parser_config(doc.id, req["parser_config"])
    if "chunk_method" in req:
        valid_chunk_method = {
            "naive",
            "manual",
            "qa",
            "table",
            "paper",
            "book",
            "laws",
            "presentation",
            "picture",
            "one",
            "knowledge_graph",
            "email",
            "tag",
        }
        if req.get("chunk_method") not in valid_chunk_method:
            return get_error_data_result(
                f"`chunk_method` {req['chunk_method']} doesn't exist"
            )
        if doc.parser_id.lower() == req["chunk_method"].lower():
            return get_result()
        if doc.type == FileType.VISUAL or re.search(r"\.(ppt|pptx|pages)$", doc.name):
            return get_error_data_result(message="Not supported yet!")
        e = DocumentService.update_by_id(
            doc.id,
            {
                "parser_id": req["chunk_method"],
                "progress": 0,
                "progress_msg": "",
                "run": TaskStatus.UNSTART.value,
            },
        )
        if not e:
            return get_error_data_result(message="Document not found!")
        req["parser_config"] = get_parser_config(
            req["chunk_method"], req.get("parser_config")
        )
        DocumentService.update_parser_config(doc.id, req["parser_config"])
        if doc.token_num > 0:
            e = DocumentService.increment_chunk_num(
                doc.id,
                doc.kb_id,
                doc.token_num * -1,
                doc.chunk_num * -1,
                doc.process_duation * -1,
            )
            if not e:
                return get_error_data_result(message="Document not found!")
            settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), dataset_id)
    return get_result()
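
# Hedged usage sketch (illustrative only): renaming a document and switching its
# chunking method through the PUT route above; URL and IDs are hypothetical.
#
#   requests.put(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents/<document_id>",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={"name": "renamed.pdf", "chunk_method": "naive", "parser_config": {}},
#   )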


@manager.route("/datasets/<dataset_id>/documents/<document_id>", methods=["GET"])  # noqa: F821
@token_required
def download(tenant_id, dataset_id, document_id):
    """
    Download a document from a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    produces:
      - application/octet-stream
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document to download.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Document file stream.
        schema:
          type: file
      400:
        description: Error message.
        schema:
          type: object
    """
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(message=f"You do not own the dataset {dataset_id}.")
    doc = DocumentService.query(kb_id=dataset_id, id=document_id)
    if not doc:
        return get_error_data_result(
            message=f"The dataset doesn't own the document {document_id}."
        )
    # The process of downloading
    doc_id, doc_location = File2DocumentService.get_storage_address(
        doc_id=document_id
    )  # minio address
    file_stream = STORAGE_IMPL.get(doc_id, doc_location)
    if not file_stream:
        return construct_json_result(
            message="This file is empty.", code=settings.RetCode.DATA_ERROR
        )
    file = BytesIO(file_stream)
    # Use send_file with a proper filename and MIME type
    return send_file(
        file,
        as_attachment=True,
        download_name=doc[0].name,
        mimetype="application/octet-stream",  # Set a default MIME type
    )
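
# Hedged usage sketch (illustrative only): fetching the raw file returned by the
# GET route above and writing it to disk; the output file name is hypothetical.
#
#   resp = requests.get(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents/<document_id>",
#       headers={"Authorization": "Bearer <API_KEY>"},
#   )
#   with open("downloaded_copy.bin", "wb") as f:
#       f.write(resp.content)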


@manager.route("/datasets/<dataset_id>/documents", methods=["GET"])  # noqa: F821
@token_required
def list_docs(dataset_id, tenant_id):
    """
    List documents in a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: query
        name: id
        type: string
        required: false
        description: Filter by document ID.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 30
        description: Number of items per page.
      - in: query
        name: orderby
        type: string
        required: false
        default: "create_time"
        description: Field to order by.
      - in: query
        name: desc
        type: boolean
        required: false
        default: true
        description: Order in descending.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: List of documents.
        schema:
          type: object
          properties:
            total:
              type: integer
              description: Total number of documents.
            docs:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: Document ID.
                  name:
                    type: string
                    description: Document name.
                  chunk_count:
                    type: integer
                    description: Number of chunks.
                  token_count:
                    type: integer
                    description: Number of tokens.
                  dataset_id:
                    type: string
                    description: ID of the dataset.
                  chunk_method:
                    type: string
                    description: Chunking method used.
                  run:
                    type: string
                    description: Processing status.
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
    id = request.args.get("id")
    name = request.args.get("name")
    if id and not DocumentService.query(id=id, kb_id=dataset_id):
        return get_error_data_result(message=f"You don't own the document {id}.")
    if name and not DocumentService.query(name=name, kb_id=dataset_id):
        return get_error_data_result(message=f"You don't own the document {name}.")
    page = int(request.args.get("page", 1))
    keywords = request.args.get("keywords", "")
    page_size = int(request.args.get("page_size", 30))
    orderby = request.args.get("orderby", "create_time")
    if request.args.get("desc") == "False":
        desc = False
    else:
        desc = True
    docs, tol = DocumentService.get_list(
        dataset_id, page, page_size, orderby, desc, keywords, id, name
    )
    # rename key's name
    renamed_doc_list = []
    for doc in docs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "kb_id": "dataset_id",
            "token_num": "token_count",
            "parser_id": "chunk_method",
        }
        run_mapping = {
            "0": "UNSTART",
            "1": "RUNNING",
            "2": "CANCEL",
            "3": "DONE",
            "4": "FAIL",
        }
        renamed_doc = {}
        for key, value in doc.items():
            if key == "run":
                renamed_doc["run"] = run_mapping.get(str(value))
            new_key = key_mapping.get(key, key)
            renamed_doc[new_key] = value
            if key == "run":
                renamed_doc["run"] = run_mapping.get(value)
        renamed_doc_list.append(renamed_doc)
    return get_result(data={"total": tol, "docs": renamed_doc_list})
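
# Hedged usage sketch (illustrative only): paging through documents with the
# query parameters accepted above; parameter values are hypothetical.
#
#   resp = requests.get(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       params={"page": 1, "page_size": 30, "orderby": "create_time", "keywords": "report"},
#   )
#   data = resp.json()["data"]
#   total, docs = data["total"], data["docs"]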


@manager.route("/datasets/<dataset_id>/documents", methods=["DELETE"])  # noqa: F821
@token_required
def delete(tenant_id, dataset_id):
    """
    Delete documents from a dataset.
    ---
    tags:
      - Documents
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: body
        name: body
        description: Document deletion parameters.
        required: true
        schema:
          type: object
          properties:
            ids:
              type: array
              items:
                type: string
              description: List of document IDs to delete.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Documents deleted successfully.
        schema:
          type: object
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}. ")
    req = request.json
    if not req:
        doc_ids = None
    else:
        doc_ids = req.get("ids")
    if not doc_ids:
        doc_list = []
        docs = DocumentService.query(kb_id=dataset_id)
        for doc in docs:
            doc_list.append(doc.id)
    else:
        doc_list = doc_ids
    root_folder = FileService.get_root_folder(tenant_id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, tenant_id)
    errors = ""
    for doc_id in doc_list:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
                return get_error_data_result(message="Document not found!")
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_error_data_result(message="Tenant not found!")
            b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
            if not DocumentService.remove_document(doc, tenant_id):
                return get_error_data_result(
                    message="Database error (Document removal)!"
                )
            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete(
                [
                    File.source_type == FileSource.KNOWLEDGEBASE,
                    File.id == f2d[0].file_id,
                ]
            )
            File2DocumentService.delete_by_document_id(doc_id)
            STORAGE_IMPL.rm(b, n)
        except Exception as e:
            errors += str(e)
    if errors:
        return get_result(message=errors, code=settings.RetCode.SERVER_ERROR)
    return get_result()
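
# Hedged usage sketch (illustrative only): deleting two documents by ID. Per the
# route above, omitting "ids" removes every document in the dataset.
#
#   requests.delete(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={"ids": ["<document_id_1>", "<document_id_2>"]},
#   )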


@manager.route("/datasets/<dataset_id>/chunks", methods=["POST"])  # noqa: F821
@token_required
def parse(tenant_id, dataset_id):
    """
    Start parsing documents into chunks.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: body
        name: body
        description: Parsing parameters.
        required: true
        schema:
          type: object
          properties:
            document_ids:
              type: array
              items:
                type: string
              description: List of document IDs to parse.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Parsing started successfully.
        schema:
          type: object
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("document_ids"):
        return get_error_data_result("`document_ids` is required")
    for id in req["document_ids"]:
        doc = DocumentService.query(id=id, kb_id=dataset_id)
        if not doc:
            return get_error_data_result(message=f"You don't own the document {id}.")
        if doc[0].progress != 0.0:
            return get_error_data_result(
                "Can't parse a document whose progress is not 0."
            )
        info = {"run": "1", "progress": 0}
        info["progress_msg"] = ""
        info["chunk_num"] = 0
        info["token_num"] = 0
        DocumentService.update_by_id(id, info)
        settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), dataset_id)
        TaskService.filter_delete([Task.doc_id == id])
        e, doc = DocumentService.get_by_id(id)
        doc = doc.to_dict()
        doc["tenant_id"] = tenant_id
        bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
        queue_tasks(doc, bucket, name)
    return get_result()
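
# Hedged usage sketch (illustrative only): queueing two documents for parsing
# through the route above; the document IDs are hypothetical placeholders.
#
#   requests.post(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/chunks",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={"document_ids": ["<document_id_1>", "<document_id_2>"]},
#   )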


@manager.route("/datasets/<dataset_id>/chunks", methods=["DELETE"])  # noqa: F821
@token_required
def stop_parsing(tenant_id, dataset_id):
    """
    Stop parsing documents into chunks.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: body
        name: body
        description: Stop parsing parameters.
        required: true
        schema:
          type: object
          properties:
            document_ids:
              type: array
              items:
                type: string
              description: List of document IDs to stop parsing.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Parsing stopped successfully.
        schema:
          type: object
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("document_ids"):
        return get_error_data_result("`document_ids` is required")
    for id in req["document_ids"]:
        doc = DocumentService.query(id=id, kb_id=dataset_id)
        if not doc:
            return get_error_data_result(message=f"You don't own the document {id}.")
        if int(doc[0].progress) == 1 or doc[0].progress == 0:
            return get_error_data_result(
                "Can't stop parsing document with progress at 0 or 1"
            )
        info = {"run": "2", "progress": 0, "chunk_num": 0}
        DocumentService.update_by_id(id, info)
        settings.docStoreConn.delete({"doc_id": doc[0].id}, search.index_name(tenant_id), dataset_id)
    return get_result()


@manager.route("/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["GET"])  # noqa: F821
@token_required
def list_chunks(tenant_id, dataset_id, document_id):
    """
    List chunks of a document.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document.
      - in: query
        name: page
        type: integer
        required: false
        default: 1
        description: Page number.
      - in: query
        name: page_size
        type: integer
        required: false
        default: 30
        description: Number of items per page.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: List of chunks.
        schema:
          type: object
          properties:
            total:
              type: integer
              description: Total number of chunks.
            chunks:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: Chunk ID.
                  content:
                    type: string
                    description: Chunk content.
                  document_id:
                    type: string
                    description: ID of the document.
                  important_keywords:
                    type: array
                    items:
                      type: string
                    description: Important keywords.
                  image_id:
                    type: string
                    description: Image ID associated with the chunk.
            doc:
              type: object
              description: Document details.
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(
            message=f"You don't own the document {document_id}."
        )
    doc = doc[0]
    req = request.args
    doc_id = document_id
    page = int(req.get("page", 1))
    size = int(req.get("page_size", 30))
    question = req.get("keywords", "")
    query = {
        "doc_ids": [doc_id],
        "page": page,
        "size": size,
        "question": question,
        "sort": True,
    }
    key_mapping = {
        "chunk_num": "chunk_count",
        "kb_id": "dataset_id",
        "token_num": "token_count",
        "parser_id": "chunk_method",
    }
    run_mapping = {
        "0": "UNSTART",
        "1": "RUNNING",
        "2": "CANCEL",
        "3": "DONE",
        "4": "FAIL",
    }
    doc = doc.to_dict()
    renamed_doc = {}
    for key, value in doc.items():
        new_key = key_mapping.get(key, key)
        renamed_doc[new_key] = value
        if key == "run":
            renamed_doc["run"] = run_mapping.get(str(value))
    res = {"total": 0, "chunks": [], "doc": renamed_doc}
    if req.get("id"):
        chunk = settings.docStoreConn.get(req.get("id"), search.index_name(tenant_id), [dataset_id])
        k = []
        for n in chunk.keys():
            if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
                k.append(n)
        for n in k:
            del chunk[n]
        if not chunk:
            return get_error_data_result(f"Chunk `{req.get('id')}` not found.")
        res['total'] = 1
        final_chunk = {
            "id": chunk.get("id", chunk.get("chunk_id")),
            "content": chunk["content_with_weight"],
            "document_id": chunk.get("doc_id", chunk.get("document_id")),
            "docnm_kwd": chunk["docnm_kwd"],
            "important_keywords": chunk.get("important_kwd", []),
            "questions": chunk.get("question_kwd", []),
            "dataset_id": chunk.get("kb_id", chunk.get("dataset_id")),
            "image_id": chunk["img_id"],
            "available": bool(chunk.get("available_int", 1)),
            "positions": chunk.get("position_int", []),
        }
        res["chunks"].append(final_chunk)
        _ = Chunk(**final_chunk)
    elif settings.docStoreConn.indexExist(search.index_name(tenant_id), dataset_id):
        sres = settings.retrievaler.search(query, search.index_name(tenant_id), [dataset_id], emb_mdl=None,
                                           highlight=True)
        res["total"] = sres.total
        for id in sres.ids:
            d = {
                "id": id,
                "content": (
                    rmSpace(sres.highlight[id])
                    if question and id in sres.highlight
                    else sres.field[id].get("content_with_weight", "")
                ),
                "document_id": sres.field[id]["doc_id"],
                "docnm_kwd": sres.field[id]["docnm_kwd"],
                "important_keywords": sres.field[id].get("important_kwd", []),
                "questions": sres.field[id].get("question_kwd", []),
                "dataset_id": sres.field[id].get("kb_id", sres.field[id].get("dataset_id")),
                "image_id": sres.field[id].get("img_id", ""),
                "available": bool(sres.field[id].get("available_int", 1)),
                "positions": sres.field[id].get("position_int", []),
            }
            res["chunks"].append(d)
            _ = Chunk(**d)  # validate the chunk
    return get_result(data=res)
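
# Hedged usage sketch (illustrative only): listing chunks of one document via
# the GET route above, or fetching a single chunk by passing its "id" as a
# query parameter; all values are hypothetical.
#
#   resp = requests.get(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents/<document_id>/chunks",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       params={"page": 1, "page_size": 30, "keywords": "invoice"},
#   )
#   chunks = resp.json()["data"]["chunks"]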


@manager.route(  # noqa: F821
    "/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["POST"]
)
@token_required
def add_chunk(tenant_id, dataset_id, document_id):
    """
    Add a chunk to a document.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document.
      - in: body
        name: body
        description: Chunk data.
        required: true
        schema:
          type: object
          properties:
            content:
              type: string
              required: true
              description: Content of the chunk.
            important_keywords:
              type: array
              items:
                type: string
              description: Important keywords.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Chunk added successfully.
        schema:
          type: object
          properties:
            chunk:
              type: object
              properties:
                id:
                  type: string
                  description: Chunk ID.
                content:
                  type: string
                  description: Chunk content.
                document_id:
                  type: string
                  description: ID of the document.
                important_keywords:
                  type: array
                  items:
                    type: string
                  description: Important keywords.
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(
            message=f"You don't own the document {document_id}."
        )
    doc = doc[0]
    req = request.json
    if not req.get("content"):
        return get_error_data_result(message="`content` is required")
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result(
                "`important_keywords` is required to be a list"
            )
    if "questions" in req:
        if not isinstance(req["questions"], list):
            return get_error_data_result(
                "`questions` is required to be a list"
            )
    chunk_id = xxhash.xxh64((req["content"] + document_id).encode("utf-8")).hexdigest()
    d = {
        "id": chunk_id,
        "content_ltks": rag_tokenizer.tokenize(req["content"]),
        "content_with_weight": req["content"],
    }
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_keywords", [])
    d["important_tks"] = rag_tokenizer.tokenize(
        " ".join(req.get("important_keywords", []))
    )
    d["question_kwd"] = req.get("questions", [])
    d["question_tks"] = rag_tokenizer.tokenize(
        "\n".join(req.get("questions", []))
    )
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
    d["kb_id"] = dataset_id
    d["docnm_kwd"] = doc.name
    d["doc_id"] = document_id
    embd_id = DocumentService.get_embd_id(document_id)
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id
    )
    v, c = embd_mdl.encode([doc.name, req["content"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
    v = 0.1 * v[0] + 0.9 * v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.insert([d], search.index_name(tenant_id), dataset_id)
    DocumentService.increment_chunk_num(doc.id, doc.kb_id, c, 1, 0)
    # rename keys
    key_mapping = {
        "id": "id",
        "content_with_weight": "content",
        "doc_id": "document_id",
        "important_kwd": "important_keywords",
        "question_kwd": "questions",
        "kb_id": "dataset_id",
        "create_timestamp_flt": "create_timestamp",
        "create_time": "create_time",
        "document_keyword": "document",
    }
    renamed_chunk = {}
    for key, value in d.items():
        if key in key_mapping:
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
    _ = Chunk(**renamed_chunk)  # validate the chunk
    return get_result(data={"chunk": renamed_chunk})
    # return get_result(data={"chunk_id": chunk_id})
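
# Hedged usage sketch (illustrative only): adding a manual chunk with optional
# keywords and questions, matching the request body validated above.
#
#   requests.post(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents/<document_id>/chunks",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={
#           "content": "RAGFlow supports manual chunk insertion.",
#           "important_keywords": ["RAGFlow", "chunk"],
#           "questions": ["How do I add a chunk manually?"],
#       },
#   )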


@manager.route(  # noqa: F821
    "/datasets/<dataset_id>/documents/<document_id>/chunks", methods=["DELETE"]
)
@token_required
def rm_chunk(tenant_id, dataset_id, document_id):
    """
    Remove chunks from a document.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document.
      - in: body
        name: body
        description: Chunk removal parameters.
        required: true
        schema:
          type: object
          properties:
            chunk_ids:
              type: array
              items:
                type: string
              description: List of chunk IDs to remove.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Chunks removed successfully.
        schema:
          type: object
    """
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    req = request.json
    condition = {"doc_id": document_id}
    if "chunk_ids" in req:
        condition["id"] = req["chunk_ids"]
    chunk_number = settings.docStoreConn.delete(condition, search.index_name(tenant_id), dataset_id)
    if chunk_number != 0:
        DocumentService.decrement_chunk_num(document_id, dataset_id, 1, chunk_number, 0)
    if "chunk_ids" in req and chunk_number != len(req["chunk_ids"]):
        return get_error_data_result(message=f"rm_chunk deleted chunks {chunk_number}, expect {len(req['chunk_ids'])}")
    return get_result(message=f"deleted {chunk_number} chunks")
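
# Hedged usage sketch (illustrative only): removing selected chunks; per the
# route above, omitting "chunk_ids" deletes every chunk of the document.
#
#   requests.delete(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents/<document_id>/chunks",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={"chunk_ids": ["<chunk_id_1>", "<chunk_id_2>"]},
#   )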


@manager.route(  # noqa: F821
    "/datasets/<dataset_id>/documents/<document_id>/chunks/<chunk_id>", methods=["PUT"]
)
@token_required
def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
    """
    Update a chunk within a document.
    ---
    tags:
      - Chunks
    security:
      - ApiKeyAuth: []
    parameters:
      - in: path
        name: dataset_id
        type: string
        required: true
        description: ID of the dataset.
      - in: path
        name: document_id
        type: string
        required: true
        description: ID of the document.
      - in: path
        name: chunk_id
        type: string
        required: true
        description: ID of the chunk to update.
      - in: body
        name: body
        description: Chunk update parameters.
        required: true
        schema:
          type: object
          properties:
            content:
              type: string
              description: Updated content of the chunk.
            important_keywords:
              type: array
              items:
                type: string
              description: Updated important keywords.
            available:
              type: boolean
              description: Availability status of the chunk.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Chunk updated successfully.
        schema:
          type: object
    """
    chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), [dataset_id])
    if chunk is None:
        return get_error_data_result(f"Can't find this chunk {chunk_id}")
    if not KnowledgebaseService.accessible(kb_id=dataset_id, user_id=tenant_id):
        return get_error_data_result(message=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(
            message=f"You don't own the document {document_id}."
        )
    doc = doc[0]
    req = request.json
    if "content" in req:
        content = req["content"]
    else:
        content = chunk.get("content_with_weight", "")
    d = {"id": chunk_id, "content_with_weight": content}
    d["content_ltks"] = rag_tokenizer.tokenize(d["content_with_weight"])
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result("`important_keywords` should be a list")
        d["important_kwd"] = req.get("important_keywords", [])
        d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
    if "questions" in req:
        if not isinstance(req["questions"], list):
            return get_error_data_result("`questions` should be a list")
        d["question_kwd"] = req.get("questions")
        d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["questions"]))
    if "available" in req:
        d["available_int"] = int(req["available"])
    embd_id = DocumentService.get_embd_id(document_id)
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id
    )
    if doc.parser_id == ParserType.QA:
        arr = [t for t in re.split(r"[\n\t]", d["content_with_weight"]) if len(t) > 1]
        if len(arr) != 2:
            return get_error_data_result(
                message="Q&A must be separated by TAB/ENTER key."
            )
        q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
        d = beAdoc(
            d, arr[0], arr[1], not any([rag_tokenizer.is_chinese(t) for t in q + a])
        )
    v, c = embd_mdl.encode([doc.name, d["content_with_weight"] if not d.get("question_kwd") else "\n".join(d["question_kwd"])])
    v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    settings.docStoreConn.update({"id": chunk_id}, d, search.index_name(tenant_id), dataset_id)
    return get_result()
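
# Hedged usage sketch (illustrative only): editing a chunk's content and
# toggling its availability through the PUT route above.
#
#   requests.put(
#       "http://localhost:9380/api/v1/datasets/<dataset_id>/documents/<document_id>/chunks/<chunk_id>",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={"content": "Corrected chunk text.", "important_keywords": ["corrected"], "available": True},
#   )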


@manager.route("/retrieval", methods=["POST"])  # noqa: F821
@token_required
def retrieval_test(tenant_id):
    """
    Retrieve chunks based on a query.
    ---
    tags:
      - Retrieval
    security:
      - ApiKeyAuth: []
    parameters:
      - in: body
        name: body
        description: Retrieval parameters.
        required: true
        schema:
          type: object
          properties:
            dataset_ids:
              type: array
              items:
                type: string
              required: true
              description: List of dataset IDs to search in.
            question:
              type: string
              required: true
              description: Query string.
            document_ids:
              type: array
              items:
                type: string
              description: List of document IDs to filter.
            similarity_threshold:
              type: number
              format: float
              description: Similarity threshold.
            vector_similarity_weight:
              type: number
              format: float
              description: Vector similarity weight.
            top_k:
              type: integer
              description: Maximum number of chunks to return.
            highlight:
              type: boolean
              description: Whether to highlight matched content.
      - in: header
        name: Authorization
        type: string
        required: true
        description: Bearer token for authentication.
    responses:
      200:
        description: Retrieval results.
        schema:
          type: object
          properties:
            chunks:
              type: array
              items:
                type: object
                properties:
                  id:
                    type: string
                    description: Chunk ID.
                  content:
                    type: string
                    description: Chunk content.
                  document_id:
                    type: string
                    description: ID of the document.
                  dataset_id:
                    type: string
                    description: ID of the dataset.
                  similarity:
                    type: number
                    format: float
                    description: Similarity score.
    """
    req = request.json
    if not req.get("dataset_ids"):
        return get_error_data_result("`dataset_ids` is required.")
    kb_ids = req["dataset_ids"]
    if not isinstance(kb_ids, list):
        return get_error_data_result("`dataset_ids` should be a list")
    for id in kb_ids:
        if not KnowledgebaseService.accessible(kb_id=id, user_id=tenant_id):
            return get_error_data_result(f"You don't own the dataset {id}.")
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs]))  # remove vendor suffix for comparison
    if len(embd_nms) != 1:
        return get_result(
            message='Datasets use different embedding models.',
            code=settings.RetCode.DATA_ERROR,
        )
  1270. if "question" not in req:
  1271. return get_error_data_result("`question` is required.")
  1272. page = int(req.get("page", 1))
  1273. size = int(req.get("page_size", 30))
  1274. question = req["question"]
  1275. doc_ids = req.get("document_ids", [])
  1276. use_kg = req.get("use_kg", False)
  1277. if not isinstance(doc_ids, list):
  1278. return get_error_data_result("`documents` should be a list")
  1279. doc_ids_list = KnowledgebaseService.list_documents_by_ids(kb_ids)
  1280. for doc_id in doc_ids:
  1281. if doc_id not in doc_ids_list:
  1282. return get_error_data_result(
  1283. f"The datasets don't own the document {doc_id}"
  1284. )
  1285. similarity_threshold = float(req.get("similarity_threshold", 0.2))
  1286. vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
  1287. top = int(req.get("top_k", 1024))
  1288. if req.get("highlight") == "False" or req.get("highlight") == "false":
  1289. highlight = False
  1290. else:
  1291. highlight = True
  1292. try:
  1293. e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
  1294. if not e:
  1295. return get_error_data_result(message="Dataset not found!")
  1296. embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id)
  1297. rerank_mdl = None
  1298. if req.get("rerank_id"):
  1299. rerank_mdl = LLMBundle(kb.tenant_id, LLMType.RERANK, llm_name=req["rerank_id"])
  1300. if req.get("keyword", False):
  1301. chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
  1302. question += keyword_extraction(chat_mdl, question)
  1303. ranks = settings.retrievaler.retrieval(
  1304. question,
  1305. embd_mdl,
  1306. kb.tenant_id,
  1307. kb_ids,
  1308. page,
  1309. size,
  1310. similarity_threshold,
  1311. vector_similarity_weight,
  1312. top,
  1313. doc_ids,
  1314. rerank_mdl=rerank_mdl,
  1315. highlight=highlight,
  1316. rank_feature=label_question(question, kbs)
  1317. )
  1318. if use_kg:
  1319. ck = settings.kg_retrievaler.retrieval(question,
  1320. [k.tenant_id for k in kbs],
  1321. kb_ids,
  1322. embd_mdl,
  1323. LLMBundle(kb.tenant_id, LLMType.CHAT))
  1324. if ck["content_with_weight"]:
  1325. ranks["chunks"].insert(0, ck)
  1326. for c in ranks["chunks"]:
  1327. c.pop("vector", None)
  1328. ##rename keys
  1329. renamed_chunks = []
  1330. for chunk in ranks["chunks"]:
  1331. key_mapping = {
  1332. "chunk_id": "id",
  1333. "content_with_weight": "content",
  1334. "doc_id": "document_id",
  1335. "important_kwd": "important_keywords",
  1336. "question_kwd": "questions",
  1337. "docnm_kwd": "document_keyword",
  1338. "kb_id":"dataset_id"
  1339. }
  1340. rename_chunk = {}
  1341. for key, value in chunk.items():
  1342. new_key = key_mapping.get(key, key)
  1343. rename_chunk[new_key] = value
  1344. renamed_chunks.append(rename_chunk)
  1345. ranks["chunks"] = renamed_chunks
  1346. return get_result(data=ranks)
  1347. except Exception as e:
  1348. if str(e).find("not_found") > 0:
  1349. return get_result(
  1350. message="No chunk found! Check the chunk status please!",
  1351. code=settings.RetCode.DATA_ERROR,
  1352. )
  1353. return server_error_response(e)
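
# Hedged usage sketch (illustrative only): running a retrieval test against two
# datasets through the route above. The thresholds and weights mirror the
# defaults read from the request; all IDs and the question are hypothetical.
#
#   resp = requests.post(
#       "http://localhost:9380/api/v1/retrieval",
#       headers={"Authorization": "Bearer <API_KEY>"},
#       json={
#           "dataset_ids": ["<dataset_id_1>", "<dataset_id_2>"],
#           "question": "What is covered in the Q3 report?",
#           "similarity_threshold": 0.2,
#           "vector_similarity_weight": 0.3,
#           "top_k": 1024,
#           "highlight": True,
#       },
#   )
#   for chunk in resp.json()["data"]["chunks"]:
#       print(chunk["document_keyword"], chunk["similarity"])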