#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
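
# REST endpoints for dataset documents and chunks: upload, download, listing,
# updating, deletion, parsing control, chunk CRUD, and retrieval testing.
# Note: `manager` (the Flask blueprint) is never imported in this module; in
# this codebase it appears to be injected into the module namespace by the API
# route loader before the file is executed.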
import os  # needed for os.SEEK_END when sizing uploads
import pathlib
import datetime
from api.db.services.dialog_service import keyword_extraction
from rag.app.qa import rmPrefix, beAdoc
from rag.nlp import rag_tokenizer
from api.db import LLMType, ParserType
from api.db.services.llm_service import TenantLLMService
from api.settings import kg_retrievaler
import hashlib
import re
from api.utils.api_utils import token_required
from api.db.db_models import Task
from api.db.services.task_service import TaskService, queue_tasks
from api.utils.api_utils import server_error_response
from api.utils.api_utils import get_result, get_error_data_result
from io import BytesIO
from elasticsearch_dsl import Q
from flask import request, send_file
from api.db import FileSource, TaskStatus, FileType
from api.db.db_models import File
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.settings import RetCode, retrievaler
from api.utils.api_utils import construct_json_result, get_parser_config
from rag.nlp import search
from rag.utils import rmSpace
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL

MAXIMUM_OF_UPLOADING_FILES = 256
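
# POST /dataset/<dataset_id>/document
# Upload one or more files into a dataset. Expects multipart/form-data with a
# "file" field; the combined payload is capped at 10 MB.
# Illustrative call (host, port, path prefix, and API key are placeholders that
# depend on the deployment and on how the blueprint is registered):
#   curl -X POST 'http://localhost:9380/api/v1/dataset/<dataset_id>/document' \
#        -H 'Authorization: Bearer <API_KEY>' \
#        -F 'file=@./example.pdf'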
@manager.route('/dataset/<dataset_id>/document', methods=['POST'])
@token_required
def upload(dataset_id, tenant_id):
    if 'file' not in request.files:
        return get_error_data_result(
            retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_result(
                retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
    # Reject the request early if the combined payload is too large.
    total_size = 0
    for file_obj in file_objs:
        file_obj.seek(0, os.SEEK_END)
        total_size += file_obj.tell()
        file_obj.seek(0)
    MAX_TOTAL_FILE_SIZE = 10 * 1024 * 1024  # 10 MB
    if total_size > MAX_TOTAL_FILE_SIZE:
        return get_result(
            retmsg=f'Total file size exceeds 10MB limit! ({total_size / (1024 * 1024):.2f} MB)',
            retcode=RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if not e:
        raise LookupError(f"Can't find the dataset with ID {dataset_id}!")
    err, files = FileService.upload_document(kb, file_objs, tenant_id)
    if err:
        return get_result(
            retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
    # Rename internal keys to their public API names.
    renamed_doc_list = []
    for file in files:
        doc = file[0]
        key_mapping = {
            "chunk_num": "chunk_count",
            "kb_id": "dataset_id",
            "token_num": "token_count",
            "parser_id": "chunk_method"
        }
        renamed_doc = {}
        for key, value in doc.items():
            new_key = key_mapping.get(key, key)
            renamed_doc[new_key] = value
        renamed_doc["run"] = "UNSTART"
        renamed_doc_list.append(renamed_doc)
    return get_result(data=renamed_doc_list)
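
# PUT /dataset/<dataset_id>/info/<document_id>
# Update a document's name, chunk method, or parser config. Requests that try
# to change the read-only fields `chunk_count`, `token_count`, or `progress`
# are rejected.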
@manager.route('/dataset/<dataset_id>/info/<document_id>', methods=['PUT'])
@token_required
def update_doc(tenant_id, dataset_id, document_id):
    req = request.json
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg="You don't own the dataset.")
    doc = DocumentService.query(kb_id=dataset_id, id=document_id)
    if not doc:
        return get_error_data_result(retmsg="The dataset doesn't own the document.")
    doc = doc[0]
    if "chunk_count" in req:
        if req["chunk_count"] != doc.chunk_num:
            return get_error_data_result(retmsg="Can't change `chunk_count`.")
    if "token_count" in req:
        if req["token_count"] != doc.token_num:
            return get_error_data_result(retmsg="Can't change `token_count`.")
    if "progress" in req:
        if req["progress"] != doc.progress:
            return get_error_data_result(retmsg="Can't change `progress`.")
    if "name" in req and req["name"] != doc.name:
        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(doc.name.lower()).suffix:
            return get_result(retmsg="The extension of file can't be changed", retcode=RetCode.ARGUMENT_ERROR)
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
                return get_error_data_result(
                    retmsg="Duplicated document name in the same dataset.")
        if not DocumentService.update_by_id(
                document_id, {"name": req["name"]}):
            return get_error_data_result(
                retmsg="Database error (Document rename)!")
        informs = File2DocumentService.get_by_document_id(document_id)
        if informs:
            e, file = FileService.get_by_id(informs[0].file_id)
            FileService.update_by_id(file.id, {"name": req["name"]})
    if "parser_config" in req:
        DocumentService.update_parser_config(doc.id, req["parser_config"])
    if "chunk_method" in req:
        valid_chunk_method = {"naive", "manual", "qa", "table", "paper", "book", "laws",
                              "presentation", "picture", "one", "knowledge_graph", "email"}
        if req.get("chunk_method") not in valid_chunk_method:
            return get_error_data_result(f"`chunk_method` {req['chunk_method']} doesn't exist")
        if doc.parser_id.lower() == req["chunk_method"].lower():
            return get_result()
        if doc.type == FileType.VISUAL or re.search(
                r"\.(ppt|pptx|pages)$", doc.name):
            return get_error_data_result(retmsg="Not supported yet!")
        e = DocumentService.update_by_id(doc.id,
                                         {"parser_id": req["chunk_method"], "progress": 0, "progress_msg": "",
                                          "run": TaskStatus.UNSTART.value})
        if not e:
            return get_error_data_result(retmsg="Document not found!")
        req["parser_config"] = get_parser_config(req["chunk_method"], req.get("parser_config"))
        if doc.token_num > 0:
            # "process_duation" matches the (misspelled) field name on the Document model.
            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                    doc.process_duation * -1)
            if not e:
                return get_error_data_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(document_id)
            if not tenant_id:
                return get_error_data_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
    return get_result()
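
# GET /dataset/<dataset_id>/document/<document_id>
# Stream the original file back from object storage as an attachment.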
@manager.route('/dataset/<dataset_id>/document/<document_id>', methods=['GET'])
@token_required
def download(tenant_id, dataset_id, document_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f'You do not own the dataset {dataset_id}.')
    doc = DocumentService.query(kb_id=dataset_id, id=document_id)
    if not doc:
        return get_error_data_result(retmsg=f"The dataset doesn't own the document {document_id}.")
    # Fetch the blob from object storage (e.g. MinIO) and stream it back.
    doc_id, doc_location = File2DocumentService.get_storage_address(doc_id=document_id)
    file_stream = STORAGE_IMPL.get(doc_id, doc_location)
    if not file_stream:
        return construct_json_result(message="This file is empty.", code=RetCode.DATA_ERROR)
    file = BytesIO(file_stream)
    # Use send_file with a proper filename and MIME type.
    return send_file(
        file,
        as_attachment=True,
        download_name=doc[0].name,
        mimetype='application/octet-stream'  # a sensible default MIME type
    )
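
# GET /dataset/<dataset_id>/info
# List documents in a dataset, with optional keyword filtering, paging
# (`offset`, `limit`), sorting (`orderby`, `desc`), and lookup by `id`.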
@manager.route('/dataset/<dataset_id>/info', methods=['GET'])
@token_required
def list_docs(dataset_id, tenant_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    id = request.args.get("id")
    # Only validate ownership when an explicit document id is requested.
    if id and not DocumentService.query(id=id, kb_id=dataset_id):
        return get_error_data_result(retmsg=f"You don't own the document {id}.")
    offset = int(request.args.get("offset", 1))
    keywords = request.args.get("keywords", "")
    limit = int(request.args.get("limit", 1024))
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc") != "False"
    docs, tol = DocumentService.get_list(dataset_id, offset, limit, orderby, desc, keywords, id)
    # Rename internal keys to their public API names.
    renamed_doc_list = []
    for doc in docs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "kb_id": "dataset_id",
            "token_num": "token_count",
            "parser_id": "chunk_method"
        }
        run_mapping = {
            "0": "UNSTART",
            "1": "RUNNING",
            "2": "CANCEL",
            "3": "DONE",
            "4": "FAIL"
        }
        renamed_doc = {}
        for key, value in doc.items():
            if key == "run":
                # Map the numeric run state to its readable name instead of
                # letting the raw value overwrite it.
                renamed_doc["run"] = run_mapping.get(str(value))
                continue
            new_key = key_mapping.get(key, key)
            renamed_doc[new_key] = value
        renamed_doc_list.append(renamed_doc)
    return get_result(data={"total": tol, "docs": renamed_doc_list})
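
# DELETE /dataset/<dataset_id>/document
# Delete the documents listed in `ids`: DB rows, file records, and stored blobs.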
@manager.route('/dataset/<dataset_id>/document', methods=['DELETE'])
@token_required
def delete(tenant_id, dataset_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("ids"):
        return get_error_data_result(retmsg="`ids` is required")
    doc_ids = req["ids"]
    root_folder = FileService.get_root_folder(tenant_id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, tenant_id)
    errors = ""
    for doc_id in doc_ids:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
                return get_error_data_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_error_data_result(retmsg="Tenant not found!")
            b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
            if not DocumentService.remove_document(doc, tenant_id):
                return get_error_data_result(
                    retmsg="Database error (Document removal)!")
            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc_id)
            STORAGE_IMPL.rm(b, n)
        except Exception as e:
            errors += str(e)
    if errors:
        return get_result(retmsg=errors, retcode=RetCode.SERVER_ERROR)
    return get_result()
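
# POST /dataset/<dataset_id>/chunk
# Queue parsing for each document in `document_ids`: reset progress and
# counters, drop stale chunks from the index, and enqueue parsing tasks.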
@manager.route('/dataset/<dataset_id>/chunk', methods=['POST'])
@token_required
def parse(tenant_id, dataset_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("document_ids"):
        return get_error_data_result("`document_ids` is required")
    for id in req["document_ids"]:
        if not DocumentService.query(id=id, kb_id=dataset_id):
            return get_error_data_result(retmsg=f"You don't own the document {id}.")
        # Reset progress and counters, then re-queue the document for parsing.
        info = {"run": "1", "progress": 0}
        info["progress_msg"] = ""
        info["chunk_num"] = 0
        info["token_num"] = 0
        DocumentService.update_by_id(id, info)
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
        TaskService.filter_delete([Task.doc_id == id])
        e, doc = DocumentService.get_by_id(id)
        doc = doc.to_dict()
        doc["tenant_id"] = tenant_id
        bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
        queue_tasks(doc, bucket, name)
    return get_result()
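
# DELETE /dataset/<dataset_id>/chunk
# Cancel in-flight parsing for `document_ids` and drop their chunks from the index.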
@manager.route('/dataset/<dataset_id>/chunk', methods=['DELETE'])
@token_required
def stop_parsing(tenant_id, dataset_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("document_ids"):
        return get_error_data_result("`document_ids` is required")
    for id in req["document_ids"]:
        doc = DocumentService.query(id=id, kb_id=dataset_id)
        if not doc:
            return get_error_data_result(retmsg=f"You don't own the document {id}.")
        if doc[0].progress == 100.0 or doc[0].progress == 0.0:
            return get_error_data_result("Can't stop parsing a document whose progress is at 0 or 100.")
        info = {"run": "2", "progress": 0}
        DocumentService.update_by_id(id, info)
        tenant_id = DocumentService.get_tenant_id(id)
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
    return get_result()
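
# GET /dataset/<dataset_id>/document/<document_id>/chunk
# List a document's chunks through the retriever, with paging and keyword
# search; a single chunk can be fetched via the `id` query parameter.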
@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['GET'])
@token_required
def list_chunks(tenant_id, dataset_id, document_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
    doc = doc[0]
    req = request.args
    doc_id = document_id
    page = int(req.get("offset", 1))
    size = int(req.get("limit", 30))
    question = req.get("keywords", "")
    query = {
        "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
    }
    sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
    res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
    origin_chunks = []
    sign = 0
    for id in sres.ids:
        d = {
            "chunk_id": id,
            "content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight
            else sres.field[id].get("content_with_weight", ""),
            "doc_id": sres.field[id]["doc_id"],
            "docnm_kwd": sres.field[id]["docnm_kwd"],
            "important_kwd": sres.field[id].get("important_kwd", []),
            "img_id": sres.field[id].get("img_id", ""),
            "available_int": sres.field[id].get("available_int", 1),
            "positions": sres.field[id].get("position_int", "").split("\t")
        }
        # Positions are stored as flat tab-separated values, five per box.
        if len(d["positions"]) % 5 == 0:
            poss = []
            for i in range(0, len(d["positions"]), 5):
                poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
                             float(d["positions"][i + 3]), float(d["positions"][i + 4])])
            d["positions"] = poss
        origin_chunks.append(d)
        if req.get("id"):
            if req.get("id") == id:
                origin_chunks.clear()
                origin_chunks.append(d)
                sign = 1
                break
    if req.get("id"):
        if sign == 0:
            return get_error_data_result(f"Can't find this chunk {req.get('id')}")
    for chunk in origin_chunks:
        key_mapping = {
            "chunk_id": "id",
            "content_with_weight": "content",
            "doc_id": "document_id",
            "important_kwd": "important_keywords",
            "img_id": "image_id",
        }
        renamed_chunk = {}
        for key, value in chunk.items():
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
        res["chunks"].append(renamed_chunk)
    return get_result(data=res)
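
# POST /dataset/<dataset_id>/document/<document_id>/chunk
# Add one chunk to a document: tokenize it, embed it, and upsert it into the
# search index.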
@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['POST'])
@token_required
def add_chunk(tenant_id, dataset_id, document_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
    doc = doc[0]
    req = request.json
    if not req.get("content"):
        return get_error_data_result(retmsg="`content` is required")
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result("`important_keywords` is required to be a list")
    # The chunk ID is a deterministic hash of the content plus the document ID.
    md5 = hashlib.md5()
    md5.update((req["content"] + document_id).encode("utf-8"))
    chunk_id = md5.hexdigest()
    d = {"id": chunk_id, "content_ltks": rag_tokenizer.tokenize(req["content"]),
         "content_with_weight": req["content"]}
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_keywords", [])
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_keywords", [])))
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
    d["kb_id"] = [doc.kb_id]
    d["docnm_kwd"] = doc.name
    d["doc_id"] = doc.id
    embd_id = DocumentService.get_embd_id(document_id)
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id)
    v, c = embd_mdl.encode([doc.name, req["content"]])
    # Weighted mix of the title vector and the content vector.
    v = 0.1 * v[0] + 0.9 * v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
    DocumentService.increment_chunk_num(
        doc.id, doc.kb_id, c, 1, 0)
    d["chunk_id"] = chunk_id
    # Rename internal keys to their public API names.
    key_mapping = {
        "chunk_id": "id",
        "content_with_weight": "content",
        "doc_id": "document_id",
        "important_kwd": "important_keywords",
        "kb_id": "dataset_id",
        "create_timestamp_flt": "create_timestamp",
        "create_time": "create_time",
        "document_keyword": "document",
    }
    renamed_chunk = {}
    for key, value in d.items():
        if key in key_mapping:
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
    return get_result(data={"chunk": renamed_chunk})
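
# DELETE /dataset/<dataset_id>/document/<document_id>/chunk
# Remove the chunks listed in `chunk_ids` from the index and decrement the
# document's chunk count.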
@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['DELETE'])
@token_required
def rm_chunk(tenant_id, dataset_id, document_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
    doc = doc[0]
    req = request.json
    if not req.get("chunk_ids"):
        return get_error_data_result("`chunk_ids` is required")
    query = {
        "doc_ids": [doc.id], "page": 1, "size": 1024, "question": "", "sort": True}
    sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
    for chunk_id in req.get("chunk_ids"):
        if chunk_id not in sres.ids:
            return get_error_data_result(f"Chunk {chunk_id} not found")
    if not ELASTICSEARCH.deleteByQuery(
            Q("ids", values=req["chunk_ids"]), search.index_name(tenant_id)):
        return get_error_data_result(retmsg="Index updating failure")
    deleted_chunk_ids = req["chunk_ids"]
    chunk_number = len(deleted_chunk_ids)
    DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
    return get_result()
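
# PUT /dataset/<dataset_id>/document/<document_id>/chunk/<chunk_id>
# Update a chunk's content, keywords, or availability, re-embed it, and upsert
# the result. Chunks of QA-parsed documents must keep the question/answer split.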
@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk/<chunk_id>', methods=['PUT'])
@token_required
def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
    try:
        res = ELASTICSEARCH.get(
            chunk_id, search.index_name(
                tenant_id))
    except Exception:
        return get_error_data_result(f"Can't find this chunk {chunk_id}")
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
    doc = doc[0]
    query = {
        "doc_ids": [document_id], "page": 1, "size": 1024, "question": "", "sort": True
    }
    sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
    if chunk_id not in sres.ids:
        return get_error_data_result(f"You don't own the chunk {chunk_id}")
    req = request.json
    content = res["_source"].get("content_with_weight")
    d = {
        "id": chunk_id,
        "content_with_weight": req.get("content", content)}
    d["content_ltks"] = rag_tokenizer.tokenize(d["content_with_weight"])
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result("`important_keywords` should be a list")
        d["important_kwd"] = req.get("important_keywords")
        d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
    if "available" in req:
        d["available_int"] = int(req["available"])
    embd_id = DocumentService.get_embd_id(document_id)
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id)
    if doc.parser_id == ParserType.QA:
        arr = [
            t for t in re.split(
                r"[\n\t]",
                d["content_with_weight"]) if len(t) > 1]
        if len(arr) != 2:
            return get_error_data_result(
                retmsg="Q&A must be separated by TAB/ENTER key.")
        q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
        d = beAdoc(d, arr[0], arr[1], not any(
            [rag_tokenizer.is_chinese(t) for t in q + a]))
    v, c = embd_mdl.encode([doc.name, d["content_with_weight"]])
    v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
    return get_result()
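
# POST /retrieval
# Run a retrieval test across one or more datasets: embed the question, search,
# optionally rerank, and return matching chunks.
# Illustrative call (host, port, path prefix, and API key are placeholders):
#   curl -X POST 'http://localhost:9380/api/v1/retrieval' \
#        -H 'Authorization: Bearer <API_KEY>' \
#        -H 'Content-Type: application/json' \
#        -d '{"datasets": ["<dataset_id>"], "question": "What is covered?"}'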
@manager.route('/retrieval', methods=['POST'])
@token_required
def retrieval_test(tenant_id):
    req = request.json
    if not req.get("datasets"):
        return get_error_data_result("`datasets` is required.")
    kb_ids = req["datasets"]
    if not isinstance(kb_ids, list):
        return get_error_data_result("`datasets` should be a list")
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
        return get_result(
            retmsg='Knowledge bases use different embedding models or do not exist.',
            retcode=RetCode.AUTHENTICATION_ERROR)
    for id in kb_ids:
        if not KnowledgebaseService.query(id=id, tenant_id=tenant_id):
            return get_error_data_result(f"You don't own the dataset {id}.")
    if "question" not in req:
        return get_error_data_result("`question` is required.")
    page = int(req.get("offset", 1))
    size = int(req.get("limit", 1024))
    question = req["question"]
    doc_ids = req.get("documents", [])
    if not isinstance(doc_ids, list):
        return get_error_data_result("`documents` should be a list")
    doc_ids_list = KnowledgebaseService.list_documents_by_ids(kb_ids)
    for doc_id in doc_ids:
        if doc_id not in doc_ids_list:
            return get_error_data_result(f"You don't own the document {doc_id}")
    similarity_threshold = float(req.get("similarity_threshold", 0.2))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    top = int(req.get("top_k", 1024))
    # Accept both the string "False"/"false" and a JSON boolean for `highlight`.
    highlight = str(req.get("highlight", "")).lower() != "false"
    try:
        e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
        if not e:
            return get_error_data_result(retmsg="Dataset not found!")
        embd_mdl = TenantLLMService.model_instance(
            kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
        rerank_mdl = None
        if req.get("rerank_id"):
            rerank_mdl = TenantLLMService.model_instance(
                kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
        if req.get("keyword", False):
            chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)
        retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
        ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, page, size,
                               similarity_threshold, vector_similarity_weight, top,
                               doc_ids, rerank_mdl=rerank_mdl, highlight=highlight)
        for c in ranks["chunks"]:
            if "vector" in c:
                del c["vector"]
        # Rename internal keys to their public API names.
        renamed_chunks = []
        for chunk in ranks["chunks"]:
            key_mapping = {
                "chunk_id": "id",
                "content_with_weight": "content",
                "doc_id": "document_id",
                "important_kwd": "important_keywords",
                "docnm_kwd": "document_keyword"
            }
            rename_chunk = {}
            for key, value in chunk.items():
                new_key = key_mapping.get(key, key)
                rename_chunk[new_key] = value
            renamed_chunks.append(rename_chunk)
        ranks["chunks"] = renamed_chunks
        return get_result(data=ranks)
    except Exception as e:
        if str(e).find("not_found") >= 0:
            return get_result(retmsg='No chunk found! Check the chunk status please!',
                              retcode=RetCode.DATA_ERROR)
        return server_error_response(e)