
doc.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import hashlib
import pathlib
import re
from io import BytesIO

from elasticsearch_dsl import Q
from flask import request, send_file

from api.db import FileSource, FileType, LLMType, ParserType, TaskStatus
from api.db.db_models import File, Task
from api.db.services.dialog_service import keyword_extraction
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import TenantLLMService
from api.db.services.task_service import TaskService, queue_tasks
from api.settings import RetCode, kg_retrievaler, retrievaler
from api.utils.api_utils import (construct_json_result, get_error_data_result,
                                 get_result, server_error_response,
                                 token_required)
from rag.app.qa import beAdoc, rmPrefix
from rag.nlp import rag_tokenizer, search
from rag.utils import rmSpace
from rag.utils.es_conn import ELASTICSEARCH
from rag.utils.storage_factory import STORAGE_IMPL

MAXIMUM_OF_UPLOADING_FILES = 256
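
# NOTE: `manager`, used by the route decorators below, is not imported in this
# file; presumably it is injected by the application's dynamic route loader
# when this module is mounted (an assumption about the surrounding project).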

@manager.route('/dataset/<dataset_id>/document', methods=['POST'])
@token_required
def upload(dataset_id, tenant_id):
    if 'file' not in request.files:
        return get_error_data_result(
            retmsg='No file part!', retcode=RetCode.ARGUMENT_ERROR)
    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_result(
                retmsg='No file selected!', retcode=RetCode.ARGUMENT_ERROR)
    e, kb = KnowledgebaseService.get_by_id(dataset_id)
    if not e:
        raise LookupError(f"Can't find the knowledgebase with ID {dataset_id}!")
    err, _ = FileService.upload_document(kb, file_objs, tenant_id)
    if err:
        return get_result(
            retmsg="\n".join(err), retcode=RetCode.SERVER_ERROR)
    return get_result()
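
# Illustrative client call for the upload endpoint above (not part of this
# module). The base URL and `API_KEY` are assumptions about the deployment;
# `token_required` reads the token from the `Authorization` header, here
# assumed to use the usual Bearer scheme. Several files may be sent under the
# same `file` field name:
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/document",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       files=[("file", ("report.pdf", open("report.pdf", "rb")))],
#   )
#   print(resp.json())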

@manager.route('/dataset/<dataset_id>/info/<document_id>', methods=['PUT'])
@token_required
def update_doc(tenant_id, dataset_id, document_id):
    req = request.json
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg="You don't own the dataset.")
    doc = DocumentService.query(kb_id=dataset_id, id=document_id)
    if not doc:
        return get_error_data_result(retmsg="The dataset doesn't own the document.")
    doc = doc[0]
    if "chunk_count" in req:
        if req["chunk_count"] != doc.chunk_num:
            return get_error_data_result(retmsg="Can't change `chunk_count`.")
    if "token_count" in req:
        if req["token_count"] != doc.token_num:
            return get_error_data_result(retmsg="Can't change `token_count`.")
    if "progress" in req:
        if req['progress'] != doc.progress:
            return get_error_data_result(retmsg="Can't change `progress`.")
    if "name" in req and req["name"] != doc.name:
        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(doc.name.lower()).suffix:
            return get_result(retmsg="The extension of the file can't be changed.",
                              retcode=RetCode.ARGUMENT_ERROR)
        for d in DocumentService.query(name=req["name"], kb_id=doc.kb_id):
            if d.name == req["name"]:
                return get_error_data_result(
                    retmsg="Duplicated document name in the same knowledgebase.")
        if not DocumentService.update_by_id(
                document_id, {"name": req["name"]}):
            return get_error_data_result(
                retmsg="Database error (Document rename)!")
        informs = File2DocumentService.get_by_document_id(document_id)
        if informs:
            e, file = FileService.get_by_id(informs[0].file_id)
            FileService.update_by_id(file.id, {"name": req["name"]})
    if "parser_config" in req:
        DocumentService.update_parser_config(doc.id, req["parser_config"])
    if "chunk_method" in req:
        if doc.parser_id.lower() == req["chunk_method"].lower():
            return get_result()
        if doc.type == FileType.VISUAL or re.search(
                r"\.(ppt|pptx|pages)$", doc.name):
            return get_error_data_result(retmsg="Not supported yet!")
        e = DocumentService.update_by_id(doc.id,
                                         {"parser_id": req["chunk_method"], "progress": 0, "progress_msg": "",
                                          "run": TaskStatus.UNSTART.value})
        if not e:
            return get_error_data_result(retmsg="Document not found!")
        if doc.token_num > 0:
            # "process_duation" is the field name as spelled in the DB model.
            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1,
                                                    doc.process_duation * -1)
            if not e:
                return get_error_data_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(document_id)
            if not tenant_id:
                return get_error_data_result(retmsg="Tenant not found!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
    return get_result()
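
# Illustrative update request (hypothetical IDs and base URL). Note that
# `chunk_count`, `token_count` and `progress` are effectively read-only above:
# sending a value that differs from the stored one is rejected:
#
#   import requests
#
#   resp = requests.put(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/info/<document_id>",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={"name": "renamed.pdf", "chunk_method": "naive"},
#   )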

@manager.route('/dataset/<dataset_id>/document/<document_id>', methods=['GET'])
@token_required
def download(tenant_id, dataset_id, document_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f'You do not own the dataset {dataset_id}.')
    doc = DocumentService.query(kb_id=dataset_id, id=document_id)
    if not doc:
        return get_error_data_result(retmsg=f"The dataset doesn't own the document {document_id}.")
    # The process of downloading
    doc_id, doc_location = File2DocumentService.get_storage_address(doc_id=document_id)  # minio address
    file_stream = STORAGE_IMPL.get(doc_id, doc_location)
    if not file_stream:
        return construct_json_result(message="This file is empty.", code=RetCode.DATA_ERROR)
    file = BytesIO(file_stream)
    # Use send_file with a proper filename and MIME type
    return send_file(
        file,
        as_attachment=True,
        download_name=doc[0].name,
        mimetype='application/octet-stream'  # Set a default MIME type
    )
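
# Illustrative download call (hypothetical base URL). The response body is the
# raw file, so stream it to disk rather than calling .json():
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/document/<document_id>",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       stream=True,
#   )
#   with open("downloaded.bin", "wb") as f:
#       for part in resp.iter_content(chunk_size=8192):
#           f.write(part)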

@manager.route('/dataset/<dataset_id>/info', methods=['GET'])
@token_required
def list_docs(dataset_id, tenant_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    id = request.args.get("id")
    # Only validate the document filter when one was actually supplied.
    if id and not DocumentService.query(id=id, kb_id=dataset_id):
        return get_error_data_result(retmsg=f"You don't own the document {id}.")
    offset = int(request.args.get("offset", 1))
    keywords = request.args.get("keywords", "")
    limit = int(request.args.get("limit", 1024))
    orderby = request.args.get("orderby", "create_time")
    desc = request.args.get("desc") != "False"
    docs, tol = DocumentService.get_list(dataset_id, offset, limit, orderby, desc, keywords, id)
    # rename keys
    renamed_doc_list = []
    for doc in docs:
        key_mapping = {
            "chunk_num": "chunk_count",
            "kb_id": "knowledgebase_id",
            "token_num": "token_count",
            "parser_id": "chunk_method"
        }
        renamed_doc = {}
        for key, value in doc.items():
            new_key = key_mapping.get(key, key)
            renamed_doc[new_key] = value
        renamed_doc_list.append(renamed_doc)
    return get_result(data={"total": tol, "docs": renamed_doc_list})
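
# Illustrative listing call (hypothetical base URL). All filters are optional
# query parameters; `desc` defaults to True and `orderby` to "create_time":
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/info",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       params={"offset": 1, "limit": 30, "keywords": "invoice", "desc": "False"},
#   )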

@manager.route('/dataset/<dataset_id>/document', methods=['DELETE'])
@token_required
def delete(tenant_id, dataset_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("ids"):
        return get_error_data_result(retmsg="`ids` is required")
    doc_ids = req["ids"]
    root_folder = FileService.get_root_folder(tenant_id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, tenant_id)
    errors = ""
    for doc_id in doc_ids:
        try:
            e, doc = DocumentService.get_by_id(doc_id)
            if not e:
                return get_error_data_result(retmsg="Document not found!")
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_error_data_result(retmsg="Tenant not found!")
            b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
            if not DocumentService.remove_document(doc, tenant_id):
                return get_error_data_result(
                    retmsg="Database error (Document removal)!")
            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc_id)
            STORAGE_IMPL.rm(b, n)
        except Exception as e:
            errors += str(e)
    if errors:
        return get_result(retmsg=errors, retcode=RetCode.SERVER_ERROR)
    return get_result()
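
# Illustrative deletion call (hypothetical IDs and base URL); the body carries
# the list of document IDs to remove:
#
#   import requests
#
#   resp = requests.delete(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/document",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={"ids": ["<document_id_1>", "<document_id_2>"]},
#   )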

@manager.route('/dataset/<dataset_id>/chunk', methods=['POST'])
@token_required
def parse(tenant_id, dataset_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("document_ids"):
        return get_error_data_result("`document_ids` is required")
    for id in req["document_ids"]:
        if not DocumentService.query(id=id, kb_id=dataset_id):
            return get_error_data_result(retmsg=f"You don't own the document {id}.")
        # Reset progress counters, drop stale index entries and tasks, then re-queue.
        info = {"run": "1", "progress": 0, "progress_msg": "", "chunk_num": 0, "token_num": 0}
        DocumentService.update_by_id(id, info)
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
        TaskService.filter_delete([Task.doc_id == id])
        e, doc = DocumentService.get_by_id(id)
        doc = doc.to_dict()
        doc["tenant_id"] = tenant_id
        bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
        queue_tasks(doc, bucket, name)
    return get_result()

@manager.route('/dataset/<dataset_id>/chunk', methods=['DELETE'])
@token_required
def stop_parsing(tenant_id, dataset_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    req = request.json
    if not req.get("document_ids"):
        return get_error_data_result("`document_ids` is required")
    for id in req["document_ids"]:
        doc = DocumentService.query(id=id, kb_id=dataset_id)
        if not doc:
            return get_error_data_result(retmsg=f"You don't own the document {id}.")
        if doc[0].progress == 100.0 or doc[0].progress == 0.0:
            return get_error_data_result("Can't stop parsing a document whose progress is at 0 or 100.")
        info = {"run": "2", "progress": 0}
        DocumentService.update_by_id(id, info)
        tenant_id = DocumentService.get_tenant_id(id)
        ELASTICSEARCH.deleteByQuery(
            Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
    return get_result()
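
# Illustrative calls for the two endpoints above (hypothetical IDs and base
# URL): POST queues the documents for parsing, DELETE cancels documents that
# are currently being parsed:
#
#   import requests
#
#   url = "http://localhost:9380/api/v1/dataset/<dataset_id>/chunk"
#   headers = {"Authorization": f"Bearer {API_KEY}"}
#   body = {"document_ids": ["<document_id>"]}
#
#   requests.post(url, headers=headers, json=body)    # start parsing
#   requests.delete(url, headers=headers, json=body)  # stop parsing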

@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['GET'])
@token_required
def list_chunks(tenant_id, dataset_id, document_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
    doc = doc[0]
    req = request.args
    doc_id = document_id
    page = int(req.get("offset", 1))
    size = int(req.get("limit", 30))
    question = req.get("keywords", "")
    query = {
        "doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
    }
    sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
    res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
    origin_chunks = []
    sign = 0
    for id in sres.ids:
        d = {
            "chunk_id": id,
            "content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight
            else sres.field[id].get("content_with_weight", ""),
            "doc_id": sres.field[id]["doc_id"],
            "docnm_kwd": sres.field[id]["docnm_kwd"],
            "important_kwd": sres.field[id].get("important_kwd", []),
            "img_id": sres.field[id].get("img_id", ""),
            "available_int": sres.field[id].get("available_int", 1),
            "positions": sres.field[id].get("position_int", "").split("\t")
        }
        # Positions are stored as flat tab-separated quintuples; regroup them.
        if len(d["positions"]) % 5 == 0:
            poss = []
            for i in range(0, len(d["positions"]), 5):
                poss.append([float(d["positions"][i]), float(d["positions"][i + 1]), float(d["positions"][i + 2]),
                             float(d["positions"][i + 3]), float(d["positions"][i + 4])])
            d["positions"] = poss
        origin_chunks.append(d)
        if req.get("id"):
            if req.get("id") == id:
                origin_chunks.clear()
                origin_chunks.append(d)
                sign = 1
                break
    if req.get("id"):
        if sign == 0:
            return get_error_data_result(f"Can't find this chunk {req.get('id')}")
    for chunk in origin_chunks:
        key_mapping = {
            "chunk_id": "id",
            "content_with_weight": "content",
            "doc_id": "document_id",
            "important_kwd": "important_keywords",
            "img_id": "image_id",
        }
        renamed_chunk = {}
        for key, value in chunk.items():
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
        res["chunks"].append(renamed_chunk)
    return get_result(data=res)
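
# Illustrative chunk-listing call (hypothetical IDs and base URL). Passing
# `id` narrows the result to a single chunk; `keywords` runs a highlighted
# search instead of a plain listing:
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/document/<document_id>/chunk",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       params={"offset": 1, "limit": 30, "keywords": "warranty"},
#   )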

@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['POST'])
@token_required
def create(tenant_id, dataset_id, document_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
    doc = doc[0]
    req = request.json
    if not req.get("content"):
        return get_error_data_result(retmsg="`content` is required")
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result("`important_keywords` is required to be a list")
    # Derive a deterministic chunk ID from the content and document ID.
    md5 = hashlib.md5()
    md5.update((req["content"] + document_id).encode("utf-8"))
    chunk_id = md5.hexdigest()
    d = {"id": chunk_id, "content_ltks": rag_tokenizer.tokenize(req["content"]),
         "content_with_weight": req["content"]}
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    d["important_kwd"] = req.get("important_keywords", [])
    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_keywords", [])))
    d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
    d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
    d["kb_id"] = [doc.kb_id]
    d["docnm_kwd"] = doc.name
    d["doc_id"] = doc.id
    embd_id = DocumentService.get_embd_id(document_id)
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id)
    # Blend the title and content embeddings, weighting the content higher.
    v, c = embd_mdl.encode([doc.name, req["content"]])
    v = 0.1 * v[0] + 0.9 * v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
    DocumentService.increment_chunk_num(
        doc.id, doc.kb_id, c, 1, 0)
    d["chunk_id"] = chunk_id
    # rename keys
    key_mapping = {
        "chunk_id": "id",
        "content_with_weight": "content",
        "doc_id": "document_id",
        "important_kwd": "important_keywords",
        "kb_id": "dataset_id",
        "create_timestamp_flt": "create_timestamp",
        "create_time": "create_time",
        "document_keyword": "document",
    }
    renamed_chunk = {}
    for key, value in d.items():
        if key in key_mapping:
            new_key = key_mapping.get(key, key)
            renamed_chunk[new_key] = value
    return get_result(data={"chunk": renamed_chunk})
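
# Illustrative chunk-creation call (hypothetical IDs and base URL). Because
# the chunk ID is md5(content + document_id), posting the same content twice
# upserts the same chunk rather than creating a duplicate:
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/document/<document_id>/chunk",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={"content": "Paris is the capital of France.",
#             "important_keywords": ["Paris", "France"]},
#   )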

@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk', methods=['DELETE'])
@token_required
def rm_chunk(tenant_id, dataset_id, document_id):
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
    doc = doc[0]
    req = request.json
    if not req.get("chunk_ids"):
        return get_error_data_result("`chunk_ids` is required")
    query = {
        "doc_ids": [doc.id], "page": 1, "size": 1024, "question": "", "sort": True}
    sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
    for chunk_id in req.get("chunk_ids"):
        if chunk_id not in sres.ids:
            return get_error_data_result(f"Chunk {chunk_id} not found")
    if not ELASTICSEARCH.deleteByQuery(
            Q("ids", values=req["chunk_ids"]), search.index_name(tenant_id)):
        return get_error_data_result(retmsg="Index updating failure")
    deleted_chunk_ids = req["chunk_ids"]
    chunk_number = len(deleted_chunk_ids)
    DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
    return get_result()
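
# Illustrative chunk-removal call (hypothetical IDs and base URL):
#
#   import requests
#
#   resp = requests.delete(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/document/<document_id>/chunk",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={"chunk_ids": ["<chunk_id_1>", "<chunk_id_2>"]},
#   )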

@manager.route('/dataset/<dataset_id>/document/<document_id>/chunk/<chunk_id>', methods=['PUT'])
@token_required
def update_chunk(tenant_id, dataset_id, document_id, chunk_id):
    try:
        res = ELASTICSEARCH.get(
            chunk_id, search.index_name(tenant_id))
    except Exception:
        return get_error_data_result(f"Can't find this chunk {chunk_id}")
    if not KnowledgebaseService.query(id=dataset_id, tenant_id=tenant_id):
        return get_error_data_result(retmsg=f"You don't own the dataset {dataset_id}.")
    doc = DocumentService.query(id=document_id, kb_id=dataset_id)
    if not doc:
        return get_error_data_result(retmsg=f"You don't own the document {document_id}.")
    doc = doc[0]
    query = {
        "doc_ids": [document_id], "page": 1, "size": 1024, "question": "", "sort": True
    }
    sres = retrievaler.search(query, search.index_name(tenant_id), highlight=True)
    if chunk_id not in sres.ids:
        return get_error_data_result(f"You don't own the chunk {chunk_id}")
    req = request.json
    content = res["_source"].get("content_with_weight")
    d = {
        "id": chunk_id,
        "content_with_weight": req.get("content", content)}
    d["content_ltks"] = rag_tokenizer.tokenize(d["content_with_weight"])
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
    if "important_keywords" in req:
        if not isinstance(req["important_keywords"], list):
            return get_error_data_result("`important_keywords` is required to be a list")
        d["important_kwd"] = req.get("important_keywords")
        d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
    if "available" in req:
        d["available_int"] = req["available"]
    embd_id = DocumentService.get_embd_id(document_id)
    embd_mdl = TenantLLMService.model_instance(
        tenant_id, LLMType.EMBEDDING.value, embd_id)
    if doc.parser_id == ParserType.QA:
        arr = [
            t for t in re.split(
                r"[\n\t]",
                d["content_with_weight"]) if len(t) > 1]
        if len(arr) != 2:
            return get_error_data_result(
                retmsg="Q&A must be separated by TAB/ENTER key.")
        q, a = rmPrefix(arr[0]), rmPrefix(arr[1])
        d = beAdoc(d, arr[0], arr[1], not any(
            [rag_tokenizer.is_chinese(t) for t in q + a]))
    v, c = embd_mdl.encode([doc.name, d["content_with_weight"]])
    v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
    d["q_%d_vec" % len(v)] = v.tolist()
    ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
    return get_result()
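
# Illustrative chunk-update call (hypothetical IDs and base URL). Any of
# `content`, `important_keywords` and `available` may be sent; omitted fields
# keep their stored values:
#
#   import requests
#
#   resp = requests.put(
#       "http://localhost:9380/api/v1/dataset/<dataset_id>/document/<document_id>/chunk/<chunk_id>",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={"content": "Updated chunk text.", "available": 1},
#   )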

@manager.route('/retrieval', methods=['POST'])
@token_required
def retrieval_test(tenant_id):
    req = request.json
    if not req.get("datasets"):
        return get_error_data_result("`datasets` is required.")
    kb_ids = req["datasets"]
    # Normalize a single dataset ID to a list before looking anything up.
    if isinstance(kb_ids, str):
        kb_ids = [kb_ids]
    kbs = KnowledgebaseService.get_by_ids(kb_ids)
    embd_nms = list(set([kb.embd_id for kb in kbs]))
    if len(embd_nms) != 1:
        return get_result(
            retmsg='Knowledge bases use different embedding models or do not exist.',
            retcode=RetCode.AUTHENTICATION_ERROR)
    for id in kb_ids:
        if not KnowledgebaseService.query(id=id, tenant_id=tenant_id):
            return get_error_data_result(f"You don't own the dataset {id}.")
    if "question" not in req:
        return get_error_data_result("`question` is required.")
    page = int(req.get("offset", 1))
    size = int(req.get("limit", 30))
    question = req["question"]
    doc_ids = req.get("documents", [])
    similarity_threshold = float(req.get("similarity_threshold", 0.2))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    top = int(req.get("top_k", 1024))
    highlight = req.get("highlight") not in ("False", "false")
    try:
        e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
        if not e:
            return get_error_data_result(retmsg="Knowledgebase not found!")
        embd_mdl = TenantLLMService.model_instance(
            kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
        rerank_mdl = None
        if req.get("rerank_id"):
            rerank_mdl = TenantLLMService.model_instance(
                kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
        if req.get("keyword", False):
            chat_mdl = TenantLLMService.model_instance(kb.tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)
        retr = retrievaler if kb.parser_id != ParserType.KG else kg_retrievaler
        ranks = retr.retrieval(question, embd_mdl, kb.tenant_id, kb_ids, page, size,
                               similarity_threshold, vector_similarity_weight, top,
                               doc_ids, rerank_mdl=rerank_mdl, highlight=highlight)
        for c in ranks["chunks"]:
            if "vector" in c:
                del c["vector"]
        # rename keys
        renamed_chunks = []
        for chunk in ranks["chunks"]:
            key_mapping = {
                "chunk_id": "id",
                "content_with_weight": "content",
                "doc_id": "document_id",
                "important_kwd": "important_keywords",
                "docnm_kwd": "document_keyword"
            }
            rename_chunk = {}
            for key, value in chunk.items():
                new_key = key_mapping.get(key, key)
                rename_chunk[new_key] = value
            renamed_chunks.append(rename_chunk)
        ranks["chunks"] = renamed_chunks
        return get_result(data=ranks)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_result(retmsg='No chunk found! Check the chunk status please!',
                              retcode=RetCode.DATA_ERROR)
        return server_error_response(e)
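
# Illustrative retrieval call (hypothetical IDs and base URL). The final score
# mixes term-based and vector similarity, weighted by
# `vector_similarity_weight`; all datasets queried together must share one
# embedding model:
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:9380/api/v1/retrieval",
#       headers={"Authorization": f"Bearer {API_KEY}"},
#       json={
#           "datasets": ["<dataset_id>"],
#           "question": "What is the warranty period?",
#           "similarity_threshold": 0.2,
#           "vector_similarity_weight": 0.3,
#           "top_k": 1024,
#       },
#   )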