
document.py

import json

from flask import request
from flask_restx import marshal, reqparse
from sqlalchemy import desc, select
from werkzeug.exceptions import Forbidden, NotFound

import services
from controllers.common.errors import (
    FilenameNotExistsError,
    FileTooLargeError,
    NoFileUploadedError,
    TooManyFilesError,
    UnsupportedFileTypeError,
)
from controllers.service_api import service_api_ns
from controllers.service_api.app.error import ProviderNotInitializeError
from controllers.service_api.dataset.error import (
    ArchivedDocumentImmutableError,
    DocumentIndexingError,
    InvalidMetadataError,
)
from controllers.service_api.wraps import (
    DatasetApiResource,
    cloud_edition_billing_rate_limit_check,
    cloud_edition_billing_resource_check,
)
from core.errors.error import ProviderTokenNotInitError
from extensions.ext_database import db
from fields.document_fields import document_fields, document_status_fields
from libs.login import current_user
from models.dataset import Dataset, Document, DocumentSegment
from services.dataset_service import DatasetService, DocumentService
from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig
from services.file_service import FileService

# Define parsers for document operations
document_text_create_parser = reqparse.RequestParser()
document_text_create_parser.add_argument("name", type=str, required=True, nullable=False, location="json")
document_text_create_parser.add_argument("text", type=str, required=True, nullable=False, location="json")
document_text_create_parser.add_argument("process_rule", type=dict, required=False, nullable=True, location="json")
document_text_create_parser.add_argument("original_document_id", type=str, required=False, location="json")
document_text_create_parser.add_argument(
    "doc_form", type=str, default="text_model", required=False, nullable=False, location="json"
)
document_text_create_parser.add_argument(
    "doc_language", type=str, default="English", required=False, nullable=False, location="json"
)
document_text_create_parser.add_argument(
    "indexing_technique", type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False, location="json"
)
document_text_create_parser.add_argument("retrieval_model", type=dict, required=False, nullable=True, location="json")
document_text_create_parser.add_argument("embedding_model", type=str, required=False, nullable=True, location="json")
document_text_create_parser.add_argument(
    "embedding_model_provider", type=str, required=False, nullable=True, location="json"
)

document_text_update_parser = reqparse.RequestParser()
document_text_update_parser.add_argument("name", type=str, required=False, nullable=True, location="json")
document_text_update_parser.add_argument("text", type=str, required=False, nullable=True, location="json")
document_text_update_parser.add_argument("process_rule", type=dict, required=False, nullable=True, location="json")
document_text_update_parser.add_argument(
    "doc_form", type=str, default="text_model", required=False, nullable=False, location="json"
)
document_text_update_parser.add_argument(
    "doc_language", type=str, default="English", required=False, nullable=False, location="json"
)
document_text_update_parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")


@service_api_ns.route(
    "/datasets/<uuid:dataset_id>/document/create_by_text",
    "/datasets/<uuid:dataset_id>/document/create-by-text",
)
class DocumentAddByTextApi(DatasetApiResource):
    """Resource for documents."""

    @service_api_ns.expect(document_text_create_parser)
    @service_api_ns.doc("create_document_by_text")
    @service_api_ns.doc(description="Create a new document by providing text content")
    @service_api_ns.doc(params={"dataset_id": "Dataset ID"})
    @service_api_ns.doc(
        responses={
            200: "Document created successfully",
            401: "Unauthorized - invalid API token",
            400: "Bad request - invalid parameters",
        }
    )
    @cloud_edition_billing_resource_check("vector_space", "dataset")
    @cloud_edition_billing_resource_check("documents", "dataset")
    @cloud_edition_billing_rate_limit_check("knowledge", "dataset")
    def post(self, tenant_id, dataset_id):
        """Create document by text."""
        args = document_text_create_parser.parse_args()
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise ValueError("Dataset does not exist.")
        if not dataset.indexing_technique and not args["indexing_technique"]:
            raise ValueError("indexing_technique is required.")

        text = args.get("text")
        name = args.get("name")
        if text is None or name is None:
            raise ValueError("Both 'text' and 'name' must be non-null values.")

        if args.get("embedding_model_provider"):
            DatasetService.check_embedding_model_setting(
                tenant_id, args.get("embedding_model_provider"), args.get("embedding_model")
            )
        if (
            args.get("retrieval_model")
            and args.get("retrieval_model").get("reranking_model")
            and args.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
        ):
            DatasetService.check_reranking_model_setting(
                tenant_id,
                args.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
                args.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
            )

        upload_file = FileService.upload_text(text=str(text), text_name=str(name))
        data_source = {
            "type": "upload_file",
            "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}},
        }
        args["data_source"] = data_source
        # validate args
        knowledge_config = KnowledgeConfig(**args)
        DocumentService.document_create_args_validate(knowledge_config)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                knowledge_config=knowledge_config,
                account=current_user,
                dataset_process_rule=dataset.latest_process_rule if "process_rule" not in args else None,
                created_from="api",
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        document = documents[0]
        documents_and_batch_fields = {"document": marshal(document, document_fields), "batch": batch}
        return documents_and_batch_fields, 200
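
# Usage sketch (not part of this module): creating a document from raw text over
# HTTP. Assumes the service API is mounted at /v1 with dataset API-key bearer auth;
# the host, dataset ID, and key below are hypothetical placeholders.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:5001/v1/datasets/<dataset_id>/document/create_by_text",
#       headers={"Authorization": "Bearer <dataset-api-key>"},
#       json={
#           "name": "notes.txt",
#           "text": "Text to index.",
#           "indexing_technique": "high_quality",
#           "process_rule": {"mode": "automatic"},
#       },
#   )
#   print(resp.json()["batch"], resp.json()["document"]["id"])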


@service_api_ns.route(
    "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_text",
    "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update-by-text",
)
class DocumentUpdateByTextApi(DatasetApiResource):
    """Resource for updating documents."""

    @service_api_ns.expect(document_text_update_parser)
    @service_api_ns.doc("update_document_by_text")
    @service_api_ns.doc(description="Update an existing document by providing text content")
    @service_api_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"})
    @service_api_ns.doc(
        responses={
            200: "Document updated successfully",
            401: "Unauthorized - invalid API token",
            404: "Document not found",
        }
    )
    @cloud_edition_billing_resource_check("vector_space", "dataset")
    @cloud_edition_billing_rate_limit_check("knowledge", "dataset")
    def post(self, tenant_id, dataset_id, document_id):
        """Update document by text."""
        args = document_text_update_parser.parse_args()
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise ValueError("Dataset does not exist.")
        if (
            args.get("retrieval_model")
            and args.get("retrieval_model").get("reranking_model")
            and args.get("retrieval_model").get("reranking_model").get("reranking_provider_name")
        ):
            DatasetService.check_reranking_model_setting(
                tenant_id,
                args.get("retrieval_model").get("reranking_model").get("reranking_provider_name"),
                args.get("retrieval_model").get("reranking_model").get("reranking_model_name"),
            )
        # indexing_technique is already set in the dataset since this is an update
        args["indexing_technique"] = dataset.indexing_technique

        if args["text"]:
            text = args.get("text")
            name = args.get("name")
            if text is None or name is None:
                raise ValueError("Both 'text' and 'name' must be non-null values.")
            upload_file = FileService.upload_text(text=str(text), text_name=str(name))
            data_source = {
                "type": "upload_file",
                "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}},
            }
            args["data_source"] = data_source
        # validate args
        args["original_document_id"] = str(document_id)
        knowledge_config = KnowledgeConfig(**args)
        DocumentService.document_create_args_validate(knowledge_config)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                knowledge_config=knowledge_config,
                account=current_user,
                dataset_process_rule=dataset.latest_process_rule if "process_rule" not in args else None,
                created_from="api",
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        document = documents[0]
        documents_and_batch_fields = {"document": marshal(document, document_fields), "batch": batch}
        return documents_and_batch_fields, 200
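
# Usage sketch (hypothetical host/IDs/key): renaming a document and replacing its
# text; both fields are optional, and omitting "text" updates settings only.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:5001/v1/datasets/<dataset_id>/documents/<document_id>/update_by_text",
#       headers={"Authorization": "Bearer <dataset-api-key>"},
#       json={"name": "notes-v2.txt", "text": "Revised text to re-index."},
#   )
#   print(resp.json()["batch"])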


@service_api_ns.route(
    "/datasets/<uuid:dataset_id>/document/create_by_file",
    "/datasets/<uuid:dataset_id>/document/create-by-file",
)
class DocumentAddByFileApi(DatasetApiResource):
    """Resource for documents."""

    @service_api_ns.doc("create_document_by_file")
    @service_api_ns.doc(description="Create a new document by uploading a file")
    @service_api_ns.doc(params={"dataset_id": "Dataset ID"})
    @service_api_ns.doc(
        responses={
            200: "Document created successfully",
            401: "Unauthorized - invalid API token",
            400: "Bad request - invalid file or parameters",
        }
    )
    @cloud_edition_billing_resource_check("vector_space", "dataset")
    @cloud_edition_billing_resource_check("documents", "dataset")
    @cloud_edition_billing_rate_limit_check("knowledge", "dataset")
    def post(self, tenant_id, dataset_id):
        """Create document by uploading a file."""
        args = {}
        if "data" in request.form:
            args = json.loads(request.form["data"])
        if "doc_form" not in args:
            args["doc_form"] = "text_model"
        if "doc_language" not in args:
            args["doc_language"] = "English"

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise ValueError("Dataset does not exist.")
        if dataset.provider == "external":
            raise ValueError("External datasets are not supported.")

        indexing_technique = args.get("indexing_technique") or dataset.indexing_technique
        if not indexing_technique:
            raise ValueError("indexing_technique is required.")
        args["indexing_technique"] = indexing_technique

        if "embedding_model_provider" in args:
            DatasetService.check_embedding_model_setting(
                tenant_id, args["embedding_model_provider"], args["embedding_model"]
            )
        if (
            "retrieval_model" in args
            and args["retrieval_model"].get("reranking_model")
            and args["retrieval_model"].get("reranking_model").get("reranking_provider_name")
        ):
            DatasetService.check_reranking_model_setting(
                tenant_id,
                args["retrieval_model"].get("reranking_model").get("reranking_provider_name"),
                args["retrieval_model"].get("reranking_model").get("reranking_model_name"),
            )

        # check file
        if "file" not in request.files:
            raise NoFileUploadedError()
        if len(request.files) > 1:
            raise TooManyFilesError()

        # save file info
        file = request.files["file"]
        if not file.filename:
            raise FilenameNotExistsError()

        upload_file = FileService.upload_file(
            filename=file.filename,
            content=file.read(),
            mimetype=file.mimetype,
            user=current_user,
            source="datasets",
        )
        data_source = {
            "type": "upload_file",
            "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}},
        }
        args["data_source"] = data_source
        # validate args
        knowledge_config = KnowledgeConfig(**args)
        DocumentService.document_create_args_validate(knowledge_config)

        dataset_process_rule = dataset.latest_process_rule if "process_rule" not in args else None
        if not knowledge_config.original_document_id and not dataset_process_rule and not knowledge_config.process_rule:
            raise ValueError("process_rule is required.")

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                knowledge_config=knowledge_config,
                account=dataset.created_by_account,
                dataset_process_rule=dataset_process_rule,
                created_from="api",
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        document = documents[0]
        documents_and_batch_fields = {"document": marshal(document, document_fields), "batch": batch}
        return documents_and_batch_fields, 200
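
# Usage sketch (hypothetical host/ID/key): this endpoint expects multipart/form-data
# with the file under "file" and the JSON settings as a string under "data", matching
# the request.form["data"] / request.files["file"] handling above.
#
#   import json
#   import requests
#
#   settings = {"indexing_technique": "high_quality", "process_rule": {"mode": "automatic"}}
#   with open("report.pdf", "rb") as f:
#       resp = requests.post(
#           "http://localhost:5001/v1/datasets/<dataset_id>/document/create_by_file",
#           headers={"Authorization": "Bearer <dataset-api-key>"},
#           data={"data": json.dumps(settings)},
#           files={"file": f},
#       )
#   print(resp.json()["batch"])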


@service_api_ns.route(
    "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update_by_file",
    "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/update-by-file",
)
class DocumentUpdateByFileApi(DatasetApiResource):
    """Resource for updating documents."""

    @service_api_ns.doc("update_document_by_file")
    @service_api_ns.doc(description="Update an existing document by uploading a file")
    @service_api_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"})
    @service_api_ns.doc(
        responses={
            200: "Document updated successfully",
            401: "Unauthorized - invalid API token",
            404: "Document not found",
        }
    )
    @cloud_edition_billing_resource_check("vector_space", "dataset")
    @cloud_edition_billing_rate_limit_check("knowledge", "dataset")
    def post(self, tenant_id, dataset_id, document_id):
        """Update document by uploading a file."""
        args = {}
        if "data" in request.form:
            args = json.loads(request.form["data"])
        if "doc_form" not in args:
            args["doc_form"] = "text_model"
        if "doc_language" not in args:
            args["doc_language"] = "English"

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise ValueError("Dataset does not exist.")
        if dataset.provider == "external":
            raise ValueError("External datasets are not supported.")
        # indexing_technique is already set in the dataset since this is an update
        args["indexing_technique"] = dataset.indexing_technique

        if "file" in request.files:
            # save file info
            file = request.files["file"]
            if len(request.files) > 1:
                raise TooManyFilesError()
            if not file.filename:
                raise FilenameNotExistsError()
            try:
                upload_file = FileService.upload_file(
                    filename=file.filename,
                    content=file.read(),
                    mimetype=file.mimetype,
                    user=current_user,
                    source="datasets",
                )
            except services.errors.file.FileTooLargeError as file_too_large_error:
                raise FileTooLargeError(file_too_large_error.description)
            except services.errors.file.UnsupportedFileTypeError:
                raise UnsupportedFileTypeError()
            data_source = {
                "type": "upload_file",
                "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}},
            }
            args["data_source"] = data_source
        # validate args
        args["original_document_id"] = str(document_id)
        knowledge_config = KnowledgeConfig(**args)
        DocumentService.document_create_args_validate(knowledge_config)

        try:
            documents, _ = DocumentService.save_document_with_dataset_id(
                dataset=dataset,
                knowledge_config=knowledge_config,
                account=dataset.created_by_account,
                dataset_process_rule=dataset.latest_process_rule if "process_rule" not in args else None,
                created_from="api",
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        document = documents[0]
        documents_and_batch_fields = {"document": marshal(document, document_fields), "batch": document.batch}
        return documents_and_batch_fields, 200
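
# Usage sketch (hypothetical host/IDs/key): replacing a document's file. Per the
# `if "file" in request.files:` branch above, the "file" part is optional here;
# sending only "data" re-saves the document with the given settings.
#
#   import json
#   import requests
#
#   with open("report-v2.pdf", "rb") as f:
#       resp = requests.post(
#           "http://localhost:5001/v1/datasets/<dataset_id>/documents/<document_id>/update_by_file",
#           headers={"Authorization": "Bearer <dataset-api-key>"},
#           data={"data": json.dumps({"process_rule": {"mode": "automatic"}})},
#           files={"file": f},
#       )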


@service_api_ns.route("/datasets/<uuid:dataset_id>/documents")
class DocumentListApi(DatasetApiResource):
    @service_api_ns.doc("list_documents")
    @service_api_ns.doc(description="List all documents in a dataset")
    @service_api_ns.doc(params={"dataset_id": "Dataset ID"})
    @service_api_ns.doc(
        responses={
            200: "Documents retrieved successfully",
            401: "Unauthorized - invalid API token",
            404: "Dataset not found",
        }
    )
    def get(self, tenant_id, dataset_id):
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        search = request.args.get("keyword", default=None, type=str)
        dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise NotFound("Dataset not found.")

        query = select(Document).filter_by(dataset_id=str(dataset_id), tenant_id=tenant_id)
        if search:
            search = f"%{search}%"
            query = query.where(Document.name.like(search))
        query = query.order_by(desc(Document.created_at), desc(Document.position))

        paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items

        response = {
            "data": marshal(documents, document_fields),
            "has_more": len(documents) == limit,
            "limit": limit,
            "total": paginated_documents.total,
            "page": page,
        }
        return response
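
# Usage sketch (hypothetical host/ID/key): paging through documents, filtered by
# name via the "keyword" query parameter read above.
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:5001/v1/datasets/<dataset_id>/documents",
#       params={"page": 1, "limit": 20, "keyword": "report"},
#       headers={"Authorization": "Bearer <dataset-api-key>"},
#   )
#   print(resp.json()["total"], [d["name"] for d in resp.json()["data"]])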


@service_api_ns.route("/datasets/<uuid:dataset_id>/documents/<string:batch>/indexing-status")
class DocumentIndexingStatusApi(DatasetApiResource):
    @service_api_ns.doc("get_document_indexing_status")
    @service_api_ns.doc(description="Get indexing status for documents in a batch")
    @service_api_ns.doc(params={"dataset_id": "Dataset ID", "batch": "Batch ID"})
    @service_api_ns.doc(
        responses={
            200: "Indexing status retrieved successfully",
            401: "Unauthorized - invalid API token",
            404: "Dataset or documents not found",
        }
    )
    def get(self, tenant_id, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        tenant_id = str(tenant_id)
        # get dataset
        dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise NotFound("Dataset not found.")
        # get documents
        documents = DocumentService.get_batch_documents(dataset_id, batch)
        if not documents:
            raise NotFound("Documents not found.")

        documents_status = []
        for document in documents:
            completed_segments = (
                db.session.query(DocumentSegment)
                .where(
                    DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document.id),
                    DocumentSegment.status != "re_segment",
                )
                .count()
            )
            total_segments = (
                db.session.query(DocumentSegment)
                .where(DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment")
                .count()
            )
            # Create a dictionary with document attributes and additional fields
            document_dict = {
                "id": document.id,
                "indexing_status": "paused" if document.is_paused else document.indexing_status,
                "processing_started_at": document.processing_started_at,
                "parsing_completed_at": document.parsing_completed_at,
                "cleaning_completed_at": document.cleaning_completed_at,
                "splitting_completed_at": document.splitting_completed_at,
                "completed_at": document.completed_at,
                "paused_at": document.paused_at,
                "error": document.error,
                "stopped_at": document.stopped_at,
                "completed_segments": completed_segments,
                "total_segments": total_segments,
            }
            documents_status.append(marshal(document_dict, document_status_fields))
        data = {"data": documents_status}
        return data
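
# Usage sketch (hypothetical host/IDs/key): polling indexing progress with the
# "batch" value returned by the create/update endpoints.
#
#   import requests
#
#   resp = requests.get(
#       "http://localhost:5001/v1/datasets/<dataset_id>/documents/<batch>/indexing-status",
#       headers={"Authorization": "Bearer <dataset-api-key>"},
#   )
#   for doc in resp.json()["data"]:
#       print(doc["id"], doc["indexing_status"], doc["completed_segments"], doc["total_segments"])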


@service_api_ns.route("/datasets/<uuid:dataset_id>/documents/<uuid:document_id>")
class DocumentApi(DatasetApiResource):
    METADATA_CHOICES = {"all", "only", "without"}

    @service_api_ns.doc("get_document")
    @service_api_ns.doc(description="Get a specific document by ID")
    @service_api_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"})
    @service_api_ns.doc(
        responses={
            200: "Document retrieved successfully",
            401: "Unauthorized - invalid API token",
            403: "Forbidden - insufficient permissions",
            404: "Document not found",
        }
    )
    def get(self, tenant_id, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = self.get_dataset(dataset_id, tenant_id)
        document = DocumentService.get_document(dataset.id, document_id)
        if not document:
            raise NotFound("Document not found.")
        if document.tenant_id != str(tenant_id):
            raise Forbidden("No permission.")

        metadata = request.args.get("metadata", "all")
        if metadata not in self.METADATA_CHOICES:
            raise InvalidMetadataError(f"Invalid metadata value: {metadata}")

        if metadata == "only":
            response = {
                "id": document.id,
                "doc_type": document.doc_type,
                "doc_metadata": document.doc_metadata_details,
            }
        elif metadata == "without":
            dataset_process_rules = DatasetService.get_process_rules(dataset_id)
            document_process_rules = document.dataset_process_rule.to_dict()
            data_source_info = document.data_source_detail_dict
            response = {
                "id": document.id,
                "position": document.position,
                "data_source_type": document.data_source_type,
                "data_source_info": data_source_info,
                "dataset_process_rule_id": document.dataset_process_rule_id,
                "dataset_process_rule": dataset_process_rules,
                "document_process_rule": document_process_rules,
                "name": document.name,
                "created_from": document.created_from,
                "created_by": document.created_by,
                "created_at": document.created_at.timestamp(),
                "tokens": document.tokens,
                "indexing_status": document.indexing_status,
                "completed_at": int(document.completed_at.timestamp()) if document.completed_at else None,
                "updated_at": int(document.updated_at.timestamp()) if document.updated_at else None,
                "indexing_latency": document.indexing_latency,
                "error": document.error,
                "enabled": document.enabled,
                "disabled_at": int(document.disabled_at.timestamp()) if document.disabled_at else None,
                "disabled_by": document.disabled_by,
                "archived": document.archived,
                "segment_count": document.segment_count,
                "average_segment_length": document.average_segment_length,
                "hit_count": document.hit_count,
                "display_status": document.display_status,
                "doc_form": document.doc_form,
                "doc_language": document.doc_language,
            }
        else:
            dataset_process_rules = DatasetService.get_process_rules(dataset_id)
            document_process_rules = document.dataset_process_rule.to_dict()
            data_source_info = document.data_source_detail_dict
            response = {
                "id": document.id,
                "position": document.position,
                "data_source_type": document.data_source_type,
                "data_source_info": data_source_info,
                "dataset_process_rule_id": document.dataset_process_rule_id,
                "dataset_process_rule": dataset_process_rules,
                "document_process_rule": document_process_rules,
                "name": document.name,
                "created_from": document.created_from,
                "created_by": document.created_by,
                "created_at": document.created_at.timestamp(),
                "tokens": document.tokens,
                "indexing_status": document.indexing_status,
                "completed_at": int(document.completed_at.timestamp()) if document.completed_at else None,
                "updated_at": int(document.updated_at.timestamp()) if document.updated_at else None,
                "indexing_latency": document.indexing_latency,
                "error": document.error,
                "enabled": document.enabled,
                "disabled_at": int(document.disabled_at.timestamp()) if document.disabled_at else None,
                "disabled_by": document.disabled_by,
                "archived": document.archived,
                "doc_type": document.doc_type,
                "doc_metadata": document.doc_metadata_details,
                "segment_count": document.segment_count,
                "average_segment_length": document.average_segment_length,
                "hit_count": document.hit_count,
                "display_status": document.display_status,
                "doc_form": document.doc_form,
                "doc_language": document.doc_language,
            }
        return response
    @service_api_ns.doc("delete_document")
    @service_api_ns.doc(description="Delete a document")
    @service_api_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"})
    @service_api_ns.doc(
        responses={
            204: "Document deleted successfully",
            401: "Unauthorized - invalid API token",
            403: "Forbidden - document is archived",
            404: "Document not found",
        }
    )
    @cloud_edition_billing_rate_limit_check("knowledge", "dataset")
    def delete(self, tenant_id, dataset_id, document_id):
        """Delete document."""
        document_id = str(document_id)
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)

        # get dataset info
        dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first()
        if not dataset:
            raise ValueError("Dataset does not exist.")

        document = DocumentService.get_document(dataset.id, document_id)
        # 404 if document not found
        if document is None:
            raise NotFound("Document does not exist.")
        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # delete document
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError("Cannot delete document during indexing.")
        # return the documented 204 status; a bare `return 204` would be serialized
        # as a JSON body of 204 with status 200 by flask_restx
        return {"result": "success"}, 204
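
# Usage sketch (hypothetical host/IDs/key): fetching a document and then deleting
# it. "metadata" may be "all" (default), "only", or "without", per METADATA_CHOICES.
#
#   import requests
#
#   base = "http://localhost:5001/v1/datasets/<dataset_id>/documents/<document_id>"
#   headers = {"Authorization": "Bearer <dataset-api-key>"}
#   doc = requests.get(base, params={"metadata": "without"}, headers=headers).json()
#   print(doc["name"], doc["indexing_status"])
#   resp = requests.delete(base, headers=headers)  # expect 204 on success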