
datasets_document.py

import logging
from argparse import ArgumentTypeError
from datetime import UTC, datetime
from typing import cast

from flask import request
from flask_login import current_user
from flask_restful import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import asc, desc, select
from werkzeug.exceptions import Forbidden, NotFound

import services
from controllers.console import api
from controllers.console.app.error import (
    ProviderModelCurrentlyNotSupportError,
    ProviderNotInitializeError,
    ProviderQuotaExceededError,
)
from controllers.console.datasets.error import (
    ArchivedDocumentImmutableError,
    DocumentAlreadyFinishedError,
    DocumentIndexingError,
    IndexingEstimateError,
    InvalidActionError,
    InvalidMetadataError,
)
from controllers.console.wraps import (
    account_initialization_required,
    cloud_edition_billing_rate_limit_check,
    cloud_edition_billing_resource_check,
    setup_required,
)
from core.errors.error import (
    LLMBadRequestError,
    ModelCurrentlyNotSupportError,
    ProviderTokenNotInitError,
    QuotaExceededError,
)
from core.indexing_runner import IndexingRunner
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.plugin.impl.exc import PluginDaemonClientSideError
from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from fields.document_fields import (
    dataset_and_document_fields,
    document_fields,
    document_status_fields,
    document_with_segments_fields,
)
from libs.login import login_required
from models import Dataset, DatasetProcessRule, Document, DocumentSegment, UploadFile
from services.dataset_service import DatasetService, DocumentService
from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig
from tasks.add_document_to_index_task import add_document_to_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task
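
# Console (admin) REST API for dataset documents: listing, creation, deletion,
# indexing estimates and status, metadata, pause/resume/retry of indexing, and
# website re-sync. Route registrations are collected at the bottom of the file.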

class DocumentResource(Resource):
    def get_document(self, dataset_id: str, document_id: str) -> Document:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        document = DocumentService.get_document(dataset_id, document_id)

        if not document:
            raise NotFound("Document not found.")

        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden("No permission.")

        return document

    def get_batch_documents(self, dataset_id: str, batch: str) -> list[Document]:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        documents = DocumentService.get_batch_documents(dataset_id, batch)

        if not documents:
            raise NotFound("Documents not found.")

        return documents


class GetProcessRuleApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        req_data = request.args

        document_id = req_data.get("document_id")

        # get default rules
        mode = DocumentService.DEFAULT_RULES["mode"]
        rules = DocumentService.DEFAULT_RULES["rules"]
        limits = DocumentService.DEFAULT_RULES["limits"]
        if document_id:
            # get the latest process rule
            document = db.get_or_404(Document, document_id)

            dataset = DatasetService.get_dataset(document.dataset_id)

            if not dataset:
                raise NotFound("Dataset not found.")

            try:
                DatasetService.check_dataset_permission(dataset, current_user)
            except services.errors.account.NoPermissionError as e:
                raise Forbidden(str(e))

            # get the latest process rule
            dataset_process_rule = (
                db.session.query(DatasetProcessRule)
                .filter(DatasetProcessRule.dataset_id == document.dataset_id)
                .order_by(DatasetProcessRule.created_at.desc())
                .limit(1)
                .one_or_none()
            )
            if dataset_process_rule:
                mode = dataset_process_rule.mode
                rules = dataset_process_rule.rules_dict

        return {"mode": mode, "rules": rules, "limits": limits}

class DatasetDocumentListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        search = request.args.get("keyword", default=None, type=str)
        sort = request.args.get("sort", default="-created_at", type=str)
        # "yes", "true", "t", "y", "1" convert to True, while others convert to False.
        try:
            fetch_val = request.args.get("fetch", default="false")
            if isinstance(fetch_val, bool):
                fetch = fetch_val
            else:
                if fetch_val.lower() in ("yes", "true", "t", "y", "1"):
                    fetch = True
                elif fetch_val.lower() in ("no", "false", "f", "n", "0"):
                    fetch = False
                else:
                    raise ArgumentTypeError(
                        f"Truthy value expected: got {fetch_val} but expected one of yes/no, true/false, t/f, y/n, 1/0 "
                        f"(case insensitive)."
                    )
        except (ArgumentTypeError, ValueError, Exception):
            fetch = False

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found.")

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        query = select(Document).filter_by(dataset_id=str(dataset_id), tenant_id=current_user.current_tenant_id)

        if search:
            search = f"%{search}%"
            query = query.filter(Document.name.like(search))

        if sort.startswith("-"):
            sort_logic = desc
            sort = sort[1:]
        else:
            sort_logic = asc

        if sort == "hit_count":
            sub_query = (
                db.select(DocumentSegment.document_id, db.func.sum(DocumentSegment.hit_count).label("total_hit_count"))
                .group_by(DocumentSegment.document_id)
                .subquery()
            )

            query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id).order_by(
                sort_logic(db.func.coalesce(sub_query.c.total_hit_count, 0)),
                sort_logic(Document.position),
            )
        elif sort == "created_at":
            query = query.order_by(
                sort_logic(Document.created_at),
                sort_logic(Document.position),
            )
        else:
            query = query.order_by(
                desc(Document.created_at),
                desc(Document.position),
            )

        paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items
        if fetch:
            for document in documents:
                completed_segments = (
                    db.session.query(DocumentSegment)
                    .filter(
                        DocumentSegment.completed_at.isnot(None),
                        DocumentSegment.document_id == str(document.id),
                        DocumentSegment.status != "re_segment",
                    )
                    .count()
                )
                total_segments = (
                    db.session.query(DocumentSegment)
                    .filter(DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment")
                    .count()
                )
                document.completed_segments = completed_segments
                document.total_segments = total_segments
            data = marshal(documents, document_with_segments_fields)
        else:
            data = marshal(documents, document_fields)

        response = {
            "data": data,
            "has_more": len(documents) == limit,
            "limit": limit,
            "total": paginated_documents.total,
            "page": page,
        }
        return response
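
    # Illustrative only: list documents with per-document segment progress, e.g.
    #   GET /datasets/<dataset_id>/documents?page=1&limit=20&keyword=faq&sort=-hit_count&fetch=true
    # "fetch" accepts yes/no, true/false, t/f, y/n, 1/0 (case-insensitive) and
    # falls back to false on anything unparseable.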

    documents_and_batch_fields = {"documents": fields.List(fields.Nested(document_fields)), "batch": fields.String}

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(documents_and_batch_fields)
    @cloud_edition_billing_resource_check("vector_space")
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self, dataset_id):
        dataset_id = str(dataset_id)

        dataset = DatasetService.get_dataset(dataset_id)

        if not dataset:
            raise NotFound("Dataset not found.")

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        parser = reqparse.RequestParser()
        parser.add_argument(
            "indexing_technique", type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False, location="json"
        )
        parser.add_argument("data_source", type=dict, required=False, location="json")
        parser.add_argument("process_rule", type=dict, required=False, location="json")
        parser.add_argument("duplicate", type=bool, default=True, nullable=False, location="json")
        parser.add_argument("original_document_id", type=str, required=False, location="json")
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")
        parser.add_argument("embedding_model", type=str, required=False, nullable=True, location="json")
        parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        args = parser.parse_args()
        knowledge_config = KnowledgeConfig(**args)

        if not dataset.indexing_technique and not knowledge_config.indexing_technique:
            raise ValueError("indexing_technique is required.")

        # validate args
        DocumentService.document_create_args_validate(knowledge_config)

        try:
            documents, batch = DocumentService.save_document_with_dataset_id(dataset, knowledge_config, current_user)
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        return {"documents": documents, "batch": batch}

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def delete(self, dataset_id):
        dataset_id = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")

        # check user's model setting
        DatasetService.check_dataset_model_setting(dataset)

        try:
            document_ids = request.args.getlist("document_id")
            DocumentService.delete_documents(dataset, document_ids)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError("Cannot delete document during indexing.")

        return {"result": "success"}, 204

class DatasetInitApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(dataset_and_document_fields)
    @cloud_edition_billing_resource_check("vector_space")
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self):
        # The role of the current user in the ta table must be admin, owner, dataset_operator, or editor
        if not current_user.is_dataset_editor:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument(
            "indexing_technique",
            type=str,
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            required=True,
            nullable=False,
            location="json",
        )
        parser.add_argument("data_source", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")
        parser.add_argument("embedding_model", type=str, required=False, nullable=True, location="json")
        parser.add_argument("embedding_model_provider", type=str, required=False, nullable=True, location="json")
        args = parser.parse_args()

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()

        knowledge_config = KnowledgeConfig(**args)
        if knowledge_config.indexing_technique == "high_quality":
            if knowledge_config.embedding_model is None or knowledge_config.embedding_model_provider is None:
                raise ValueError("embedding model and embedding model provider are required for high quality indexing.")
            try:
                model_manager = ModelManager()
                model_manager.get_model_instance(
                    tenant_id=current_user.current_tenant_id,
                    provider=args["embedding_model_provider"],
                    model_type=ModelType.TEXT_EMBEDDING,
                    model=args["embedding_model"],
                )
            except InvokeAuthorizationError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
                )
            except ProviderTokenNotInitError as ex:
                raise ProviderNotInitializeError(ex.description)

        # validate args
        DocumentService.document_create_args_validate(knowledge_config)

        try:
            dataset, documents, batch = DocumentService.save_document_without_dataset_id(
                tenant_id=current_user.current_tenant_id, knowledge_config=knowledge_config, account=current_user
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        response = {"dataset": dataset, "documents": documents, "batch": batch}

        return response
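
# Illustrative only: POST /datasets/init creates a dataset and its first batch of
# documents in one call. For "high_quality" indexing, "embedding_model" and
# "embedding_model_provider" are mandatory and are verified against the tenant's
# configured providers via ModelManager before anything is persisted.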

class DocumentIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        if document.indexing_status in {"completed", "error"}:
            raise DocumentAlreadyFinishedError()

        data_process_rule = document.dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()

        response = {"tokens": 0, "total_price": 0, "currency": "USD", "total_segments": 0, "preview": []}

        if document.data_source_type == "upload_file":
            data_source_info = document.data_source_info_dict
            if data_source_info and "upload_file_id" in data_source_info:
                file_id = data_source_info["upload_file_id"]

                file = (
                    db.session.query(UploadFile)
                    .filter(UploadFile.tenant_id == document.tenant_id, UploadFile.id == file_id)
                    .first()
                )

                # raise error if file not found
                if not file:
                    raise NotFound("File not found.")

                extract_setting = ExtractSetting(
                    datasource_type="upload_file", upload_file=file, document_model=document.doc_form
                )

                indexing_runner = IndexingRunner()

                try:
                    estimate_response = indexing_runner.indexing_estimate(
                        current_user.current_tenant_id,
                        [extract_setting],
                        data_process_rule_dict,
                        document.doc_form,
                        "English",
                        dataset_id,
                    )
                    return estimate_response.model_dump(), 200
                except LLMBadRequestError:
                    raise ProviderNotInitializeError(
                        "No Embedding Model available. Please configure a valid provider "
                        "in the Settings -> Model Provider."
                    )
                except ProviderTokenNotInitError as ex:
                    raise ProviderNotInitializeError(ex.description)
                except PluginDaemonClientSideError as ex:
                    raise ProviderNotInitializeError(ex.description)
                except Exception as e:
                    raise IndexingEstimateError(str(e))

        return response, 200


class DocumentBatchIndexingEstimateApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        if not documents:
            return {"tokens": 0, "total_price": 0, "currency": "USD", "total_segments": 0, "preview": []}, 200

        data_process_rule = documents[0].dataset_process_rule
        data_process_rule_dict = data_process_rule.to_dict()
        info_list = []
        extract_settings = []
        for document in documents:
            if document.indexing_status in {"completed", "error"}:
                raise DocumentAlreadyFinishedError()
            data_source_info = document.data_source_info_dict
            # format document files info
            if data_source_info and "upload_file_id" in data_source_info:
                file_id = data_source_info["upload_file_id"]
                info_list.append(file_id)
            # format document notion info
            elif (
                data_source_info and "notion_workspace_id" in data_source_info and "notion_page_id" in data_source_info
            ):
                pages = []
                page = {"page_id": data_source_info["notion_page_id"], "type": data_source_info["type"]}
                pages.append(page)
                notion_info = {"workspace_id": data_source_info["notion_workspace_id"], "pages": pages}
                info_list.append(notion_info)

            if document.data_source_type == "upload_file":
                file_id = data_source_info["upload_file_id"]
                file_detail = (
                    db.session.query(UploadFile)
                    .filter(UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id == file_id)
                    .first()
                )

                if file_detail is None:
                    raise NotFound("File not found.")

                extract_setting = ExtractSetting(
                    datasource_type="upload_file", upload_file=file_detail, document_model=document.doc_form
                )
                extract_settings.append(extract_setting)
            elif document.data_source_type == "notion_import":
                extract_setting = ExtractSetting(
                    datasource_type="notion_import",
                    notion_info={
                        "notion_workspace_id": data_source_info["notion_workspace_id"],
                        "notion_obj_id": data_source_info["notion_page_id"],
                        "notion_page_type": data_source_info["type"],
                        "tenant_id": current_user.current_tenant_id,
                    },
                    document_model=document.doc_form,
                )
                extract_settings.append(extract_setting)
            elif document.data_source_type == "website_crawl":
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": data_source_info["provider"],
                        "job_id": data_source_info["job_id"],
                        "url": data_source_info["url"],
                        "tenant_id": current_user.current_tenant_id,
                        "mode": data_source_info["mode"],
                        "only_main_content": data_source_info["only_main_content"],
                    },
                    document_model=document.doc_form,
                )
                extract_settings.append(extract_setting)
            else:
                raise ValueError("Data source type not supported.")

        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(
                current_user.current_tenant_id,
                extract_settings,
                data_process_rule_dict,
                document.doc_form,
                "English",
                dataset_id,
            )
            return response.model_dump(), 200
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except PluginDaemonClientSideError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))


class DocumentBatchIndexingStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, batch):
        dataset_id = str(dataset_id)
        batch = str(batch)
        documents = self.get_batch_documents(dataset_id, batch)
        documents_status = []
        for document in documents:
            completed_segments = (
                db.session.query(DocumentSegment)
                .filter(
                    DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document.id),
                    DocumentSegment.status != "re_segment",
                )
                .count()
            )
            total_segments = (
                db.session.query(DocumentSegment)
                .filter(DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment")
                .count()
            )
            # Create a dictionary with document attributes and additional fields
            document_dict = {
                "id": document.id,
                "indexing_status": "paused" if document.is_paused else document.indexing_status,
                "processing_started_at": document.processing_started_at,
                "parsing_completed_at": document.parsing_completed_at,
                "cleaning_completed_at": document.cleaning_completed_at,
                "splitting_completed_at": document.splitting_completed_at,
                "completed_at": document.completed_at,
                "paused_at": document.paused_at,
                "error": document.error,
                "stopped_at": document.stopped_at,
                "completed_segments": completed_segments,
                "total_segments": total_segments,
            }
            documents_status.append(marshal(document_dict, document_status_fields))
        data = {"data": documents_status}
        return data


class DocumentIndexingStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        completed_segments = (
            db.session.query(DocumentSegment)
            .filter(
                DocumentSegment.completed_at.isnot(None),
                DocumentSegment.document_id == str(document_id),
                DocumentSegment.status != "re_segment",
            )
            .count()
        )
        total_segments = (
            db.session.query(DocumentSegment)
            .filter(DocumentSegment.document_id == str(document_id), DocumentSegment.status != "re_segment")
            .count()
        )

        # Create a dictionary with document attributes and additional fields
        document_dict = {
            "id": document.id,
            "indexing_status": "paused" if document.is_paused else document.indexing_status,
            "processing_started_at": document.processing_started_at,
            "parsing_completed_at": document.parsing_completed_at,
            "cleaning_completed_at": document.cleaning_completed_at,
            "splitting_completed_at": document.splitting_completed_at,
            "completed_at": document.completed_at,
            "paused_at": document.paused_at,
            "error": document.error,
            "stopped_at": document.stopped_at,
            "completed_segments": completed_segments,
            "total_segments": total_segments,
        }
        return marshal(document_dict, document_status_fields)
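
# Illustrative only: both indexing-status endpoints report progress as counts of
# DocumentSegment rows; a marshalled item may look like
#   {"id": "...", "indexing_status": "indexing", "completed_segments": 12, "total_segments": 40, ...}
# with the exact field set defined by document_status_fields.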

class DocumentDetailApi(DocumentResource):
    METADATA_CHOICES = {"all", "only", "without"}

    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        metadata = request.args.get("metadata", "all")
        if metadata not in self.METADATA_CHOICES:
            raise InvalidMetadataError(f"Invalid metadata value: {metadata}")

        if metadata == "only":
            response = {"id": document.id, "doc_type": document.doc_type, "doc_metadata": document.doc_metadata_details}
        elif metadata == "without":
            dataset_process_rules = DatasetService.get_process_rules(dataset_id)
            document_process_rules = document.dataset_process_rule.to_dict()
            data_source_info = document.data_source_detail_dict
            response = {
                "id": document.id,
                "position": document.position,
                "data_source_type": document.data_source_type,
                "data_source_info": data_source_info,
                "dataset_process_rule_id": document.dataset_process_rule_id,
                "dataset_process_rule": dataset_process_rules,
                "document_process_rule": document_process_rules,
                "name": document.name,
                "created_from": document.created_from,
                "created_by": document.created_by,
                "created_at": document.created_at.timestamp(),
                "tokens": document.tokens,
                "indexing_status": document.indexing_status,
                "completed_at": int(document.completed_at.timestamp()) if document.completed_at else None,
                "updated_at": int(document.updated_at.timestamp()) if document.updated_at else None,
                "indexing_latency": document.indexing_latency,
                "error": document.error,
                "enabled": document.enabled,
                "disabled_at": int(document.disabled_at.timestamp()) if document.disabled_at else None,
                "disabled_by": document.disabled_by,
                "archived": document.archived,
                "segment_count": document.segment_count,
                "average_segment_length": document.average_segment_length,
                "hit_count": document.hit_count,
                "display_status": document.display_status,
                "doc_form": document.doc_form,
                "doc_language": document.doc_language,
            }
        else:
            dataset_process_rules = DatasetService.get_process_rules(dataset_id)
            document_process_rules = document.dataset_process_rule.to_dict()
            data_source_info = document.data_source_detail_dict
            response = {
                "id": document.id,
                "position": document.position,
                "data_source_type": document.data_source_type,
                "data_source_info": data_source_info,
                "dataset_process_rule_id": document.dataset_process_rule_id,
                "dataset_process_rule": dataset_process_rules,
                "document_process_rule": document_process_rules,
                "name": document.name,
                "created_from": document.created_from,
                "created_by": document.created_by,
                "created_at": document.created_at.timestamp(),
                "tokens": document.tokens,
                "indexing_status": document.indexing_status,
                "completed_at": int(document.completed_at.timestamp()) if document.completed_at else None,
                "updated_at": int(document.updated_at.timestamp()) if document.updated_at else None,
                "indexing_latency": document.indexing_latency,
                "error": document.error,
                "enabled": document.enabled,
                "disabled_at": int(document.disabled_at.timestamp()) if document.disabled_at else None,
                "disabled_by": document.disabled_by,
                "archived": document.archived,
                "doc_type": document.doc_type,
                "doc_metadata": document.doc_metadata_details,
                "segment_count": document.segment_count,
                "average_segment_length": document.average_segment_length,
                "hit_count": document.hit_count,
                "display_status": document.display_status,
                "doc_form": document.doc_form,
                "doc_language": document.doc_language,
            }

        return response, 200
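
# Illustrative only: the "metadata" query parameter selects the payload shape:
#   ?metadata=only    -> id, doc_type, and doc_metadata only
#   ?metadata=without -> full document detail without doc metadata
#   ?metadata=all     -> everything (the default)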

class DocumentProcessingApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id, document_id, action):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        # The role of the current user in the ta table must be admin, owner, dataset_operator, or editor
        if not current_user.is_dataset_editor:
            raise Forbidden()

        if action == "pause":
            if document.indexing_status != "indexing":
                raise InvalidActionError("Document not in indexing state.")

            document.paused_by = current_user.id
            document.paused_at = datetime.now(UTC).replace(tzinfo=None)
            document.is_paused = True
            db.session.commit()
        elif action == "resume":
            if document.indexing_status not in {"paused", "error"}:
                raise InvalidActionError("Document not in paused or error state.")

            document.paused_by = None
            document.paused_at = None
            document.is_paused = False
            db.session.commit()
        else:
            raise InvalidActionError()

        return {"result": "success"}, 200

class DocumentDeleteApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def delete(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")

        # check user's model setting
        DatasetService.check_dataset_model_setting(dataset)

        document = self.get_document(dataset_id, document_id)

        try:
            DocumentService.delete_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError("Cannot delete document during indexing.")

        return {"result": "success"}, 204


class DocumentMetadataApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def put(self, dataset_id, document_id):
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        document = self.get_document(dataset_id, document_id)

        req_data = request.get_json()

        doc_type = req_data.get("doc_type")
        doc_metadata = req_data.get("doc_metadata")

        # The role of the current user in the ta table must be admin, owner, dataset_operator, or editor
        if not current_user.is_dataset_editor:
            raise Forbidden()

        if doc_type is None or doc_metadata is None:
            raise ValueError("Both doc_type and doc_metadata must be provided.")

        if doc_type not in DocumentService.DOCUMENT_METADATA_SCHEMA:
            raise ValueError("Invalid doc_type.")

        if not isinstance(doc_metadata, dict):
            raise ValueError("doc_metadata must be a dictionary.")

        metadata_schema: dict = cast(dict, DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type])

        document.doc_metadata = {}
        if doc_type == "others":
            document.doc_metadata = doc_metadata
        else:
            for key, value_type in metadata_schema.items():
                value = doc_metadata.get(key)
                if value is not None and isinstance(value, value_type):
                    document.doc_metadata[key] = value

        document.doc_type = doc_type
        document.updated_at = datetime.now(UTC).replace(tzinfo=None)
        db.session.commit()

        return {"result": "success", "message": "Document metadata updated."}, 200

class DocumentStatusApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_resource_check("vector_space")
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id, action):
        dataset_id = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            raise NotFound("Dataset not found.")

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_dataset_editor:
            raise Forbidden()

        # check user's model setting
        DatasetService.check_dataset_model_setting(dataset)

        # check user's permission
        DatasetService.check_dataset_permission(dataset, current_user)

        document_ids = request.args.getlist("document_id")
        for document_id in document_ids:
            document = self.get_document(dataset_id, document_id)

            indexing_cache_key = "document_{}_indexing".format(document.id)
            cache_result = redis_client.get(indexing_cache_key)
            if cache_result is not None:
                raise InvalidActionError(f"Document: {document.name} is being indexed, please try again later.")

            if action == "enable":
                if document.enabled:
                    continue
                document.enabled = True
                document.disabled_at = None
                document.disabled_by = None
                document.updated_at = datetime.now(UTC).replace(tzinfo=None)
                db.session.commit()

                # Set cache to prevent indexing the same document multiple times
                redis_client.setex(indexing_cache_key, 600, 1)

                add_document_to_index_task.delay(document_id)
            elif action == "disable":
                if not document.completed_at or document.indexing_status != "completed":
                    raise InvalidActionError(f"Document: {document.name} is not completed.")
                if not document.enabled:
                    continue

                document.enabled = False
                document.disabled_at = datetime.now(UTC).replace(tzinfo=None)
                document.disabled_by = current_user.id
                document.updated_at = datetime.now(UTC).replace(tzinfo=None)
                db.session.commit()

                # Set cache to prevent indexing the same document multiple times
                redis_client.setex(indexing_cache_key, 600, 1)

                remove_document_from_index_task.delay(document_id)
            elif action == "archive":
                if document.archived:
                    continue
                document.archived = True
                document.archived_at = datetime.now(UTC).replace(tzinfo=None)
                document.archived_by = current_user.id
                document.updated_at = datetime.now(UTC).replace(tzinfo=None)
                db.session.commit()

                if document.enabled:
                    # Set cache to prevent indexing the same document multiple times
                    redis_client.setex(indexing_cache_key, 600, 1)

                    remove_document_from_index_task.delay(document_id)
            elif action == "un_archive":
                if not document.archived:
                    continue
                document.archived = False
                document.archived_at = None
                document.archived_by = None
                document.updated_at = datetime.now(UTC).replace(tzinfo=None)
                db.session.commit()

                # Set cache to prevent indexing the same document multiple times
                redis_client.setex(indexing_cache_key, 600, 1)

                add_document_to_index_task.delay(document_id)
            else:
                raise InvalidActionError()

        return {"result": "success"}, 200

class DocumentPauseApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id, document_id):
        """pause document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found.")

        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document does not exist.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # pause document
            DocumentService.pause_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError("Cannot pause completed document.")

        return {"result": "success"}, 204


class DocumentRecoverApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id, document_id):
        """recover document."""
        dataset_id = str(dataset_id)
        document_id = str(document_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found.")
        document = DocumentService.get_document(dataset.id, document_id)

        # 404 if document not found
        if document is None:
            raise NotFound("Document does not exist.")

        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()

        try:
            # recover (resume) the paused document
            DocumentService.recover_document(document)
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError("Document is not in paused status.")

        return {"result": "success"}, 204


class DocumentRetryApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self, dataset_id):
        """retry document."""
        parser = reqparse.RequestParser()
        parser.add_argument("document_ids", type=list, required=True, nullable=False, location="json")
        args = parser.parse_args()
        dataset_id = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id)
        retry_documents = []
        if not dataset:
            raise NotFound("Dataset not found.")
        for document_id in args["document_ids"]:
            try:
                document_id = str(document_id)

                document = DocumentService.get_document(dataset.id, document_id)

                # 404 if document not found
                if document is None:
                    raise NotFound("Document does not exist.")

                # 403 if document is archived
                if DocumentService.check_archived(document):
                    raise ArchivedDocumentImmutableError()

                # 400 if document is completed
                if document.indexing_status == "completed":
                    raise DocumentAlreadyFinishedError()
                retry_documents.append(document)
            except Exception:
                logging.exception(f"Failed to retry document, document id: {document_id}")
                continue

        # retry document
        DocumentService.retry_document(dataset_id, retry_documents)

        return {"result": "success"}, 204

class DocumentRenameApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(document_fields)
    def post(self, dataset_id, document_id):
        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()
        dataset = DatasetService.get_dataset(dataset_id)
        DatasetService.check_dataset_operator_permission(current_user, dataset)
        parser = reqparse.RequestParser()
        parser.add_argument("name", type=str, required=True, nullable=False, location="json")
        args = parser.parse_args()

        try:
            document = DocumentService.rename_document(dataset_id, document_id, args["name"])
        except services.errors.document.DocumentIndexingError:
            raise DocumentIndexingError("Cannot rename document during indexing.")

        return document


class WebsiteDocumentSyncApi(DocumentResource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        """sync website document."""
        dataset_id = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found.")
        document_id = str(document_id)
        document = DocumentService.get_document(dataset.id, document_id)
        if not document:
            raise NotFound("Document not found.")
        if document.tenant_id != current_user.current_tenant_id:
            raise Forbidden("No permission.")
        if document.data_source_type != "website_crawl":
            raise ValueError("Document is not a website document.")
        # 403 if document is archived
        if DocumentService.check_archived(document):
            raise ArchivedDocumentImmutableError()
        # sync document
        DocumentService.sync_website_document(dataset_id, document)

        return {"result": "success"}, 200


api.add_resource(GetProcessRuleApi, "/datasets/process-rule")
api.add_resource(DatasetDocumentListApi, "/datasets/<uuid:dataset_id>/documents")
api.add_resource(DatasetInitApi, "/datasets/init")
api.add_resource(
    DocumentIndexingEstimateApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-estimate"
)
api.add_resource(DocumentBatchIndexingEstimateApi, "/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-estimate")
api.add_resource(DocumentBatchIndexingStatusApi, "/datasets/<uuid:dataset_id>/batch/<string:batch>/indexing-status")
api.add_resource(DocumentIndexingStatusApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/indexing-status")
api.add_resource(DocumentDetailApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>")
api.add_resource(
    DocumentProcessingApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/<string:action>"
)
api.add_resource(DocumentDeleteApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>")
api.add_resource(DocumentMetadataApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/metadata")
api.add_resource(DocumentStatusApi, "/datasets/<uuid:dataset_id>/documents/status/<string:action>/batch")
api.add_resource(DocumentPauseApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/pause")
api.add_resource(DocumentRecoverApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/processing/resume")
api.add_resource(DocumentRetryApi, "/datasets/<uuid:dataset_id>/retry")
api.add_resource(DocumentRenameApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/rename")
api.add_resource(WebsiteDocumentSyncApi, "/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/website-sync")
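
# Illustrative only, assuming the console API blueprint is mounted at
# /console/api and the request carries a valid console session (enforced by the
# decorators above, not shown here):
#   curl '<host>/console/api/datasets/<dataset_id>/documents?fetch=true'
#   curl -X POST '<host>/console/api/datasets/<dataset_id>/retry' \
#        -H 'Content-Type: application/json' -d '{"document_ids": ["<id>"]}'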