
datasets.py

import flask_restx
from flask import request
from flask_login import current_user
from flask_restx import Resource, fields, marshal, marshal_with, reqparse
from sqlalchemy import select
from werkzeug.exceptions import Forbidden, NotFound

import services
from configs import dify_config
from controllers.console import api, console_ns
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError
from controllers.console.wraps import (
    account_initialization_required,
    cloud_edition_billing_rate_limit_check,
    enterprise_license_required,
    setup_required,
)
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.datasource_type import DatasourceType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
from models.dataset import DatasetPermissionEnum
from models.provider_ids import ModelProviderID
from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService


def _validate_name(name):
    if not name or len(name) < 1 or len(name) > 40:
        raise ValueError("Name must be between 1 and 40 characters.")
    return name


def _validate_description_length(description):
    if description and len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description
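

# These validators plug into reqparse's `type=` hook: the callable receives the
# raw value, and any ValueError it raises is converted into a 400 response that
# carries the argument's help message. A minimal sketch (hypothetical setup):
#
#     parser = reqparse.RequestParser()
#     parser.add_argument("name", type=_validate_name, required=True)
#     args = parser.parse_args()  # aborts with 400 if name is empty or over 40 chars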


@console_ns.route("/datasets")
class DatasetListApi(Resource):
    @api.doc("get_datasets")
    @api.doc(description="Get list of datasets")
    @api.doc(
        params={
            "page": "Page number (default: 1)",
            "limit": "Number of items per page (default: 20)",
            "ids": "Filter by dataset IDs (list)",
            "keyword": "Search keyword",
            "tag_ids": "Filter by tag IDs (list)",
            "include_all": "Include all datasets (default: false)",
        }
    )
    @api.response(200, "Datasets retrieved successfully")
    @setup_required
    @login_required
    @account_initialization_required
    @enterprise_license_required
    def get(self):
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        ids = request.args.getlist("ids")
        # provider = request.args.get("provider", default="vendor")
        search = request.args.get("keyword", default=None, type=str)
        tag_ids = request.args.getlist("tag_ids")
        include_all = request.args.get("include_all", default="false").lower() == "true"

        if ids:
            datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
        else:
            datasets, total = DatasetService.get_datasets(
                page, limit, current_user.current_tenant_id, current_user, search, tag_ids, include_all
            )

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)
        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        data = marshal(datasets, dataset_detail_fields)
        for item in data:
            # convert embedding_model_provider to plugin standard format
            if item["indexing_technique"] == "high_quality" and item["embedding_model_provider"]:
                item["embedding_model_provider"] = str(ModelProviderID(item["embedding_model_provider"]))
                item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
                item["embedding_available"] = item_model in model_names
            else:
                item["embedding_available"] = True

            if item.get("permission") == "partial_members":
                part_users_list = DatasetPermissionService.get_dataset_partial_member_list(item["id"])
                item.update({"partial_member_list": part_users_list})
            else:
                item.update({"partial_member_list": []})

        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}
        return response, 200

    @api.doc("create_dataset")
    @api.doc(description="Create a new dataset")
    @api.expect(
        api.model(
            "CreateDatasetRequest",
            {
                "name": fields.String(required=True, description="Dataset name (1-40 characters)"),
                "description": fields.String(description="Dataset description (max 400 characters)"),
                "indexing_technique": fields.String(description="Indexing technique"),
                "permission": fields.String(description="Dataset permission"),
                "provider": fields.String(description="Provider"),
                "external_knowledge_api_id": fields.String(description="External knowledge API ID"),
                "external_knowledge_id": fields.String(description="External knowledge ID"),
            },
        )
    )
    @api.response(201, "Dataset created successfully")
    @api.response(400, "Invalid request parameters")
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            required=True,
            help="name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument(
            "description",
            type=_validate_description_length,
            nullable=True,
            required=False,
            default="",
        )
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            nullable=True,
            required=False,
        )
        parser.add_argument(
            "provider",
            type=str,
            nullable=True,
            choices=Dataset.PROVIDER_LIST,
            required=False,
            default="vendor",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            nullable=True,
            required=False,
        )
        args = parser.parse_args()

        # The current user's role in the tenant must be admin, owner, editor, or dataset_operator.
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            dataset = DatasetService.create_empty_dataset(
                tenant_id=current_user.current_tenant_id,
                name=args["name"],
                description=args["description"],
                indexing_technique=args["indexing_technique"],
                account=current_user,
                permission=DatasetPermissionEnum.ONLY_ME,
                provider=args["provider"],
                external_knowledge_api_id=args["external_knowledge_api_id"],
                external_knowledge_id=args["external_knowledge_id"],
            )
        except services.errors.dataset.DatasetNameDuplicateError:
            raise DatasetNameDuplicateError()

        return marshal(dataset, dataset_detail_fields), 201
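

# Example (hypothetical values): listing and creating datasets through the
# console API. Paths assume the console blueprint's usual /console/api prefix,
# and requests must carry a valid console login session (see @login_required),
# not a service API token.
#
#     GET  /console/api/datasets?page=1&limit=20&keyword=faq
#     POST /console/api/datasets
#          {"name": "Support FAQ", "indexing_technique": "high_quality"}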


@console_ns.route("/datasets/<uuid:dataset_id>")
class DatasetApi(Resource):
    @api.doc("get_dataset")
    @api.doc(description="Get dataset details")
    @api.doc(params={"dataset_id": "Dataset ID"})
    @api.response(200, "Dataset retrieved successfully", dataset_detail_fields)
    @api.response(404, "Dataset not found")
    @api.response(403, "Permission denied")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        data = marshal(dataset, dataset_detail_fields)
        if dataset.indexing_technique == "high_quality":
            if dataset.embedding_model_provider:
                provider_id = ModelProviderID(dataset.embedding_model_provider)
                data["embedding_model_provider"] = str(provider_id)
        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)
        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        if data["indexing_technique"] == "high_quality":
            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
            data["embedding_available"] = item_model in model_names
        else:
            data["embedding_available"] = True
        return data, 200

    @api.doc("update_dataset")
    @api.doc(description="Update dataset details")
    @api.expect(
        api.model(
            "UpdateDatasetRequest",
            {
                "name": fields.String(description="Dataset name"),
                "description": fields.String(description="Dataset description"),
                "permission": fields.String(description="Dataset permission"),
                "indexing_technique": fields.String(description="Indexing technique"),
                "external_retrieval_model": fields.Raw(description="External retrieval model settings"),
            },
        )
    )
    @api.response(200, "Dataset updated successfully", dataset_detail_fields)
    @api.response(404, "Dataset not found")
    @api.response(403, "Permission denied")
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            help="name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "permission",
            type=str,
            location="json",
            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),
            help="Invalid permission.",
        )
        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")
        parser.add_argument(
            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."
        )
        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid partial member list.")
        parser.add_argument(
            "external_retrieval_model",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external retrieval model.",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge id.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge api id.",
        )
        parser.add_argument(
            "icon_info",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid icon info.",
        )
        args = parser.parse_args()
        data = request.get_json()

        # check embedding model setting
        if (
            data.get("indexing_technique") == "high_quality"
            and data.get("embedding_model_provider") is not None
            and data.get("embedding_model") is not None
        ):
            DatasetService.check_embedding_model_setting(
                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
            )

        # The current user's role in the tenant must be admin, owner, editor, or dataset_operator.
        DatasetPermissionService.check_permission(
            current_user, dataset, data.get("permission"), data.get("partial_member_list")
        )

        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)
        if dataset is None:
            raise NotFound("Dataset not found.")

        result_data = marshal(dataset, dataset_detail_fields)
        tenant_id = current_user.current_tenant_id

        if data.get("partial_member_list") and data.get("permission") == "partial_members":
            DatasetPermissionService.update_partial_member_list(
                tenant_id, dataset_id_str, data.get("partial_member_list")
            )
        # clear partial member list when permission is only_me or all_team_members
        elif (
            data.get("permission") == DatasetPermissionEnum.ONLY_ME
            or data.get("permission") == DatasetPermissionEnum.ALL_TEAM
        ):
            DatasetPermissionService.clear_partial_member_list(dataset_id_str)

        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        result_data.update({"partial_member_list": partial_member_list})

        return result_data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def delete(self, dataset_id):
        dataset_id_str = str(dataset_id)

        # The current user's role in the tenant must be admin, owner, or editor.
        if not (current_user.is_editor or current_user.is_dataset_operator):
            raise Forbidden()

        try:
            if DatasetService.delete_dataset(dataset_id_str, current_user):
                DatasetPermissionService.clear_partial_member_list(dataset_id_str)
                return {"result": "success"}, 204
            else:
                raise NotFound("Dataset not found.")
        except services.errors.dataset.DatasetInUseError:
            raise DatasetInUseError()
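

# Example (hypothetical IDs): granting partial-member access via the PATCH
# handler above. "permission" must be one of the parser's choices; the parser
# only requires "partial_member_list" to be a JSON list, so the element shape
# shown here is illustrative rather than a contract.
#
#     PATCH /console/api/datasets/<dataset-uuid>
#     {"permission": "partial_members", "partial_member_list": [{"user_id": "<account-uuid>"}]}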


@console_ns.route("/datasets/<uuid:dataset_id>/use-check")
class DatasetUseCheckApi(Resource):
    @api.doc("check_dataset_use")
    @api.doc(description="Check if dataset is in use")
    @api.doc(params={"dataset_id": "Dataset ID"})
    @api.response(200, "Dataset use status retrieved successfully")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset_is_using = DatasetService.dataset_use_check(dataset_id_str)
        return {"is_using": dataset_is_using}, 200


@console_ns.route("/datasets/<uuid:dataset_id>/queries")
class DatasetQueryApi(Resource):
    @api.doc("get_dataset_queries")
    @api.doc(description="Get dataset query history")
    @api.doc(params={"dataset_id": "Dataset ID"})
    @api.response(200, "Query history retrieved successfully", dataset_query_detail_fields)
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)

        dataset_queries, total = DatasetService.get_dataset_queries(dataset_id=dataset.id, page=page, per_page=limit)

        response = {
            "data": marshal(dataset_queries, dataset_query_detail_fields),
            "has_more": len(dataset_queries) == limit,
            "limit": limit,
            "total": total,
            "page": page,
        }
        return response, 200


@console_ns.route("/datasets/indexing-estimate")
class DatasetIndexingEstimateApi(Resource):
    @api.doc("estimate_dataset_indexing")
    @api.doc(description="Estimate dataset indexing cost")
    @api.response(200, "Indexing estimate calculated successfully")
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("info_list", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")
        parser.add_argument(
            "indexing_technique",
            type=str,
            required=True,
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            location="json",
        )
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument("dataset_id", type=str, required=False, nullable=False, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        args = parser.parse_args()

        # validate args
        DocumentService.estimate_args_validate(args)
        extract_settings = []
        if args["info_list"]["data_source_type"] == "upload_file":
            file_ids = args["info_list"]["file_info_list"]["file_ids"]
            file_details = db.session.scalars(
                select(UploadFile).where(
                    UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id.in_(file_ids)
                )
            ).all()

            if file_details is None:
                raise NotFound("File not found.")

            if file_details:
                for file_detail in file_details:
                    extract_setting = ExtractSetting(
                        datasource_type=DatasourceType.FILE.value,
                        upload_file=file_detail,
                        document_model=args["doc_form"],
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "notion_import":
            notion_info_list = args["info_list"]["notion_info_list"]
            for notion_info in notion_info_list:
                workspace_id = notion_info["workspace_id"]
                credential_id = notion_info.get("credential_id")
                for page in notion_info["pages"]:
                    extract_setting = ExtractSetting(
                        datasource_type=DatasourceType.NOTION.value,
                        notion_info={
                            "credential_id": credential_id,
                            "notion_workspace_id": workspace_id,
                            "notion_obj_id": page["page_id"],
                            "notion_page_type": page["type"],
                            "tenant_id": current_user.current_tenant_id,
                        },
                        document_model=args["doc_form"],
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "website_crawl":
            website_info_list = args["info_list"]["website_info_list"]
            for url in website_info_list["urls"]:
                extract_setting = ExtractSetting(
                    datasource_type=DatasourceType.WEBSITE.value,
                    website_info={
                        "provider": website_info_list["provider"],
                        "job_id": website_info_list["job_id"],
                        "url": url,
                        "tenant_id": current_user.current_tenant_id,
                        "mode": "crawl",
                        "only_main_content": website_info_list["only_main_content"],
                    },
                    document_model=args["doc_form"],
                )
                extract_settings.append(extract_setting)
        else:
            raise ValueError("Data source type not supported.")

        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(
                current_user.current_tenant_id,
                extract_settings,
                args["process_rule"],
                args["doc_form"],
                args["doc_language"],
                args["dataset_id"],
                args["indexing_technique"],
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider in Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))

        return response.model_dump(), 200
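

# Example (hypothetical file ID): estimating indexing cost for one uploaded
# file. The body shape mirrors the branches above; "process_rule" follows the
# same structure used when creating documents.
#
#     POST /console/api/datasets/indexing-estimate
#     {
#         "info_list": {
#             "data_source_type": "upload_file",
#             "file_info_list": {"file_ids": ["<upload-file-uuid>"]}
#         },
#         "process_rule": {"mode": "automatic", "rules": {}},
#         "indexing_technique": "high_quality",
#         "doc_form": "text_model",
#         "doc_language": "English"
#     }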


@console_ns.route("/datasets/<uuid:dataset_id>/related-apps")
class DatasetRelatedAppListApi(Resource):
    @api.doc("get_dataset_related_apps")
    @api.doc(description="Get applications related to dataset")
    @api.doc(params={"dataset_id": "Dataset ID"})
    @api.response(200, "Related apps retrieved successfully", related_app_list)
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(related_app_list)
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        app_dataset_joins = DatasetService.get_related_apps(dataset.id)

        related_apps = []
        for app_dataset_join in app_dataset_joins:
            app_model = app_dataset_join.app
            if app_model:
                related_apps.append(app_model)

        return {"data": related_apps, "total": len(related_apps)}, 200


@console_ns.route("/datasets/<uuid:dataset_id>/indexing-status")
class DatasetIndexingStatusApi(Resource):
    @api.doc("get_dataset_indexing_status")
    @api.doc(description="Get dataset indexing status")
    @api.doc(params={"dataset_id": "Dataset ID"})
    @api.response(200, "Indexing status retrieved successfully")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        documents = db.session.scalars(
            select(Document).where(
                Document.dataset_id == dataset_id, Document.tenant_id == current_user.current_tenant_id
            )
        ).all()
        documents_status = []
        for document in documents:
            completed_segments = (
                db.session.query(DocumentSegment)
                .where(
                    DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document.id),
                    DocumentSegment.status != "re_segment",
                )
                .count()
            )
            total_segments = (
                db.session.query(DocumentSegment)
                .where(DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment")
                .count()
            )
            # Create a dictionary with document attributes and additional fields
            document_dict = {
                "id": document.id,
                "indexing_status": document.indexing_status,
                "processing_started_at": document.processing_started_at,
                "parsing_completed_at": document.parsing_completed_at,
                "cleaning_completed_at": document.cleaning_completed_at,
                "splitting_completed_at": document.splitting_completed_at,
                "completed_at": document.completed_at,
                "paused_at": document.paused_at,
                "error": document.error,
                "stopped_at": document.stopped_at,
                "completed_segments": completed_segments,
                "total_segments": total_segments,
            }
            documents_status.append(marshal(document_dict, document_status_fields))
        data = {"data": documents_status}
        return data, 200


@console_ns.route("/datasets/api-keys")
class DatasetApiKeyApi(Resource):
    max_keys = 10
    token_prefix = "dataset-"
    resource_type = "dataset"

    @api.doc("get_dataset_api_keys")
    @api.doc(description="Get dataset API keys")
    @api.response(200, "API keys retrieved successfully", api_key_list)
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_list)
    def get(self):
        keys = db.session.scalars(
            select(ApiToken).where(
                ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id
            )
        ).all()
        return {"items": keys}

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_fields)
    def post(self):
        # The current user's role in the tenant must be admin or owner.
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        current_key_count = (
            db.session.query(ApiToken)
            .where(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .count()
        )

        if current_key_count >= self.max_keys:
            flask_restx.abort(
                400,
                message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
                code="max_keys_exceeded",
            )

        key = ApiToken.generate_api_key(self.token_prefix, 24)
        api_token = ApiToken()
        api_token.tenant_id = current_user.current_tenant_id
        api_token.token = key
        api_token.type = self.resource_type
        db.session.add(api_token)
        db.session.commit()
        return api_token, 200
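

# Note: key creation is capped at max_keys (10) per tenant for this resource
# type, and tokens are produced by ApiToken.generate_api_key with the
# "dataset-" prefix; the second argument (24) presumably sets the length of
# the random portion of the token.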


@console_ns.route("/datasets/api-keys/<uuid:api_key_id>")
class DatasetApiDeleteApi(Resource):
    resource_type = "dataset"

    @api.doc("delete_dataset_api_key")
    @api.doc(description="Delete dataset API key")
    @api.doc(params={"api_key_id": "API key ID"})
    @api.response(204, "API key deleted successfully")
    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, api_key_id):
        api_key_id = str(api_key_id)

        # The current user's role in the tenant must be admin or owner.
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        key = (
            db.session.query(ApiToken)
            .where(
                ApiToken.tenant_id == current_user.current_tenant_id,
                ApiToken.type == self.resource_type,
                ApiToken.id == api_key_id,
            )
            .first()
        )

        if key is None:
            flask_restx.abort(404, message="API key not found")

        db.session.query(ApiToken).where(ApiToken.id == api_key_id).delete()
        db.session.commit()

        return {"result": "success"}, 204


@console_ns.route("/datasets/<uuid:dataset_id>/api-keys/<string:status>")
class DatasetEnableApiApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self, dataset_id, status):
        dataset_id_str = str(dataset_id)
        DatasetService.update_dataset_api_status(dataset_id_str, status == "enable")
        return {"result": "success"}, 200


@console_ns.route("/datasets/api-base-info")
class DatasetApiBaseUrlApi(Resource):
    @api.doc("get_dataset_api_base_info")
    @api.doc(description="Get dataset API base information")
    @api.response(200, "API base info retrieved successfully")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}
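

# Example: with SERVICE_API_URL set to "https://api.example.com" (hypothetical
# value), the endpoint above returns {"api_base_url": "https://api.example.com/v1"};
# when SERVICE_API_URL is unset, the requesting host's URL is used as the base.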


@console_ns.route("/datasets/retrieval-setting")
class DatasetRetrievalSettingApi(Resource):
    @api.doc("get_dataset_retrieval_setting")
    @api.doc(description="Get dataset retrieval settings")
    @api.response(200, "Retrieval settings retrieved successfully")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        vector_type = dify_config.VECTOR_STORE
        match vector_type:
            case (
                VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.PGVECTO_RS
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.PGVECTOR
                | VectorType.VASTBASE
                | VectorType.TIDB_ON_QDRANT
                | VectorType.LINDORM
                | VectorType.COUCHBASE
                | VectorType.MILVUS
                | VectorType.OPENGAUSS
                | VectorType.OCEANBASE
                | VectorType.TABLESTORE
                | VectorType.HUAWEI_CLOUD
                | VectorType.TENCENT
                | VectorType.MATRIXONE
                | VectorType.CLICKZETTA
                | VectorType.BAIDU
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")
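

# The mock endpoint below mirrors the match above but takes the vector store
# type from the URL path instead of dify_config.VECTOR_STORE, so the front end
# can look up retrieval methods for a store that is not currently configured.
# Note the two lists are not identical: MILVUS sits in the semantic-search-only
# branch below, and TIDB_ON_QDRANT is not listed at all.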


@console_ns.route("/datasets/retrieval-setting/<string:vector_type>")
class DatasetRetrievalSettingMockApi(Resource):
    @api.doc("get_dataset_retrieval_setting_mock")
    @api.doc(description="Get mock dataset retrieval settings by vector type")
    @api.doc(params={"vector_type": "Vector store type"})
    @api.response(200, "Mock retrieval settings retrieved successfully")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, vector_type):
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.PGVECTO_RS
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.COUCHBASE
                | VectorType.PGVECTOR
                | VectorType.VASTBASE
                | VectorType.LINDORM
                | VectorType.OPENGAUSS
                | VectorType.OCEANBASE
                | VectorType.TABLESTORE
                | VectorType.TENCENT
                | VectorType.HUAWEI_CLOUD
                | VectorType.MATRIXONE
                | VectorType.CLICKZETTA
                | VectorType.BAIDU
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")


@console_ns.route("/datasets/<uuid:dataset_id>/error-docs")
class DatasetErrorDocs(Resource):
    @api.doc("get_dataset_error_docs")
    @api.doc(description="Get dataset error documents")
    @api.doc(params={"dataset_id": "Dataset ID"})
    @api.response(200, "Error documents retrieved successfully")
    @api.response(404, "Dataset not found")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)

        return {"data": [marshal(item, document_status_fields) for item in results], "total": len(results)}, 200


@console_ns.route("/datasets/<uuid:dataset_id>/permission-part-users")
class DatasetPermissionUserListApi(Resource):
    @api.doc("get_dataset_permission_users")
    @api.doc(description="Get dataset permission user list")
    @api.doc(params={"dataset_id": "Dataset ID"})
    @api.response(200, "Permission users retrieved successfully")
    @api.response(404, "Dataset not found")
    @api.response(403, "Permission denied")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        partial_members_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)

        return {
            "data": partial_members_list,
        }, 200


@console_ns.route("/datasets/<uuid:dataset_id>/auto-disable-logs")
class DatasetAutoDisableLogApi(Resource):
    @api.doc("get_dataset_auto_disable_logs")
    @api.doc(description="Get dataset auto disable logs")
    @api.doc(params={"dataset_id": "Dataset ID"})
    @api.response(200, "Auto disable logs retrieved successfully")
    @api.response(404, "Dataset not found")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        return DatasetService.get_dataset_auto_disable_logs(dataset_id_str), 200