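# Console API controllers for datasets (knowledge bases): listing and CRUD,
# query history, indexing estimates and status, dataset API keys, and
# retrieval-setting lookups.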
import flask_restful
from flask import request
from flask_login import current_user
from flask_restful import Resource, marshal, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, NotFound

import services
from configs import dify_config
from controllers.console import api
from controllers.console.apikey import api_key_fields, api_key_list
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.datasets.error import DatasetInUseError, DatasetNameDuplicateError, IndexingEstimateError
from controllers.console.wraps import (
    account_initialization_required,
    cloud_edition_billing_rate_limit_check,
    enterprise_license_required,
    setup_required,
)
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType
from core.plugin.entities.plugin import ModelProviderID
from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions.ext_database import db
from fields.app_fields import related_app_list
from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
from fields.document_fields import document_status_fields
from libs.login import login_required
from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
from models.dataset import DatasetPermissionEnum
from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService


def _validate_name(name):
    if not name or len(name) < 1 or len(name) > 40:
        raise ValueError("Name must be between 1 and 40 characters.")
    return name


def _validate_description_length(description):
    if len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description
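
# Collection endpoints: list datasets for the current tenant (GET) and
# create an empty dataset (POST).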

class DatasetListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @enterprise_license_required
    def get(self):
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        ids = request.args.getlist("ids")
        # provider = request.args.get("provider", default="vendor")
        search = request.args.get("keyword", default=None, type=str)
        tag_ids = request.args.getlist("tag_ids")
        include_all = request.args.get("include_all", default="false").lower() == "true"

        if ids:
            datasets, total = DatasetService.get_datasets_by_ids(ids, current_user.current_tenant_id)
        else:
            datasets, total = DatasetService.get_datasets(
                page, limit, current_user.current_tenant_id, current_user, search, tag_ids, include_all
            )

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        data = marshal(datasets, dataset_detail_fields)
        for item in data:
            # convert embedding_model_provider to plugin standard format
            if item["indexing_technique"] == "high_quality" and item["embedding_model_provider"]:
                item["embedding_model_provider"] = str(ModelProviderID(item["embedding_model_provider"]))
                item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
                if item_model in model_names:
                    item["embedding_available"] = True
                else:
                    item["embedding_available"] = False
            else:
                item["embedding_available"] = True

            if item.get("permission") == "partial_members":
                part_users_list = DatasetPermissionService.get_dataset_partial_member_list(item["id"])
                item.update({"partial_member_list": part_users_list})
            else:
                item.update({"partial_member_list": []})

        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}
        return response, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            required=True,
            help="Name is required. Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument(
            "description",
            type=str,
            nullable=True,
            required=False,
            default="",
        )
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            nullable=True,
            required=False,
        )
        parser.add_argument(
            "provider",
            type=str,
            nullable=True,
            choices=Dataset.PROVIDER_LIST,
            required=False,
            default="vendor",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            nullable=True,
            required=False,
        )
        args = parser.parse_args()

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            dataset = DatasetService.create_empty_dataset(
                tenant_id=current_user.current_tenant_id,
                name=args["name"],
                description=args["description"],
                indexing_technique=args["indexing_technique"],
                account=current_user,
                permission=DatasetPermissionEnum.ONLY_ME,
                provider=args["provider"],
                external_knowledge_api_id=args["external_knowledge_api_id"],
                external_knowledge_id=args["external_knowledge_id"],
            )
        except services.errors.dataset.DatasetNameDuplicateError:
            raise DatasetNameDuplicateError()

        return marshal(dataset, dataset_detail_fields), 201
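
# Single-dataset endpoints: fetch detail (GET), update settings (PATCH),
# and delete (DELETE).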

class DatasetApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))
        data = marshal(dataset, dataset_detail_fields)
        if dataset.indexing_technique == "high_quality":
            if dataset.embedding_model_provider:
                provider_id = ModelProviderID(dataset.embedding_model_provider)
                data["embedding_model_provider"] = str(provider_id)
        if data.get("permission") == "partial_members":
            part_users_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
            data.update({"partial_member_list": part_users_list})

        # check embedding setting
        provider_manager = ProviderManager()
        configurations = provider_manager.get_configurations(tenant_id=current_user.current_tenant_id)

        embedding_models = configurations.get_models(model_type=ModelType.TEXT_EMBEDDING, only_active=True)

        model_names = []
        for embedding_model in embedding_models:
            model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

        if data["indexing_technique"] == "high_quality":
            item_model = f"{data['embedding_model']}:{data['embedding_model_provider']}"
            if item_model in model_names:
                data["embedding_available"] = True
            else:
                data["embedding_available"] = False
        else:
            data["embedding_available"] = True
        return data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def patch(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")

        parser = reqparse.RequestParser()
        parser.add_argument(
            "name",
            nullable=False,
            help="Name must be between 1 and 40 characters.",
            type=_validate_name,
        )
        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
        parser.add_argument(
            "indexing_technique",
            type=str,
            location="json",
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            help="Invalid indexing technique.",
        )
        parser.add_argument(
            "permission",
            type=str,
            location="json",
            choices=(DatasetPermissionEnum.ONLY_ME, DatasetPermissionEnum.ALL_TEAM, DatasetPermissionEnum.PARTIAL_TEAM),
            help="Invalid permission.",
        )
        parser.add_argument("embedding_model", type=str, location="json", help="Invalid embedding model.")
        parser.add_argument(
            "embedding_model_provider", type=str, location="json", help="Invalid embedding model provider."
        )
        parser.add_argument("retrieval_model", type=dict, location="json", help="Invalid retrieval model.")
        parser.add_argument("partial_member_list", type=list, location="json", help="Invalid partial member list.")
        parser.add_argument(
            "external_retrieval_model",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external retrieval model.",
        )
        parser.add_argument(
            "external_knowledge_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge id.",
        )
        parser.add_argument(
            "external_knowledge_api_id",
            type=str,
            required=False,
            nullable=True,
            location="json",
            help="Invalid external knowledge api id.",
        )
        parser.add_argument(
            "icon_info",
            type=dict,
            required=False,
            nullable=True,
            location="json",
            help="Invalid icon info.",
        )
        args = parser.parse_args()
        data = request.get_json()

        # check embedding model setting
        if (
            data.get("indexing_technique") == "high_quality"
            and data.get("embedding_model_provider") is not None
            and data.get("embedding_model") is not None
        ):
            DatasetService.check_embedding_model_setting(
                dataset.tenant_id, data.get("embedding_model_provider"), data.get("embedding_model")
            )

        # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator
        DatasetPermissionService.check_permission(
            current_user, dataset, data.get("permission"), data.get("partial_member_list")
        )

        dataset = DatasetService.update_dataset(dataset_id_str, args, current_user)

        if dataset is None:
            raise NotFound("Dataset not found.")

        result_data = marshal(dataset, dataset_detail_fields)
        tenant_id = current_user.current_tenant_id

        if data.get("partial_member_list") and data.get("permission") == "partial_members":
            DatasetPermissionService.update_partial_member_list(
                tenant_id, dataset_id_str, data.get("partial_member_list")
            )
        # clear partial member list when permission is only_me or all_team_members
        elif (
            data.get("permission") == DatasetPermissionEnum.ONLY_ME
            or data.get("permission") == DatasetPermissionEnum.ALL_TEAM
        ):
            DatasetPermissionService.clear_partial_member_list(dataset_id_str)

        partial_member_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)
        result_data.update({"partial_member_list": partial_member_list})

        return result_data, 200

    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def delete(self, dataset_id):
        dataset_id_str = str(dataset_id)

        # The role of the current user in the ta table must be admin, owner, or editor
        if not current_user.is_editor or current_user.is_dataset_operator:
            raise Forbidden()

        try:
            if DatasetService.delete_dataset(dataset_id_str, current_user):
                DatasetPermissionService.clear_partial_member_list(dataset_id_str)
                return {"result": "success"}, 204
            else:
                raise NotFound("Dataset not found.")
        except services.errors.dataset.DatasetInUseError:
            raise DatasetInUseError()
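
# Checks whether a dataset is currently in use (see DatasetService.dataset_use_check).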

class DatasetUseCheckApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset_is_using = DatasetService.dataset_use_check(dataset_id_str)
        return {"is_using": dataset_is_using}, 200

class DatasetQueryApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)

        dataset_queries, total = DatasetService.get_dataset_queries(dataset_id=dataset.id, page=page, per_page=limit)

        response = {
            "data": marshal(dataset_queries, dataset_query_detail_fields),
            "has_more": len(dataset_queries) == limit,
            "limit": limit,
            "total": total,
            "page": page,
        }
        return response, 200
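
# Estimates indexing output (e.g., segment counts) for uploaded files,
# Notion pages, or website crawls before the documents are imported.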

class DatasetIndexingEstimateApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("info_list", type=dict, required=True, nullable=True, location="json")
        parser.add_argument("process_rule", type=dict, required=True, nullable=True, location="json")
        parser.add_argument(
            "indexing_technique",
            type=str,
            required=True,
            choices=Dataset.INDEXING_TECHNIQUE_LIST,
            nullable=True,
            location="json",
        )
        parser.add_argument("doc_form", type=str, default="text_model", required=False, nullable=False, location="json")
        parser.add_argument("dataset_id", type=str, required=False, nullable=False, location="json")
        parser.add_argument(
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        args = parser.parse_args()
        # validate args
        DocumentService.estimate_args_validate(args)
        extract_settings = []
        if args["info_list"]["data_source_type"] == "upload_file":
            file_ids = args["info_list"]["file_info_list"]["file_ids"]
            file_details = (
                db.session.query(UploadFile)
                .where(UploadFile.tenant_id == current_user.current_tenant_id, UploadFile.id.in_(file_ids))
                .all()
            )

            if file_details is None:
                raise NotFound("File not found.")

            if file_details:
                for file_detail in file_details:
                    extract_setting = ExtractSetting(
                        datasource_type="upload_file", upload_file=file_detail, document_model=args["doc_form"]
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "notion_import":
            notion_info_list = args["info_list"]["notion_info_list"]
            for notion_info in notion_info_list:
                workspace_id = notion_info["workspace_id"]
                for page in notion_info["pages"]:
                    extract_setting = ExtractSetting(
                        datasource_type="notion_import",
                        notion_info={
                            "notion_workspace_id": workspace_id,
                            "notion_obj_id": page["page_id"],
                            "notion_page_type": page["type"],
                            "tenant_id": current_user.current_tenant_id,
                        },
                        document_model=args["doc_form"],
                    )
                    extract_settings.append(extract_setting)
        elif args["info_list"]["data_source_type"] == "website_crawl":
            website_info_list = args["info_list"]["website_info_list"]
            for url in website_info_list["urls"]:
                extract_setting = ExtractSetting(
                    datasource_type="website_crawl",
                    website_info={
                        "provider": website_info_list["provider"],
                        "job_id": website_info_list["job_id"],
                        "url": url,
                        "tenant_id": current_user.current_tenant_id,
                        "mode": "crawl",
                        "only_main_content": website_info_list["only_main_content"],
                    },
                    document_model=args["doc_form"],
                )
                extract_settings.append(extract_setting)
        else:
            raise ValueError("Data source type not supported.")
        indexing_runner = IndexingRunner()
        try:
            response = indexing_runner.indexing_estimate(
                current_user.current_tenant_id,
                extract_settings,
                args["process_rule"],
                args["doc_form"],
                args["doc_language"],
                args["dataset_id"],
                args["indexing_technique"],
            )
        except LLMBadRequestError:
            raise ProviderNotInitializeError(
                "No Embedding Model available. Please configure a valid provider in Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except Exception as e:
            raise IndexingEstimateError(str(e))

        return response.model_dump(), 200
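
# Lists the apps linked to a dataset via app-dataset joins.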

class DatasetRelatedAppListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(related_app_list)
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        app_dataset_joins = DatasetService.get_related_apps(dataset.id)

        related_apps = []
        for app_dataset_join in app_dataset_joins:
            app_model = app_dataset_join.app
            if app_model:
                related_apps.append(app_model)

        return {"data": related_apps, "total": len(related_apps)}, 200

class DatasetIndexingStatusApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id = str(dataset_id)
        documents = (
            db.session.query(Document)
            .where(Document.dataset_id == dataset_id, Document.tenant_id == current_user.current_tenant_id)
            .all()
        )
        documents_status = []
        for document in documents:
            completed_segments = (
                db.session.query(DocumentSegment)
                .where(
                    DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document.id),
                    DocumentSegment.status != "re_segment",
                )
                .count()
            )
            total_segments = (
                db.session.query(DocumentSegment)
                .where(DocumentSegment.document_id == str(document.id), DocumentSegment.status != "re_segment")
                .count()
            )
            # Create a dictionary with document attributes and additional fields
            document_dict = {
                "id": document.id,
                "indexing_status": document.indexing_status,
                "processing_started_at": document.processing_started_at,
                "parsing_completed_at": document.parsing_completed_at,
                "cleaning_completed_at": document.cleaning_completed_at,
                "splitting_completed_at": document.splitting_completed_at,
                "completed_at": document.completed_at,
                "paused_at": document.paused_at,
                "error": document.error,
                "stopped_at": document.stopped_at,
                "completed_segments": completed_segments,
                "total_segments": total_segments,
            }
            documents_status.append(marshal(document_dict, document_status_fields))
        data = {"data": documents_status}
        return data
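
# Dataset-scoped service API keys: list (GET) and create (POST);
# deletion is handled by DatasetApiDeleteApi below.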

class DatasetApiKeyApi(Resource):
    max_keys = 10
    token_prefix = "dataset-"
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_list)
    def get(self):
        keys = (
            db.session.query(ApiToken)
            .where(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .all()
        )
        return {"items": keys}

    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(api_key_fields)
    def post(self):
        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        current_key_count = (
            db.session.query(ApiToken)
            .where(ApiToken.type == self.resource_type, ApiToken.tenant_id == current_user.current_tenant_id)
            .count()
        )

        if current_key_count >= self.max_keys:
            flask_restful.abort(
                400,
                message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
                code="max_keys_exceeded",
            )

        key = ApiToken.generate_api_key(self.token_prefix, 24)
        api_token = ApiToken()
        api_token.tenant_id = current_user.current_tenant_id
        api_token.token = key
        api_token.type = self.resource_type
        db.session.add(api_token)
        db.session.commit()
        return api_token, 200
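
# Deletes a dataset-scoped API key by id.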

class DatasetApiDeleteApi(Resource):
    resource_type = "dataset"

    @setup_required
    @login_required
    @account_initialization_required
    def delete(self, api_key_id):
        api_key_id = str(api_key_id)

        # The role of the current user in the ta table must be admin or owner
        if not current_user.is_admin_or_owner:
            raise Forbidden()

        key = (
            db.session.query(ApiToken)
            .where(
                ApiToken.tenant_id == current_user.current_tenant_id,
                ApiToken.type == self.resource_type,
                ApiToken.id == api_key_id,
            )
            .first()
        )

        if key is None:
            flask_restful.abort(404, message="API key not found")

        db.session.query(ApiToken).where(ApiToken.id == api_key_id).delete()
        db.session.commit()

        return {"result": "success"}, 204

class DatasetApiBaseUrlApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        return {"api_base_url": (dify_config.SERVICE_API_URL or request.host_url.rstrip("/")) + "/v1"}

class DatasetRetrievalSettingApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        vector_type = dify_config.VECTOR_STORE
        match vector_type:
            case (
                VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.PGVECTOR
                | VectorType.VASTBASE
                | VectorType.TIDB_ON_QDRANT
                | VectorType.LINDORM
                | VectorType.COUCHBASE
                | VectorType.MILVUS
                | VectorType.OPENGAUSS
                | VectorType.OCEANBASE
                | VectorType.TABLESTORE
                | VectorType.HUAWEI_CLOUD
                | VectorType.TENCENT
                | VectorType.MATRIXONE
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")
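
# Same retrieval-method mapping, but keyed by an explicit vector_type
# path parameter instead of the configured store.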

class DatasetRetrievalSettingMockApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, vector_type):
        match vector_type:
            case (
                VectorType.MILVUS
                | VectorType.RELYT
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.PGVECTO_RS
                | VectorType.BAIDU
                | VectorType.VIKINGDB
                | VectorType.UPSTASH
            ):
                return {"retrieval_method": [RetrievalMethod.SEMANTIC_SEARCH.value]}
            case (
                VectorType.QDRANT
                | VectorType.WEAVIATE
                | VectorType.OPENSEARCH
                | VectorType.ANALYTICDB
                | VectorType.MYSCALE
                | VectorType.ORACLE
                | VectorType.ELASTICSEARCH
                | VectorType.ELASTICSEARCH_JA
                | VectorType.COUCHBASE
                | VectorType.PGVECTOR
                | VectorType.VASTBASE
                | VectorType.LINDORM
                | VectorType.OPENGAUSS
                | VectorType.OCEANBASE
                | VectorType.TABLESTORE
                | VectorType.TENCENT
                | VectorType.HUAWEI_CLOUD
                | VectorType.MATRIXONE
            ):
                return {
                    "retrieval_method": [
                        RetrievalMethod.SEMANTIC_SEARCH.value,
                        RetrievalMethod.FULL_TEXT_SEARCH.value,
                        RetrievalMethod.HYBRID_SEARCH.value,
                    ]
                }
            case _:
                raise ValueError(f"Unsupported vector db type {vector_type}.")
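
# Lists documents in a dataset whose indexing failed with an error.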

class DatasetErrorDocs(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        results = DocumentService.get_error_documents_by_dataset_id(dataset_id_str)

        return {"data": [marshal(item, document_status_fields) for item in results], "total": len(results)}, 200

class DatasetPermissionUserListApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        partial_members_list = DatasetPermissionService.get_dataset_partial_member_list(dataset_id_str)

        return {
            "data": partial_members_list,
        }, 200
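
# Returns the auto-disable log entries for a dataset.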

class DatasetAutoDisableLogApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id):
        dataset_id_str = str(dataset_id)
        dataset = DatasetService.get_dataset(dataset_id_str)
        if dataset is None:
            raise NotFound("Dataset not found.")
        return DatasetService.get_dataset_auto_disable_logs(dataset_id_str), 200
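
# Route registrations for the console API.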

api.add_resource(DatasetListApi, "/datasets")
api.add_resource(DatasetApi, "/datasets/<uuid:dataset_id>")
api.add_resource(DatasetUseCheckApi, "/datasets/<uuid:dataset_id>/use-check")
api.add_resource(DatasetQueryApi, "/datasets/<uuid:dataset_id>/queries")
api.add_resource(DatasetErrorDocs, "/datasets/<uuid:dataset_id>/error-docs")
api.add_resource(DatasetIndexingEstimateApi, "/datasets/indexing-estimate")
api.add_resource(DatasetRelatedAppListApi, "/datasets/<uuid:dataset_id>/related-apps")
api.add_resource(DatasetIndexingStatusApi, "/datasets/<uuid:dataset_id>/indexing-status")
api.add_resource(DatasetApiKeyApi, "/datasets/api-keys")
api.add_resource(DatasetApiDeleteApi, "/datasets/api-keys/<uuid:api_key_id>")
api.add_resource(DatasetApiBaseUrlApi, "/datasets/api-base-info")
api.add_resource(DatasetRetrievalSettingApi, "/datasets/retrieval-setting")
api.add_resource(DatasetRetrievalSettingMockApi, "/datasets/retrieval-setting/<string:vector_type>")
api.add_resource(DatasetPermissionUserListApi, "/datasets/<uuid:dataset_id>/permission-part-users")
api.add_resource(DatasetAutoDisableLogApi, "/datasets/<uuid:dataset_id>/auto-disable-logs")