dataset_service.py 96KB

import datetime
import json
import logging
import random
import time
import uuid
from typing import Any, Optional

from flask_login import current_user  # type: ignore
from sqlalchemy import func
from werkzeug.exceptions import NotFound

from configs import dify_config
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from events.dataset_event import dataset_was_deleted
from events.document_event import document_was_deleted
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs import helper
from models.account import Account, TenantAccountRole
from models.dataset import (
    AppDatasetJoin,
    ChildChunk,
    Dataset,
    DatasetAutoDisableLog,
    DatasetCollectionBinding,
    DatasetPermission,
    DatasetPermissionEnum,
    DatasetProcessRule,
    DatasetQuery,
    Document,
    DocumentSegment,
    ExternalKnowledgeBindings,
)
from models.model import UploadFile
from models.source import DataSourceOauthBinding
from services.entities.knowledge_entities.knowledge_entities import (
    ChildChunkUpdateArgs,
    KnowledgeConfig,
    RerankingModel,
    RetrievalModel,
    SegmentUpdateArgs,
)
from services.errors.account import InvalidActionError, NoPermissionError
from services.errors.chunk import ChildChunkDeleteIndexError, ChildChunkIndexingError
from services.errors.dataset import DatasetNameDuplicateError
from services.errors.document import DocumentIndexingError
from services.errors.file import FileNotExistsError
from services.external_knowledge_service import ExternalDatasetService
from services.feature_service import FeatureModel, FeatureService
from services.tag_service import TagService
from services.vector_service import VectorService
from tasks.batch_clean_document_task import batch_clean_document_task
from tasks.clean_notion_document_task import clean_notion_document_task
from tasks.deal_dataset_vector_index_task import deal_dataset_vector_index_task
from tasks.delete_segment_from_index_task import delete_segment_from_index_task
from tasks.disable_segment_from_index_task import disable_segment_from_index_task
from tasks.disable_segments_from_index_task import disable_segments_from_index_task
from tasks.document_indexing_task import document_indexing_task
from tasks.document_indexing_update_task import document_indexing_update_task
from tasks.duplicate_document_indexing_task import duplicate_document_indexing_task
from tasks.enable_segments_to_index_task import enable_segments_to_index_task
from tasks.recover_document_indexing_task import recover_document_indexing_task
from tasks.retry_document_indexing_task import retry_document_indexing_task
from tasks.sync_website_document_indexing_task import sync_website_document_indexing_task


class DatasetService:
    @staticmethod
    def get_datasets(page, per_page, tenant_id=None, user=None, search=None, tag_ids=None):
        query = Dataset.query.filter(Dataset.tenant_id == tenant_id).order_by(Dataset.created_at.desc())
        if user:
            # get permitted dataset ids
            dataset_permission = DatasetPermission.query.filter_by(account_id=user.id, tenant_id=tenant_id).all()
            permitted_dataset_ids = {dp.dataset_id for dp in dataset_permission} if dataset_permission else None
            if user.current_role == TenantAccountRole.DATASET_OPERATOR:
                # only show datasets that the user has permission to access
                if permitted_dataset_ids:
                    query = query.filter(Dataset.id.in_(permitted_dataset_ids))
                else:
                    return [], 0
            else:
                if user.current_role not in (TenantAccountRole.OWNER, TenantAccountRole.ADMIN):
                    # show all datasets that the user has permission to access
                    if permitted_dataset_ids:
                        query = query.filter(
                            db.or_(
                                Dataset.permission == DatasetPermissionEnum.ALL_TEAM,
                                db.and_(
                                    Dataset.permission == DatasetPermissionEnum.ONLY_ME, Dataset.created_by == user.id
                                ),
                                db.and_(
                                    Dataset.permission == DatasetPermissionEnum.PARTIAL_TEAM,
                                    Dataset.id.in_(permitted_dataset_ids),
                                ),
                            )
                        )
                    else:
                        query = query.filter(
                            db.or_(
                                Dataset.permission == DatasetPermissionEnum.ALL_TEAM,
                                db.and_(
                                    Dataset.permission == DatasetPermissionEnum.ONLY_ME, Dataset.created_by == user.id
                                ),
                            )
                        )
        else:
            # if no user, only show datasets that are shared with all team members
            query = query.filter(Dataset.permission == DatasetPermissionEnum.ALL_TEAM)
        if search:
            query = query.filter(Dataset.name.ilike(f"%{search}%"))
        if tag_ids:
            target_ids = TagService.get_target_ids_by_tag_ids("knowledge", tenant_id, tag_ids)
            if target_ids:
                query = query.filter(Dataset.id.in_(target_ids))
            else:
                return [], 0
        datasets = query.paginate(page=page, per_page=per_page, max_per_page=100, error_out=False)
        return datasets.items, datasets.total
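
    # Usage sketch (hypothetical caller, not part of this module): listing is
    # paginated and permission-aware, so a console endpoint would call it as
    #
    #     items, total = DatasetService.get_datasets(
    #         page=1, per_page=20, tenant_id=tenant_id, user=current_user
    #     )
    #
    # Note that a DATASET_OPERATOR with no DatasetPermission rows gets ([], 0)
    # back rather than an error.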

    @staticmethod
    def get_process_rules(dataset_id):
        # get the latest process rule
        dataset_process_rule = (
            db.session.query(DatasetProcessRule)
            .filter(DatasetProcessRule.dataset_id == dataset_id)
            .order_by(DatasetProcessRule.created_at.desc())
            .limit(1)
            .one_or_none()
        )
        if dataset_process_rule:
            mode = dataset_process_rule.mode
            rules = dataset_process_rule.rules_dict
        else:
            mode = DocumentService.DEFAULT_RULES["mode"]
            rules = DocumentService.DEFAULT_RULES["rules"]
        return {"mode": mode, "rules": rules}
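
    # Note: for a dataset that has never been indexed there is no
    # DatasetProcessRule row, and the call above falls back to
    # DocumentService.DEFAULT_RULES. Sketch (assuming an existing dataset id):
    #
    #     rules = DatasetService.get_process_rules(dataset_id)
    #     rules["mode"]   # "custom" unless a rule row says otherwise
    #     rules["rules"]  # pre_processing_rules plus segmentation settings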

    @staticmethod
    def get_datasets_by_ids(ids, tenant_id):
        datasets = Dataset.query.filter(Dataset.id.in_(ids), Dataset.tenant_id == tenant_id).paginate(
            page=1, per_page=len(ids), max_per_page=len(ids), error_out=False
        )
        return datasets.items, datasets.total

    @staticmethod
    def create_empty_dataset(
        tenant_id: str,
        name: str,
        description: Optional[str],
        indexing_technique: Optional[str],
        account: Account,
        permission: Optional[str] = None,
        provider: str = "vendor",
        external_knowledge_api_id: Optional[str] = None,
        external_knowledge_id: Optional[str] = None,
    ):
        # check if dataset name already exists
        if Dataset.query.filter_by(name=name, tenant_id=tenant_id).first():
            raise DatasetNameDuplicateError(f"Dataset with name {name} already exists.")
        embedding_model = None
        if indexing_technique == "high_quality":
            model_manager = ModelManager()
            embedding_model = model_manager.get_default_model_instance(
                tenant_id=tenant_id, model_type=ModelType.TEXT_EMBEDDING
            )
        dataset = Dataset(name=name, indexing_technique=indexing_technique)
        # dataset = Dataset(name=name, provider=provider, config=config)
        dataset.description = description
        dataset.created_by = account.id
        dataset.updated_by = account.id
        dataset.tenant_id = tenant_id
        dataset.embedding_model_provider = embedding_model.provider if embedding_model else None
        dataset.embedding_model = embedding_model.model if embedding_model else None
        dataset.permission = permission or DatasetPermissionEnum.ONLY_ME
        dataset.provider = provider
        db.session.add(dataset)
        db.session.flush()
        if provider == "external" and external_knowledge_api_id:
            external_knowledge_api = ExternalDatasetService.get_external_knowledge_api(external_knowledge_api_id)
            if not external_knowledge_api:
                raise ValueError("External API template not found.")
            external_knowledge_binding = ExternalKnowledgeBindings(
                tenant_id=tenant_id,
                dataset_id=dataset.id,
                external_knowledge_api_id=external_knowledge_api_id,
                external_knowledge_id=external_knowledge_id,
                created_by=account.id,
            )
            db.session.add(external_knowledge_binding)
        db.session.commit()
        return dataset
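
    # Usage sketch (hypothetical values): creating a "high_quality" dataset
    # requires a default text-embedding model to be configured for the tenant,
    # otherwise get_default_model_instance raises ProviderTokenNotInitError.
    #
    #     dataset = DatasetService.create_empty_dataset(
    #         tenant_id=tenant_id,
    #         name="product-docs",
    #         description=None,
    #         indexing_technique="high_quality",
    #         account=current_user,
    #     )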

    @staticmethod
    def get_dataset(dataset_id) -> Optional[Dataset]:
        dataset: Optional[Dataset] = Dataset.query.filter_by(id=dataset_id).first()
        return dataset

    @staticmethod
    def check_dataset_model_setting(dataset):
        if dataset.indexing_technique == "high_quality":
            try:
                model_manager = ModelManager()
                model_manager.get_model_instance(
                    tenant_id=dataset.tenant_id,
                    provider=dataset.embedding_model_provider,
                    model_type=ModelType.TEXT_EMBEDDING,
                    model=dataset.embedding_model,
                )
            except LLMBadRequestError:
                raise ValueError(
                    "No Embedding Model available. Please configure a valid provider "
                    "in the Settings -> Model Provider."
                )
            except ProviderTokenNotInitError as ex:
                raise ValueError(f"The dataset is unavailable due to: {ex.description}")

    @staticmethod
    def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, embedding_model: str):
        try:
            model_manager = ModelManager()
            model_manager.get_model_instance(
                tenant_id=tenant_id,
                provider=embedding_model_provider,
                model_type=ModelType.TEXT_EMBEDDING,
                model=embedding_model,
            )
        except LLMBadRequestError:
            raise ValueError(
                "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider."
            )
        except ProviderTokenNotInitError as ex:
            raise ValueError(f"The dataset is unavailable due to: {ex.description}")
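
    # Design note: both checks above convert provider-layer errors
    # (LLMBadRequestError, ProviderTokenNotInitError) into plain ValueError,
    # presumably so API controllers can surface a readable message without
    # depending on provider internals.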

    @staticmethod
    def update_dataset(dataset_id, data, user):
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise ValueError("Dataset not found")
        DatasetService.check_dataset_permission(dataset, user)
        if dataset.provider == "external":
            external_retrieval_model = data.get("external_retrieval_model", None)
            if external_retrieval_model:
                dataset.retrieval_model = external_retrieval_model
            dataset.name = data.get("name", dataset.name)
            dataset.description = data.get("description", "")
            permission = data.get("permission")
            if permission:
                dataset.permission = permission
            external_knowledge_id = data.get("external_knowledge_id", None)
            db.session.add(dataset)
            if not external_knowledge_id:
                raise ValueError("External knowledge id is required.")
            external_knowledge_api_id = data.get("external_knowledge_api_id", None)
            if not external_knowledge_api_id:
                raise ValueError("External knowledge api id is required.")
            external_knowledge_binding = ExternalKnowledgeBindings.query.filter_by(dataset_id=dataset_id).first()
            if (
                external_knowledge_binding.external_knowledge_id != external_knowledge_id
                or external_knowledge_binding.external_knowledge_api_id != external_knowledge_api_id
            ):
                external_knowledge_binding.external_knowledge_id = external_knowledge_id
                external_knowledge_binding.external_knowledge_api_id = external_knowledge_api_id
                db.session.add(external_knowledge_binding)
            db.session.commit()
        else:
            data.pop("partial_member_list", None)
            data.pop("external_knowledge_api_id", None)
            data.pop("external_knowledge_id", None)
            data.pop("external_retrieval_model", None)
            filtered_data = {k: v for k, v in data.items() if v is not None or k == "description"}
            action = None
            if dataset.indexing_technique != data["indexing_technique"]:
                # if update indexing_technique
                if data["indexing_technique"] == "economy":
                    action = "remove"
                    filtered_data["embedding_model"] = None
                    filtered_data["embedding_model_provider"] = None
                    filtered_data["collection_binding_id"] = None
                elif data["indexing_technique"] == "high_quality":
                    action = "add"
                    # get embedding model setting
                    try:
                        model_manager = ModelManager()
                        embedding_model = model_manager.get_model_instance(
                            tenant_id=current_user.current_tenant_id,
                            provider=data["embedding_model_provider"],
                            model_type=ModelType.TEXT_EMBEDDING,
                            model=data["embedding_model"],
                        )
                        filtered_data["embedding_model"] = embedding_model.model
                        filtered_data["embedding_model_provider"] = embedding_model.provider
                        dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding(
                            embedding_model.provider, embedding_model.model
                        )
                        filtered_data["collection_binding_id"] = dataset_collection_binding.id
                    except LLMBadRequestError:
                        raise ValueError(
                            "No Embedding Model available. Please configure a valid provider "
                            "in the Settings -> Model Provider."
                        )
                    except ProviderTokenNotInitError as ex:
                        raise ValueError(ex.description)
            else:
                if (
                    data["embedding_model_provider"] != dataset.embedding_model_provider
                    or data["embedding_model"] != dataset.embedding_model
                ):
                    action = "update"
                    try:
                        model_manager = ModelManager()
                        embedding_model = model_manager.get_model_instance(
                            tenant_id=current_user.current_tenant_id,
                            provider=data["embedding_model_provider"],
                            model_type=ModelType.TEXT_EMBEDDING,
                            model=data["embedding_model"],
                        )
                        filtered_data["embedding_model"] = embedding_model.model
                        filtered_data["embedding_model_provider"] = embedding_model.provider
                        dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding(
                            embedding_model.provider, embedding_model.model
                        )
                        filtered_data["collection_binding_id"] = dataset_collection_binding.id
                    except LLMBadRequestError:
                        raise ValueError(
                            "No Embedding Model available. Please configure a valid provider "
                            "in the Settings -> Model Provider."
                        )
                    except ProviderTokenNotInitError as ex:
                        raise ValueError(ex.description)
            filtered_data["updated_by"] = user.id
            filtered_data["updated_at"] = datetime.datetime.now()
            # update Retrieval model
            filtered_data["retrieval_model"] = data["retrieval_model"]
            dataset.query.filter_by(id=dataset_id).update(filtered_data)
            db.session.commit()
            if action:
                deal_dataset_vector_index_task.delay(dataset_id, action)
        return dataset
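
    # Side-effect summary for update_dataset: deal_dataset_vector_index_task
    # is only queued when the embedding setup changes. The action is "remove"
    # when switching to "economy", "add" when switching to "high_quality", and
    # "update" when only the embedding model or provider changes.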

    @staticmethod
    def delete_dataset(dataset_id, user):
        dataset = DatasetService.get_dataset(dataset_id)
        if dataset is None:
            return False
        DatasetService.check_dataset_permission(dataset, user)
        dataset_was_deleted.send(dataset)
        db.session.delete(dataset)
        db.session.commit()
        return True

    @staticmethod
    def dataset_use_check(dataset_id) -> bool:
        count = AppDatasetJoin.query.filter_by(dataset_id=dataset_id).count()
        if count > 0:
            return True
        return False

    @staticmethod
    def check_dataset_permission(dataset, user):
        if dataset.tenant_id != user.current_tenant_id:
            logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}")
            raise NoPermissionError("You do not have permission to access this dataset.")
        if user.current_role not in (TenantAccountRole.OWNER, TenantAccountRole.ADMIN):
            if dataset.permission == DatasetPermissionEnum.ONLY_ME and dataset.created_by != user.id:
                logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}")
                raise NoPermissionError("You do not have permission to access this dataset.")
            if dataset.permission == "partial_members":
                user_permission = DatasetPermission.query.filter_by(dataset_id=dataset.id, account_id=user.id).first()
                if (
                    not user_permission
                    and dataset.tenant_id != user.current_tenant_id
                    and dataset.created_by != user.id
                ):
                    logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}")
                    raise NoPermissionError("You do not have permission to access this dataset.")

    @staticmethod
    def check_dataset_operator_permission(user: Optional[Account] = None, dataset: Optional[Dataset] = None):
        if not dataset:
            raise ValueError("Dataset not found")
        if not user:
            raise ValueError("User not found")
        if user.current_role not in (TenantAccountRole.OWNER, TenantAccountRole.ADMIN):
            if dataset.permission == DatasetPermissionEnum.ONLY_ME:
                if dataset.created_by != user.id:
                    raise NoPermissionError("You do not have permission to access this dataset.")
            elif dataset.permission == DatasetPermissionEnum.PARTIAL_TEAM:
                if not any(
                    dp.dataset_id == dataset.id for dp in DatasetPermission.query.filter_by(account_id=user.id).all()
                ):
                    raise NoPermissionError("You do not have permission to access this dataset.")

    @staticmethod
    def get_dataset_queries(dataset_id: str, page: int, per_page: int):
        dataset_queries = (
            DatasetQuery.query.filter_by(dataset_id=dataset_id)
            .order_by(db.desc(DatasetQuery.created_at))
            .paginate(page=page, per_page=per_page, max_per_page=100, error_out=False)
        )
        return dataset_queries.items, dataset_queries.total

    @staticmethod
    def get_related_apps(dataset_id: str):
        return (
            AppDatasetJoin.query.filter(AppDatasetJoin.dataset_id == dataset_id)
            .order_by(db.desc(AppDatasetJoin.created_at))
            .all()
        )

    @staticmethod
    def get_dataset_auto_disable_logs(dataset_id: str) -> dict:
        features = FeatureService.get_features(current_user.current_tenant_id)
        if not features.billing.enabled or features.billing.subscription.plan == "sandbox":
            return {
                "document_ids": [],
                "count": 0,
            }
        # get recent 30 days auto disable logs
        start_date = datetime.datetime.now() - datetime.timedelta(days=30)
        dataset_auto_disable_logs = DatasetAutoDisableLog.query.filter(
            DatasetAutoDisableLog.dataset_id == dataset_id,
            DatasetAutoDisableLog.created_at >= start_date,
        ).all()
        if dataset_auto_disable_logs:
            return {
                "document_ids": [log.document_id for log in dataset_auto_disable_logs],
                "count": len(dataset_auto_disable_logs),
            }
        return {
            "document_ids": [],
            "count": 0,
        }


class DocumentService:
    DEFAULT_RULES: dict[str, Any] = {
        "mode": "custom",
        "rules": {
            "pre_processing_rules": [
                {"id": "remove_extra_spaces", "enabled": True},
                {"id": "remove_urls_emails", "enabled": False},
            ],
            "segmentation": {"delimiter": "\n", "max_tokens": 500, "chunk_overlap": 50},
        },
        "limits": {
            "indexing_max_segmentation_tokens_length": dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH,
        },
    }

    DOCUMENT_METADATA_SCHEMA: dict[str, Any] = {
        "book": {
            "title": str,
            "language": str,
            "author": str,
            "publisher": str,
            "publication_date": str,
            "isbn": str,
            "category": str,
        },
        "web_page": {
            "title": str,
            "url": str,
            "language": str,
            "publish_date": str,
            "author/publisher": str,
            "topic/keywords": str,
            "description": str,
        },
        "paper": {
            "title": str,
            "language": str,
            "author": str,
            "publish_date": str,
            "journal/conference_name": str,
            "volume/issue/page_numbers": str,
            "doi": str,
            "topic/keywords": str,
            "abstract": str,
        },
        "social_media_post": {
            "platform": str,
            "author/username": str,
            "publish_date": str,
            "post_url": str,
            "topic/tags": str,
        },
        "wikipedia_entry": {
            "title": str,
            "language": str,
            "web_page_url": str,
            "last_edit_date": str,
            "editor/contributor": str,
            "summary/introduction": str,
        },
        "personal_document": {
            "title": str,
            "author": str,
            "creation_date": str,
            "last_modified_date": str,
            "document_type": str,
            "tags/category": str,
        },
        "business_document": {
            "title": str,
            "author": str,
            "creation_date": str,
            "last_modified_date": str,
            "document_type": str,
            "department/team": str,
        },
        "im_chat_log": {
            "chat_platform": str,
            "chat_participants/group_name": str,
            "start_date": str,
            "end_date": str,
            "summary": str,
        },
        "synced_from_notion": {
            "title": str,
            "language": str,
            "author/creator": str,
            "creation_date": str,
            "last_modified_date": str,
            "notion_page_link": str,
            "category/tags": str,
            "description": str,
        },
        "synced_from_github": {
            "repository_name": str,
            "repository_description": str,
            "repository_owner/organization": str,
            "code_filename": str,
            "code_file_path": str,
            "programming_language": str,
            "github_link": str,
            "open_source_license": str,
            "commit_date": str,
            "commit_author": str,
        },
        "others": dict,
    }
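
    # A minimal validation sketch (hypothetical helper, not part of this
    # module): every value in the schema above is a type, so checking incoming
    # metadata reduces to isinstance tests.
    #
    #     def validate_metadata(doc_type: str, metadata: dict) -> bool:
    #         schema = DocumentService.DOCUMENT_METADATA_SCHEMA.get(doc_type)
    #         if schema is dict:  # the "others" entry accepts any dict
    #             return isinstance(metadata, dict)
    #         return schema is not None and all(
    #             isinstance(metadata.get(field), expected)
    #             for field, expected in schema.items()
    #         )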

    @staticmethod
    def get_document(dataset_id: str, document_id: Optional[str] = None) -> Optional[Document]:
        if document_id:
            document = (
                db.session.query(Document).filter(Document.id == document_id, Document.dataset_id == dataset_id).first()
            )
            return document
        else:
            return None

    @staticmethod
    def get_document_by_id(document_id: str) -> Optional[Document]:
        document = db.session.query(Document).filter(Document.id == document_id).first()
        return document

    @staticmethod
    def get_document_by_dataset_id(dataset_id: str) -> list[Document]:
        documents = db.session.query(Document).filter(Document.dataset_id == dataset_id, Document.enabled == True).all()
        return documents

    @staticmethod
    def get_error_documents_by_dataset_id(dataset_id: str) -> list[Document]:
        documents = (
            db.session.query(Document)
            .filter(Document.dataset_id == dataset_id, Document.indexing_status.in_(["error", "paused"]))
            .all()
        )
        return documents

    @staticmethod
    def get_batch_documents(dataset_id: str, batch: str) -> list[Document]:
        documents = (
            db.session.query(Document)
            .filter(
                Document.batch == batch,
                Document.dataset_id == dataset_id,
                Document.tenant_id == current_user.current_tenant_id,
            )
            .all()
        )
        return documents

    @staticmethod
    def get_document_file_detail(file_id: str):
        file_detail = db.session.query(UploadFile).filter(UploadFile.id == file_id).one_or_none()
        return file_detail

    @staticmethod
    def check_archived(document):
        if document.archived:
            return True
        else:
            return False

    @staticmethod
    def delete_document(document):
        # trigger document_was_deleted signal
        file_id = None
        if document.data_source_type == "upload_file":
            if document.data_source_info:
                data_source_info = document.data_source_info_dict
                if data_source_info and "upload_file_id" in data_source_info:
                    file_id = data_source_info["upload_file_id"]
        document_was_deleted.send(
            document.id, dataset_id=document.dataset_id, doc_form=document.doc_form, file_id=file_id
        )
        db.session.delete(document)
        db.session.commit()

    @staticmethod
    def delete_documents(dataset: Dataset, document_ids: list[str]):
        documents = db.session.query(Document).filter(Document.id.in_(document_ids)).all()
        file_ids = [
            document.data_source_info_dict["upload_file_id"]
            for document in documents
            if document.data_source_type == "upload_file"
        ]
        batch_clean_document_task.delay(document_ids, dataset.id, dataset.doc_form, file_ids)
        for document in documents:
            db.session.delete(document)
        db.session.commit()

    @staticmethod
    def rename_document(dataset_id: str, document_id: str, name: str) -> Document:
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise ValueError("Dataset not found.")
        document = DocumentService.get_document(dataset_id, document_id)
        if not document:
            raise ValueError("Document not found.")
        if document.tenant_id != current_user.current_tenant_id:
            raise ValueError("No permission.")
        document.name = name
        db.session.add(document)
        db.session.commit()
        return document

    @staticmethod
    def pause_document(document):
        if document.indexing_status not in {"waiting", "parsing", "cleaning", "splitting", "indexing"}:
            raise DocumentIndexingError()
        # update document to be paused
        document.is_paused = True
        document.paused_by = current_user.id
        document.paused_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
        db.session.add(document)
        db.session.commit()
        # set document paused flag
        indexing_cache_key = "document_{}_is_paused".format(document.id)
        redis_client.setnx(indexing_cache_key, "True")

    @staticmethod
    def recover_document(document):
        if not document.is_paused:
            raise DocumentIndexingError()
        # update document to be recovered
        document.is_paused = False
        document.paused_by = None
        document.paused_at = None
        db.session.add(document)
        db.session.commit()
        # delete paused flag
        indexing_cache_key = "document_{}_is_paused".format(document.id)
        redis_client.delete(indexing_cache_key)
        # trigger async task
        recover_document_indexing_task.delay(document.dataset_id, document.id)
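
    # pause_document and recover_document coordinate with indexing workers
    # through a Redis flag rather than the database: setnx writes the key on
    # pause, and recover deletes it before re-queuing the indexing task. The
    # key is presumably polled by the running indexing task. Lifecycle sketch:
    #
    #     redis_client.setnx(f"document_{document.id}_is_paused", "True")  # pause
    #     redis_client.delete(f"document_{document.id}_is_paused")         # recover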

    @staticmethod
    def retry_document(dataset_id: str, documents: list[Document]):
        for document in documents:
            # add retry flag
            retry_indexing_cache_key = "document_{}_is_retried".format(document.id)
            cache_result = redis_client.get(retry_indexing_cache_key)
            if cache_result is not None:
                raise ValueError("Document is being retried, please try again later")
            # retry document indexing
            document.indexing_status = "waiting"
            db.session.add(document)
            db.session.commit()
            redis_client.setex(retry_indexing_cache_key, 600, 1)
        # trigger async task
        document_ids = [document.id for document in documents]
        retry_document_indexing_task.delay(dataset_id, document_ids)

    @staticmethod
    def sync_website_document(dataset_id: str, document: Document):
        # add sync flag
        sync_indexing_cache_key = "document_{}_is_sync".format(document.id)
        cache_result = redis_client.get(sync_indexing_cache_key)
        if cache_result is not None:
            raise ValueError("Document is being synced, please try again later")
        # sync document indexing
        document.indexing_status = "waiting"
        data_source_info = document.data_source_info_dict
        data_source_info["mode"] = "scrape"
        document.data_source_info = json.dumps(data_source_info, ensure_ascii=False)
        db.session.add(document)
        db.session.commit()
        redis_client.setex(sync_indexing_cache_key, 600, 1)
        sync_website_document_indexing_task.delay(dataset_id, document.id)

    @staticmethod
    def get_documents_position(dataset_id):
        document = Document.query.filter_by(dataset_id=dataset_id).order_by(Document.position.desc()).first()
        if document:
            return document.position + 1
        else:
            return 1

    @staticmethod
    def save_document_with_dataset_id(
        dataset: Dataset,
        knowledge_config: KnowledgeConfig,
        account: Account | Any,
        dataset_process_rule: Optional[DatasetProcessRule] = None,
        created_from: str = "web",
    ):
        # check document limit
        features = FeatureService.get_features(current_user.current_tenant_id)
        if features.billing.enabled:
            if not knowledge_config.original_document_id:
                count = 0
                if knowledge_config.data_source:
                    if knowledge_config.data_source.info_list.data_source_type == "upload_file":
                        upload_file_list = knowledge_config.data_source.info_list.file_info_list.file_ids  # type: ignore
                        count = len(upload_file_list)
                    elif knowledge_config.data_source.info_list.data_source_type == "notion_import":
                        notion_info_list = knowledge_config.data_source.info_list.notion_info_list
                        for notion_info in notion_info_list:  # type: ignore
                            count = count + len(notion_info.pages)
                    elif knowledge_config.data_source.info_list.data_source_type == "website_crawl":
                        website_info = knowledge_config.data_source.info_list.website_info_list
                        count = len(website_info.urls)  # type: ignore
                    batch_upload_limit = int(dify_config.BATCH_UPLOAD_LIMIT)
                    if count > batch_upload_limit:
                        raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
                    DocumentService.check_documents_upload_quota(count, features)
        # if dataset is empty, update dataset data_source_type
        if not dataset.data_source_type:
            dataset.data_source_type = knowledge_config.data_source.info_list.data_source_type  # type: ignore
        if not dataset.indexing_technique:
            if knowledge_config.indexing_technique not in Dataset.INDEXING_TECHNIQUE_LIST:
                raise ValueError("Indexing technique is invalid")
            dataset.indexing_technique = knowledge_config.indexing_technique
            if knowledge_config.indexing_technique == "high_quality":
                model_manager = ModelManager()
                embedding_model = model_manager.get_default_model_instance(
                    tenant_id=current_user.current_tenant_id, model_type=ModelType.TEXT_EMBEDDING
                )
                dataset.embedding_model = embedding_model.model
                dataset.embedding_model_provider = embedding_model.provider
                dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding(
                    embedding_model.provider, embedding_model.model
                )
                dataset.collection_binding_id = dataset_collection_binding.id
                if not dataset.retrieval_model:
                    default_retrieval_model = {
                        "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
                        "reranking_enable": False,
                        "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
                        "top_k": 2,
                        "score_threshold_enabled": False,
                    }
                    dataset.retrieval_model = knowledge_config.retrieval_model.model_dump() or default_retrieval_model  # type: ignore
        documents = []
        if knowledge_config.original_document_id:
            document = DocumentService.update_document_with_dataset_id(dataset, knowledge_config, account)
            documents.append(document)
            batch = document.batch
        else:
            batch = time.strftime("%Y%m%d%H%M%S") + str(random.randint(100000, 999999))
            # save process rule
            if not dataset_process_rule:
                process_rule = knowledge_config.process_rule
                if process_rule:
                    if process_rule.mode in ("custom", "hierarchical"):
                        dataset_process_rule = DatasetProcessRule(
                            dataset_id=dataset.id,
                            mode=process_rule.mode,
                            rules=process_rule.rules.model_dump_json() if process_rule.rules else None,
                            created_by=account.id,
                        )
                    elif process_rule.mode == "automatic":
                        dataset_process_rule = DatasetProcessRule(
                            dataset_id=dataset.id,
                            mode=process_rule.mode,
                            rules=json.dumps(DatasetProcessRule.AUTOMATIC_RULES),
                            created_by=account.id,
                        )
                    else:
                        logging.warning(
                            f"Invalid process rule mode: {process_rule.mode}, cannot find dataset process rule"
                        )
                        return
                    db.session.add(dataset_process_rule)
                    db.session.commit()
            lock_name = "add_document_lock_dataset_id_{}".format(dataset.id)
            with redis_client.lock(lock_name, timeout=600):
                position = DocumentService.get_documents_position(dataset.id)
                document_ids = []
                duplicate_document_ids = []
                if knowledge_config.data_source.info_list.data_source_type == "upload_file":
                    upload_file_list = knowledge_config.data_source.info_list.file_info_list.file_ids  # type: ignore
                    for file_id in upload_file_list:
                        file = (
                            db.session.query(UploadFile)
                            .filter(UploadFile.tenant_id == dataset.tenant_id, UploadFile.id == file_id)
                            .first()
                        )
                        # raise error if file not found
                        if not file:
                            raise FileNotExistsError()
                        file_name = file.name
                        data_source_info = {
                            "upload_file_id": file_id,
                        }
                        # check duplicate
                        if knowledge_config.duplicate:
                            document = Document.query.filter_by(
                                dataset_id=dataset.id,
                                tenant_id=current_user.current_tenant_id,
                                data_source_type="upload_file",
                                enabled=True,
                                name=file_name,
                            ).first()
                            if document:
                                document.dataset_process_rule_id = dataset_process_rule.id  # type: ignore
                                document.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
                                document.created_from = created_from
                                document.doc_form = knowledge_config.doc_form
                                document.doc_language = knowledge_config.doc_language
                                document.data_source_info = json.dumps(data_source_info)
                                document.batch = batch
                                document.indexing_status = "waiting"
                                db.session.add(document)
                                documents.append(document)
                                duplicate_document_ids.append(document.id)
                                continue
                        document = DocumentService.build_document(
                            dataset,
                            dataset_process_rule.id,  # type: ignore
                            knowledge_config.data_source.info_list.data_source_type,
                            knowledge_config.doc_form,
                            knowledge_config.doc_language,
                            data_source_info,
                            created_from,
                            position,
                            account,
                            file_name,
                            batch,
                        )
                        db.session.add(document)
                        db.session.flush()
                        document_ids.append(document.id)
                        documents.append(document)
                        position += 1
                elif knowledge_config.data_source.info_list.data_source_type == "notion_import":
                    notion_info_list = knowledge_config.data_source.info_list.notion_info_list
                    if not notion_info_list:
                        raise ValueError("No notion info list found.")
                    exist_page_ids = []
                    exist_document = {}
                    documents = Document.query.filter_by(
                        dataset_id=dataset.id,
                        tenant_id=current_user.current_tenant_id,
                        data_source_type="notion_import",
                        enabled=True,
                    ).all()
                    if documents:
                        for document in documents:
                            data_source_info = json.loads(document.data_source_info)
                            exist_page_ids.append(data_source_info["notion_page_id"])
                            exist_document[data_source_info["notion_page_id"]] = document.id
                    for notion_info in notion_info_list:
                        workspace_id = notion_info.workspace_id
                        data_source_binding = DataSourceOauthBinding.query.filter(
                            db.and_(
                                DataSourceOauthBinding.tenant_id == current_user.current_tenant_id,
                                DataSourceOauthBinding.provider == "notion",
                                DataSourceOauthBinding.disabled == False,
                                DataSourceOauthBinding.source_info["workspace_id"] == f'"{workspace_id}"',
                            )
                        ).first()
                        if not data_source_binding:
                            raise ValueError("Data source binding not found.")
                        for page in notion_info.pages:
                            if page.page_id not in exist_page_ids:
                                data_source_info = {
                                    "notion_workspace_id": workspace_id,
                                    "notion_page_id": page.page_id,
                                    "notion_page_icon": page.page_icon.model_dump() if page.page_icon else None,
                                    "type": page.type,
                                }
                                document = DocumentService.build_document(
                                    dataset,
                                    dataset_process_rule.id,  # type: ignore
                                    knowledge_config.data_source.info_list.data_source_type,
                                    knowledge_config.doc_form,
                                    knowledge_config.doc_language,
                                    data_source_info,
                                    created_from,
                                    position,
                                    account,
                                    page.page_name,
                                    batch,
                                )
                                db.session.add(document)
                                db.session.flush()
                                document_ids.append(document.id)
                                documents.append(document)
                                position += 1
                            else:
                                exist_document.pop(page.page_id)
                    # delete not selected documents
                    if len(exist_document) > 0:
                        clean_notion_document_task.delay(list(exist_document.values()), dataset.id)
                elif knowledge_config.data_source.info_list.data_source_type == "website_crawl":
                    website_info = knowledge_config.data_source.info_list.website_info_list
                    if not website_info:
                        raise ValueError("No website info list found.")
                    urls = website_info.urls
                    for url in urls:
                        data_source_info = {
                            "url": url,
                            "provider": website_info.provider,
                            "job_id": website_info.job_id,
                            "only_main_content": website_info.only_main_content,
                            "mode": "crawl",
                        }
                        if len(url) > 255:
                            document_name = url[:200] + "..."
                        else:
                            document_name = url
                        document = DocumentService.build_document(
                            dataset,
                            dataset_process_rule.id,  # type: ignore
                            knowledge_config.data_source.info_list.data_source_type,
                            knowledge_config.doc_form,
                            knowledge_config.doc_language,
                            data_source_info,
                            created_from,
                            position,
                            account,
                            document_name,
                            batch,
                        )
                        db.session.add(document)
                        db.session.flush()
                        document_ids.append(document.id)
                        documents.append(document)
                        position += 1
                db.session.commit()
                # trigger async task
                if document_ids:
                    document_indexing_task.delay(dataset.id, document_ids)
                if duplicate_document_ids:
                    duplicate_document_indexing_task.delay(dataset.id, duplicate_document_ids)
        return documents, batch
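
    # The batch identifier groups all documents created in one call; as the
    # code above shows, it is a timestamp plus a random six-digit suffix.
    # Illustrative value:
    #
    #     time.strftime("%Y%m%d%H%M%S") + str(random.randint(100000, 999999))
    #     # -> e.g. "20240501123045" + "417283" == "20240501123045417283"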

    @staticmethod
    def check_documents_upload_quota(count: int, features: FeatureModel):
        can_upload_size = features.documents_upload_quota.limit - features.documents_upload_quota.size
        if count > can_upload_size:
            raise ValueError(
                f"You have reached the limit of your subscription. Only {can_upload_size} documents can be uploaded."
            )
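
    # Quota arithmetic (illustrative numbers): with a plan limit of 500
    # documents and 480 already counted, can_upload_size is 20, so a batch of
    # 25 raises ValueError while a batch of 20 passes.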

    @staticmethod
    def build_document(
        dataset: Dataset,
        process_rule_id: str,
        data_source_type: str,
        document_form: str,
        document_language: str,
        data_source_info: dict,
        created_from: str,
        position: int,
        account: Account,
        name: str,
        batch: str,
    ):
        document = Document(
            tenant_id=dataset.tenant_id,
            dataset_id=dataset.id,
            position=position,
            data_source_type=data_source_type,
            data_source_info=json.dumps(data_source_info),
            dataset_process_rule_id=process_rule_id,
            batch=batch,
            name=name,
            created_from=created_from,
            created_by=account.id,
            doc_form=document_form,
            doc_language=document_language,
        )
        return document

    @staticmethod
    def get_tenant_documents_count():
        documents_count = Document.query.filter(
            Document.completed_at.isnot(None),
            Document.enabled == True,
            Document.archived == False,
            Document.tenant_id == current_user.current_tenant_id,
        ).count()
        return documents_count

    @staticmethod
    def update_document_with_dataset_id(
        dataset: Dataset,
        document_data: KnowledgeConfig,
        account: Account,
        dataset_process_rule: Optional[DatasetProcessRule] = None,
        created_from: str = "web",
    ):
        DatasetService.check_dataset_model_setting(dataset)
        document = DocumentService.get_document(dataset.id, document_data.original_document_id)
        if document is None:
            raise NotFound("Document not found")
        if document.display_status != "available":
            raise ValueError("Document is not available")
        # save process rule
        if document_data.process_rule:
            process_rule = document_data.process_rule
            if process_rule.mode in {"custom", "hierarchical"}:
                dataset_process_rule = DatasetProcessRule(
                    dataset_id=dataset.id,
                    mode=process_rule.mode,
                    rules=process_rule.rules.model_dump_json() if process_rule.rules else None,
                    created_by=account.id,
                )
            elif process_rule.mode == "automatic":
                dataset_process_rule = DatasetProcessRule(
                    dataset_id=dataset.id,
                    mode=process_rule.mode,
                    rules=json.dumps(DatasetProcessRule.AUTOMATIC_RULES),
                    created_by=account.id,
                )
            if dataset_process_rule is not None:
                db.session.add(dataset_process_rule)
                db.session.commit()
                document.dataset_process_rule_id = dataset_process_rule.id
        # update document data source
        if document_data.data_source:
            file_name = ""
            data_source_info = {}
            if document_data.data_source.info_list.data_source_type == "upload_file":
                if not document_data.data_source.info_list.file_info_list:
                    raise ValueError("No file info list found.")
                upload_file_list = document_data.data_source.info_list.file_info_list.file_ids
                for file_id in upload_file_list:
                    file = (
                        db.session.query(UploadFile)
                        .filter(UploadFile.tenant_id == dataset.tenant_id, UploadFile.id == file_id)
                        .first()
                    )
                    # raise error if file not found
                    if not file:
                        raise FileNotExistsError()
                    file_name = file.name
                    data_source_info = {
                        "upload_file_id": file_id,
                    }
            elif document_data.data_source.info_list.data_source_type == "notion_import":
                if not document_data.data_source.info_list.notion_info_list:
                    raise ValueError("No notion info list found.")
                notion_info_list = document_data.data_source.info_list.notion_info_list
                for notion_info in notion_info_list:
                    workspace_id = notion_info.workspace_id
                    data_source_binding = DataSourceOauthBinding.query.filter(
                        db.and_(
                            DataSourceOauthBinding.tenant_id == current_user.current_tenant_id,
                            DataSourceOauthBinding.provider == "notion",
                            DataSourceOauthBinding.disabled == False,
                            DataSourceOauthBinding.source_info["workspace_id"] == f'"{workspace_id}"',
                        )
                    ).first()
                    if not data_source_binding:
                        raise ValueError("Data source binding not found.")
                    for page in notion_info.pages:
                        data_source_info = {
                            "notion_workspace_id": workspace_id,
                            "notion_page_id": page.page_id,
                            "notion_page_icon": page.page_icon.model_dump() if page.page_icon else None,  # type: ignore
                            "type": page.type,
                        }
            elif document_data.data_source.info_list.data_source_type == "website_crawl":
                website_info = document_data.data_source.info_list.website_info_list
                if website_info:
                    urls = website_info.urls
                    for url in urls:
                        data_source_info = {
                            "url": url,
                            "provider": website_info.provider,
                            "job_id": website_info.job_id,
                            "only_main_content": website_info.only_main_content,  # type: ignore
                            "mode": "crawl",
                        }
            document.data_source_type = document_data.data_source.info_list.data_source_type
            document.data_source_info = json.dumps(data_source_info)
            document.name = file_name
        # update document name
        if document_data.name:
            document.name = document_data.name
        # update document to be waiting
        document.indexing_status = "waiting"
        document.completed_at = None
        document.processing_started_at = None
        document.parsing_completed_at = None
        document.cleaning_completed_at = None
        document.splitting_completed_at = None
        document.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
        document.created_from = created_from
        document.doc_form = document_data.doc_form
        db.session.add(document)
        db.session.commit()
        # update document segment
        update_params = {DocumentSegment.status: "re_segment"}
        DocumentSegment.query.filter_by(document_id=document.id).update(update_params)
        db.session.commit()
        # trigger async task
        document_indexing_update_task.delay(document.dataset_id, document.id)
        return document
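
    # Note: existing segments are not deleted here; they are flagged
    # status="re_segment" and document_indexing_update_task rebuilds the index
    # asynchronously, presumably so a partially re-indexed document is never
    # left with an empty segment set.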
    @staticmethod
    def save_document_without_dataset_id(tenant_id: str, knowledge_config: KnowledgeConfig, account: Account):
        features = FeatureService.get_features(current_user.current_tenant_id)

        if features.billing.enabled:
            count = 0
            if knowledge_config.data_source.info_list.data_source_type == "upload_file":
                upload_file_list = (
                    knowledge_config.data_source.info_list.file_info_list.file_ids
                    if knowledge_config.data_source.info_list.file_info_list
                    else []
                )
                count = len(upload_file_list)
            elif knowledge_config.data_source.info_list.data_source_type == "notion_import":
                notion_info_list = knowledge_config.data_source.info_list.notion_info_list
                if notion_info_list:
                    for notion_info in notion_info_list:
                        count = count + len(notion_info.pages)
            elif knowledge_config.data_source.info_list.data_source_type == "website_crawl":
                website_info = knowledge_config.data_source.info_list.website_info_list
                if website_info:
                    count = len(website_info.urls)
            batch_upload_limit = int(dify_config.BATCH_UPLOAD_LIMIT)
            if count > batch_upload_limit:
                raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")

            DocumentService.check_documents_upload_quota(count, features)

        dataset_collection_binding_id = None
        retrieval_model = None
        if knowledge_config.indexing_technique == "high_quality":
            dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding(
                knowledge_config.embedding_model_provider,  # type: ignore
                knowledge_config.embedding_model,  # type: ignore
            )
            dataset_collection_binding_id = dataset_collection_binding.id
            if knowledge_config.retrieval_model:
                retrieval_model = knowledge_config.retrieval_model
            else:
                retrieval_model = RetrievalModel(
                    search_method=RetrievalMethod.SEMANTIC_SEARCH.value,
                    reranking_enable=False,
                    reranking_model=RerankingModel(reranking_provider_name="", reranking_model_name=""),
                    top_k=2,
                    score_threshold_enabled=False,
                )
        # save dataset
        dataset = Dataset(
            tenant_id=tenant_id,
            name="",
            data_source_type=knowledge_config.data_source.info_list.data_source_type,
            indexing_technique=knowledge_config.indexing_technique,
            created_by=account.id,
            embedding_model=knowledge_config.embedding_model,
            embedding_model_provider=knowledge_config.embedding_model_provider,
            collection_binding_id=dataset_collection_binding_id,
            retrieval_model=retrieval_model.model_dump() if retrieval_model else None,
        )

        db.session.add(dataset)  # type: ignore
        db.session.flush()

        documents, batch = DocumentService.save_document_with_dataset_id(dataset, knowledge_config, account)

        cut_length = 18
        cut_name = documents[0].name[:cut_length]
        dataset.name = cut_name + "..."
        dataset.description = "useful for when you want to answer queries about the " + documents[0].name
        db.session.commit()

        return dataset, documents, batch
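
    # Usage sketch (editor's note): a minimal call, assuming a request context
    # with `current_user` set and a `knowledge_config` that already passed
    # document_create_args_validate; all names are caller-supplied:
    #
    #   dataset, documents, batch = DocumentService.save_document_without_dataset_id(
    #       tenant_id=current_user.current_tenant_id,
    #       knowledge_config=knowledge_config,
    #       account=current_user,
    #   )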
    @classmethod
    def document_create_args_validate(cls, knowledge_config: KnowledgeConfig):
        if not knowledge_config.data_source and not knowledge_config.process_rule:
            raise ValueError("Data source or Process rule is required")
        if knowledge_config.data_source:
            DocumentService.data_source_args_validate(knowledge_config)
        if knowledge_config.process_rule:
            DocumentService.process_rule_args_validate(knowledge_config)
    @classmethod
    def data_source_args_validate(cls, knowledge_config: KnowledgeConfig):
        if not knowledge_config.data_source:
            raise ValueError("Data source is required")

        # check info_list before dereferencing it for the type check
        if not knowledge_config.data_source.info_list:
            raise ValueError("Data source info is required")

        if knowledge_config.data_source.info_list.data_source_type not in Document.DATA_SOURCES:
            raise ValueError("Data source type is invalid")

        if knowledge_config.data_source.info_list.data_source_type == "upload_file":
            if not knowledge_config.data_source.info_list.file_info_list:
                raise ValueError("File source info is required")
        if knowledge_config.data_source.info_list.data_source_type == "notion_import":
            if not knowledge_config.data_source.info_list.notion_info_list:
                raise ValueError("Notion source info is required")
        if knowledge_config.data_source.info_list.data_source_type == "website_crawl":
            if not knowledge_config.data_source.info_list.website_info_list:
                raise ValueError("Website source info is required")
    @classmethod
    def process_rule_args_validate(cls, knowledge_config: KnowledgeConfig):
        if not knowledge_config.process_rule:
            raise ValueError("Process rule is required")
        if not knowledge_config.process_rule.mode:
            raise ValueError("Process rule mode is required")
        if knowledge_config.process_rule.mode not in DatasetProcessRule.MODES:
            raise ValueError("Process rule mode is invalid")
        if knowledge_config.process_rule.mode == "automatic":
            knowledge_config.process_rule.rules = None
        else:
            if not knowledge_config.process_rule.rules:
                raise ValueError("Process rule rules is required")
            if knowledge_config.process_rule.rules.pre_processing_rules is None:
                raise ValueError("Process rule pre_processing_rules is required")

            unique_pre_processing_rule_dicts = {}
            for pre_processing_rule in knowledge_config.process_rule.rules.pre_processing_rules:
                if not pre_processing_rule.id:
                    raise ValueError("Process rule pre_processing_rules id is required")
                if not isinstance(pre_processing_rule.enabled, bool):
                    raise ValueError("Process rule pre_processing_rules enabled is invalid")
                # deduplicate by rule id, keeping the last occurrence
                unique_pre_processing_rule_dicts[pre_processing_rule.id] = pre_processing_rule
            knowledge_config.process_rule.rules.pre_processing_rules = list(unique_pre_processing_rule_dicts.values())

            if not knowledge_config.process_rule.rules.segmentation:
                raise ValueError("Process rule segmentation is required")
            if not knowledge_config.process_rule.rules.segmentation.separator:
                raise ValueError("Process rule segmentation separator is required")
            if not isinstance(knowledge_config.process_rule.rules.segmentation.separator, str):
                raise ValueError("Process rule segmentation separator is invalid")
            # hierarchical mode with a full-doc parent has no max_tokens requirement
            if not (
                knowledge_config.process_rule.mode == "hierarchical"
                and knowledge_config.process_rule.rules.parent_mode == "full-doc"
            ):
                if not knowledge_config.process_rule.rules.segmentation.max_tokens:
                    raise ValueError("Process rule segmentation max_tokens is required")
                if not isinstance(knowledge_config.process_rule.rules.segmentation.max_tokens, int):
                    raise ValueError("Process rule segmentation max_tokens is invalid")
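
    # Editor's note: in "automatic" mode the caller-supplied rules are
    # discarded (set to None above) and defaults apply downstream; any other
    # mode must ship complete rules. A JSON shape that passes once parsed into
    # KnowledgeConfig, with illustrative values (rule ids must exist in
    # DatasetProcessRule.PRE_PROCESSING_RULES):
    #
    #   {"mode": "custom",
    #    "rules": {"pre_processing_rules": [{"id": "remove_extra_spaces", "enabled": True}],
    #              "segmentation": {"separator": "\n", "max_tokens": 500}}}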
    @classmethod
    def estimate_args_validate(cls, args: dict):
        if "info_list" not in args or not args["info_list"]:
            raise ValueError("Data source info is required")
        if not isinstance(args["info_list"], dict):
            raise ValueError("Data info is invalid")

        if "process_rule" not in args or not args["process_rule"]:
            raise ValueError("Process rule is required")
        if not isinstance(args["process_rule"], dict):
            raise ValueError("Process rule is invalid")

        if "mode" not in args["process_rule"] or not args["process_rule"]["mode"]:
            raise ValueError("Process rule mode is required")
        if args["process_rule"]["mode"] not in DatasetProcessRule.MODES:
            raise ValueError("Process rule mode is invalid")

        if args["process_rule"]["mode"] == "automatic":
            args["process_rule"]["rules"] = {}
        else:
            if "rules" not in args["process_rule"] or not args["process_rule"]["rules"]:
                raise ValueError("Process rule rules is required")
            if not isinstance(args["process_rule"]["rules"], dict):
                raise ValueError("Process rule rules is invalid")

            if (
                "pre_processing_rules" not in args["process_rule"]["rules"]
                or args["process_rule"]["rules"]["pre_processing_rules"] is None
            ):
                raise ValueError("Process rule pre_processing_rules is required")
            if not isinstance(args["process_rule"]["rules"]["pre_processing_rules"], list):
                raise ValueError("Process rule pre_processing_rules is invalid")

            unique_pre_processing_rule_dicts = {}
            for pre_processing_rule in args["process_rule"]["rules"]["pre_processing_rules"]:
                if "id" not in pre_processing_rule or not pre_processing_rule["id"]:
                    raise ValueError("Process rule pre_processing_rules id is required")
                if pre_processing_rule["id"] not in DatasetProcessRule.PRE_PROCESSING_RULES:
                    raise ValueError("Process rule pre_processing_rules id is invalid")
                if "enabled" not in pre_processing_rule or pre_processing_rule["enabled"] is None:
                    raise ValueError("Process rule pre_processing_rules enabled is required")
                if not isinstance(pre_processing_rule["enabled"], bool):
                    raise ValueError("Process rule pre_processing_rules enabled is invalid")
                unique_pre_processing_rule_dicts[pre_processing_rule["id"]] = pre_processing_rule
            args["process_rule"]["rules"]["pre_processing_rules"] = list(unique_pre_processing_rule_dicts.values())

            if (
                "segmentation" not in args["process_rule"]["rules"]
                or args["process_rule"]["rules"]["segmentation"] is None
            ):
                raise ValueError("Process rule segmentation is required")
            if not isinstance(args["process_rule"]["rules"]["segmentation"], dict):
                raise ValueError("Process rule segmentation is invalid")

            if (
                "separator" not in args["process_rule"]["rules"]["segmentation"]
                or not args["process_rule"]["rules"]["segmentation"]["separator"]
            ):
                raise ValueError("Process rule segmentation separator is required")
            if not isinstance(args["process_rule"]["rules"]["segmentation"]["separator"], str):
                raise ValueError("Process rule segmentation separator is invalid")

            if (
                "max_tokens" not in args["process_rule"]["rules"]["segmentation"]
                or not args["process_rule"]["rules"]["segmentation"]["max_tokens"]
            ):
                raise ValueError("Process rule segmentation max_tokens is required")
            if not isinstance(args["process_rule"]["rules"]["segmentation"]["max_tokens"], int):
                raise ValueError("Process rule segmentation max_tokens is invalid")
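
    # Example (editor's sketch): a minimal `args` dict that satisfies
    # estimate_args_validate; all values are illustrative:
    #
    #   DocumentService.estimate_args_validate({
    #       "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": ["..."]}},
    #       "process_rule": {
    #           "mode": "custom",
    #           "rules": {
    #               "pre_processing_rules": [{"id": "remove_extra_spaces", "enabled": True}],
    #               "segmentation": {"separator": "\n", "max_tokens": 500},
    #           },
    #       },
    #   })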


class SegmentService:
    @classmethod
    def segment_create_args_validate(cls, args: dict, document: Document):
        if document.doc_form == "qa_model":
            if "answer" not in args or not args["answer"]:
                raise ValueError("Answer is required")
            if not args["answer"].strip():
                raise ValueError("Answer is empty")
        if "content" not in args or not args["content"] or not args["content"].strip():
            raise ValueError("Content is empty")
    @classmethod
    def create_segment(cls, args: dict, document: Document, dataset: Dataset):
        content = args["content"]
        doc_id = str(uuid.uuid4())
        segment_hash = helper.generate_text_hash(content)
        tokens = 0
        if dataset.indexing_technique == "high_quality":
            model_manager = ModelManager()
            embedding_model = model_manager.get_model_instance(
                tenant_id=current_user.current_tenant_id,
                provider=dataset.embedding_model_provider,
                model_type=ModelType.TEXT_EMBEDDING,
                model=dataset.embedding_model,
            )
            # calc embedding use tokens
            tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])
        lock_name = "add_segment_lock_document_id_{}".format(document.id)
        with redis_client.lock(lock_name, timeout=600):
            max_position = (
                db.session.query(func.max(DocumentSegment.position))
                .filter(DocumentSegment.document_id == document.id)
                .scalar()
            )
            segment_document = DocumentSegment(
                tenant_id=current_user.current_tenant_id,
                dataset_id=document.dataset_id,
                document_id=document.id,
                index_node_id=doc_id,
                index_node_hash=segment_hash,
                position=max_position + 1 if max_position else 1,
                content=content,
                word_count=len(content),
                tokens=tokens,
                status="completed",
                indexing_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
                completed_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
                created_by=current_user.id,
            )
            if document.doc_form == "qa_model":
                segment_document.word_count += len(args["answer"])
                segment_document.answer = args["answer"]

            db.session.add(segment_document)
            # update document word count
            document.word_count += segment_document.word_count
            db.session.add(document)
            db.session.commit()

            # save vector index
            try:
                # "keywords" is optional in the create args; use .get() so a
                # missing key does not raise KeyError here
                VectorService.create_segments_vector(
                    [args.get("keywords")], [segment_document], dataset, document.doc_form
                )
            except Exception as e:
                logging.exception("create segment index failed")
                segment_document.enabled = False
                segment_document.disabled_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
                segment_document.status = "error"
                segment_document.error = str(e)
            db.session.commit()
            segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_document.id).first()
            return segment
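
    # Usage sketch (editor's note): called inside a request context; the
    # document/dataset objects are caller-supplied and hypothetical here:
    #
    #   segment = SegmentService.create_segment(
    #       args={"content": "Paris is the capital of France.", "keywords": ["Paris"]},
    #       document=document,
    #       dataset=dataset,
    #   )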
    @classmethod
    def multi_create_segment(cls, segments: list, document: Document, dataset: Dataset):
        lock_name = "multi_add_segment_lock_document_id_{}".format(document.id)
        increment_word_count = 0
        with redis_client.lock(lock_name, timeout=600):
            embedding_model = None
            if dataset.indexing_technique == "high_quality":
                model_manager = ModelManager()
                embedding_model = model_manager.get_model_instance(
                    tenant_id=current_user.current_tenant_id,
                    provider=dataset.embedding_model_provider,
                    model_type=ModelType.TEXT_EMBEDDING,
                    model=dataset.embedding_model,
                )
            max_position = (
                db.session.query(func.max(DocumentSegment.position))
                .filter(DocumentSegment.document_id == document.id)
                .scalar()
            )
            pre_segment_data_list = []
            segment_data_list = []
            keywords_list = []
            position = max_position + 1 if max_position else 1
            for segment_item in segments:
                content = segment_item["content"]
                doc_id = str(uuid.uuid4())
                segment_hash = helper.generate_text_hash(content)
                tokens = 0
                if dataset.indexing_technique == "high_quality" and embedding_model:
                    # calc embedding use tokens
                    if document.doc_form == "qa_model":
                        tokens = embedding_model.get_text_embedding_num_tokens(
                            texts=[content + segment_item["answer"]]
                        )
                    else:
                        tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])
                segment_document = DocumentSegment(
                    tenant_id=current_user.current_tenant_id,
                    dataset_id=document.dataset_id,
                    document_id=document.id,
                    index_node_id=doc_id,
                    index_node_hash=segment_hash,
                    position=position,
                    content=content,
                    word_count=len(content),
                    tokens=tokens,
                    status="completed",
                    indexing_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
                    completed_at=datetime.datetime.now(datetime.UTC).replace(tzinfo=None),
                    created_by=current_user.id,
                )
                if document.doc_form == "qa_model":
                    segment_document.answer = segment_item["answer"]
                    segment_document.word_count += len(segment_item["answer"])
                increment_word_count += segment_document.word_count
                db.session.add(segment_document)
                segment_data_list.append(segment_document)
                position += 1

                pre_segment_data_list.append(segment_document)
                if "keywords" in segment_item:
                    keywords_list.append(segment_item["keywords"])
                else:
                    keywords_list.append(None)
            # update document word count
            document.word_count += increment_word_count
            db.session.add(document)
            try:
                # save vector index
                VectorService.create_segments_vector(keywords_list, pre_segment_data_list, dataset, document.doc_form)
            except Exception as e:
                logging.exception("create segment index failed")
                for segment_document in segment_data_list:
                    segment_document.enabled = False
                    segment_document.disabled_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
                    segment_document.status = "error"
                    segment_document.error = str(e)
            db.session.commit()
            return segment_data_list
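
    # Usage sketch (editor's note): batch insert; each item mirrors the
    # create_segment args, with "answer" required only for qa_model documents:
    #
    #   created = SegmentService.multi_create_segment(
    #       segments=[
    #           {"content": "chunk one", "keywords": ["one"]},
    #           {"content": "chunk two"},
    #       ],
    #       document=document,
    #       dataset=dataset,
    #   )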
    @classmethod
    def update_segment(cls, args: SegmentUpdateArgs, segment: DocumentSegment, document: Document, dataset: Dataset):
        indexing_cache_key = "segment_{}_indexing".format(segment.id)
        cache_result = redis_client.get(indexing_cache_key)
        if cache_result is not None:
            raise ValueError("Segment is indexing, please try again later")
        if args.enabled is not None:
            action = args.enabled
            if segment.enabled != action:
                if not action:
                    segment.enabled = action
                    segment.disabled_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
                    segment.disabled_by = current_user.id
                    db.session.add(segment)
                    db.session.commit()
                    # Set cache to prevent indexing the same segment multiple times
                    redis_client.setex(indexing_cache_key, 600, 1)
                    disable_segment_from_index_task.delay(segment.id)
                    return segment
        if not segment.enabled:
            # a disabled segment may only be re-enabled, never edited in place
            if args.enabled is None or not args.enabled:
                raise ValueError("Can't update disabled segment")
        try:
            word_count_change = segment.word_count
            content = args.content or segment.content
            if segment.content == content:
                # content unchanged: refresh metadata only
                segment.word_count = len(content)
                if document.doc_form == "qa_model":
                    segment.answer = args.answer
                    segment.word_count += len(args.answer) if args.answer else 0
                word_count_change = segment.word_count - word_count_change
                if args.keywords:
                    segment.keywords = args.keywords
                segment.enabled = True
                segment.disabled_at = None
                segment.disabled_by = None
                db.session.add(segment)
                db.session.commit()
                # update document word count
                if word_count_change != 0:
                    document.word_count = max(0, document.word_count + word_count_change)
                    db.session.add(document)
                # update segment index task
                if args.enabled:
                    VectorService.create_segments_vector(
                        [args.keywords] if args.keywords else None,
                        [segment],
                        dataset,
                        document.doc_form,
                    )
                if document.doc_form == IndexType.PARENT_CHILD_INDEX and args.regenerate_child_chunks:
                    # regenerate child chunks
                    # get embedding model instance
                    if dataset.indexing_technique == "high_quality":
                        # check embedding model setting
                        model_manager = ModelManager()
                        if dataset.embedding_model_provider:
                            embedding_model_instance = model_manager.get_model_instance(
                                tenant_id=dataset.tenant_id,
                                provider=dataset.embedding_model_provider,
                                model_type=ModelType.TEXT_EMBEDDING,
                                model=dataset.embedding_model,
                            )
                        else:
                            embedding_model_instance = model_manager.get_default_model_instance(
                                tenant_id=dataset.tenant_id,
                                model_type=ModelType.TEXT_EMBEDDING,
                            )
                    else:
                        raise ValueError("The knowledge base index technique is not high quality!")
                    # get the process rule
                    processing_rule = (
                        db.session.query(DatasetProcessRule)
                        .filter(DatasetProcessRule.id == document.dataset_process_rule_id)
                        .first()
                    )
                    if not processing_rule:
                        raise ValueError("No processing rule found.")
                    VectorService.generate_child_chunks(
                        segment, document, dataset, embedding_model_instance, processing_rule, True
                    )
            else:
                # content changed: re-hash, re-count tokens and rebuild the index
                segment_hash = helper.generate_text_hash(content)
                tokens = 0
                if dataset.indexing_technique == "high_quality":
                    model_manager = ModelManager()
                    embedding_model = model_manager.get_model_instance(
                        tenant_id=current_user.current_tenant_id,
                        provider=dataset.embedding_model_provider,
                        model_type=ModelType.TEXT_EMBEDDING,
                        model=dataset.embedding_model,
                    )
                    # calc embedding use tokens
                    if document.doc_form == "qa_model":
                        tokens = embedding_model.get_text_embedding_num_tokens(texts=[content + segment.answer])
                    else:
                        tokens = embedding_model.get_text_embedding_num_tokens(texts=[content])
                segment.content = content
                segment.index_node_hash = segment_hash
                segment.word_count = len(content)
                segment.tokens = tokens
                segment.status = "completed"
                segment.indexing_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
                segment.completed_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
                segment.updated_by = current_user.id
                segment.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
                segment.enabled = True
                segment.disabled_at = None
                segment.disabled_by = None
                if document.doc_form == "qa_model":
                    segment.answer = args.answer
                    segment.word_count += len(args.answer) if args.answer else 0
                word_count_change = segment.word_count - word_count_change
                # update document word count
                if word_count_change != 0:
                    document.word_count = max(0, document.word_count + word_count_change)
                    db.session.add(document)
                db.session.add(segment)
                db.session.commit()
                if document.doc_form == IndexType.PARENT_CHILD_INDEX and args.regenerate_child_chunks:
                    # get embedding model instance
                    if dataset.indexing_technique == "high_quality":
                        # check embedding model setting
                        model_manager = ModelManager()
                        if dataset.embedding_model_provider:
                            embedding_model_instance = model_manager.get_model_instance(
                                tenant_id=dataset.tenant_id,
                                provider=dataset.embedding_model_provider,
                                model_type=ModelType.TEXT_EMBEDDING,
                                model=dataset.embedding_model,
                            )
                        else:
                            embedding_model_instance = model_manager.get_default_model_instance(
                                tenant_id=dataset.tenant_id,
                                model_type=ModelType.TEXT_EMBEDDING,
                            )
                    else:
                        raise ValueError("The knowledge base index technique is not high quality!")
                    # get the process rule
                    processing_rule = (
                        db.session.query(DatasetProcessRule)
                        .filter(DatasetProcessRule.id == document.dataset_process_rule_id)
                        .first()
                    )
                    if not processing_rule:
                        raise ValueError("No processing rule found.")
                    VectorService.generate_child_chunks(
                        segment, document, dataset, embedding_model_instance, processing_rule, True
                    )
                elif document.doc_form in (IndexType.PARAGRAPH_INDEX, IndexType.QA_INDEX):
                    # update segment vector index
                    VectorService.update_segment_vector(args.keywords, segment, dataset)
        except Exception as e:
            logging.exception("update segment index failed")
            segment.enabled = False
            segment.disabled_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
            segment.status = "error"
            segment.error = str(e)
            db.session.commit()
        new_segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment.id).first()
        return new_segment
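
    # Usage sketch (editor's note): the SegmentUpdateArgs fields read above
    # are content/answer/keywords/enabled/regenerate_child_chunks. A plain
    # edit that re-embeds the segment might look like (objects caller-supplied):
    #
    #   SegmentService.update_segment(
    #       SegmentUpdateArgs(content="new text", keywords=["new"]),
    #       segment, document, dataset,
    #   )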
    @classmethod
    def delete_segment(cls, segment: DocumentSegment, document: Document, dataset: Dataset):
        indexing_cache_key = "segment_{}_delete_indexing".format(segment.id)
        cache_result = redis_client.get(indexing_cache_key)
        if cache_result is not None:
            raise ValueError("Segment is deleting.")

        # an enabled segment's index entry must be deleted as well
        if segment.enabled:
            # send delete segment index task
            redis_client.setex(indexing_cache_key, 600, 1)
            delete_segment_from_index_task.delay([segment.index_node_id], dataset.id, document.id)
        db.session.delete(segment)
        # update document word count
        document.word_count -= segment.word_count
        db.session.add(document)
        db.session.commit()
    @classmethod
    def delete_segments(cls, segment_ids: list, document: Document, dataset: Dataset):
        index_node_ids = (
            DocumentSegment.query.with_entities(DocumentSegment.index_node_id)
            .filter(
                DocumentSegment.id.in_(segment_ids),
                DocumentSegment.dataset_id == dataset.id,
                DocumentSegment.document_id == document.id,
                DocumentSegment.tenant_id == current_user.current_tenant_id,
            )
            .all()
        )
        index_node_ids = [index_node_id[0] for index_node_id in index_node_ids]

        delete_segment_from_index_task.delay(index_node_ids, dataset.id, document.id)
        db.session.query(DocumentSegment).filter(DocumentSegment.id.in_(segment_ids)).delete()
        db.session.commit()
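
    # Usage sketch (editor's note): bulk deletion removes the rows immediately
    # and purges the vector index asynchronously via delete_segment_from_index_task:
    #
    #   SegmentService.delete_segments([seg.id for seg in segments], document, dataset)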
    @classmethod
    def update_segments_status(cls, segment_ids: list, action: str, dataset: Dataset, document: Document):
        if action == "enable":
            segments = (
                db.session.query(DocumentSegment)
                .filter(
                    DocumentSegment.id.in_(segment_ids),
                    DocumentSegment.dataset_id == dataset.id,
                    DocumentSegment.document_id == document.id,
                    DocumentSegment.enabled == False,
                )
                .all()
            )
            if not segments:
                return
            real_deal_segment_ids = []
            for segment in segments:
                indexing_cache_key = "segment_{}_indexing".format(segment.id)
                cache_result = redis_client.get(indexing_cache_key)
                if cache_result is not None:
                    continue
                segment.enabled = True
                segment.disabled_at = None
                segment.disabled_by = None
                db.session.add(segment)
                real_deal_segment_ids.append(segment.id)
            db.session.commit()

            enable_segments_to_index_task.delay(real_deal_segment_ids, dataset.id, document.id)
        elif action == "disable":
            segments = (
                db.session.query(DocumentSegment)
                .filter(
                    DocumentSegment.id.in_(segment_ids),
                    DocumentSegment.dataset_id == dataset.id,
                    DocumentSegment.document_id == document.id,
                    DocumentSegment.enabled == True,
                )
                .all()
            )
            if not segments:
                return
            real_deal_segment_ids = []
            for segment in segments:
                indexing_cache_key = "segment_{}_indexing".format(segment.id)
                cache_result = redis_client.get(indexing_cache_key)
                if cache_result is not None:
                    continue
                segment.enabled = False
                segment.disabled_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
                segment.disabled_by = current_user.id
                db.session.add(segment)
                real_deal_segment_ids.append(segment.id)
            db.session.commit()

            disable_segments_from_index_task.delay(real_deal_segment_ids, dataset.id, document.id)
        else:
            raise InvalidActionError()
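
    # Usage sketch (editor's note): bulk toggling silently skips segments that
    # are mid-indexing (guarded by the per-segment Redis cache key):
    #
    #   SegmentService.update_segments_status(
    #       [seg.id for seg in segments], "disable", dataset, document
    #   )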
    @classmethod
    def create_child_chunk(
        cls, content: str, segment: DocumentSegment, document: Document, dataset: Dataset
    ) -> ChildChunk:
        lock_name = "add_child_lock_{}".format(segment.id)
        with redis_client.lock(lock_name, timeout=20):
            index_node_id = str(uuid.uuid4())
            index_node_hash = helper.generate_text_hash(content)
            max_position = (
                db.session.query(func.max(ChildChunk.position))
                .filter(
                    ChildChunk.tenant_id == current_user.current_tenant_id,
                    ChildChunk.dataset_id == dataset.id,
                    ChildChunk.document_id == document.id,
                    ChildChunk.segment_id == segment.id,
                )
                .scalar()
            )
            child_chunk = ChildChunk(
                tenant_id=current_user.current_tenant_id,
                dataset_id=dataset.id,
                document_id=document.id,
                segment_id=segment.id,
                # max_position is None when the segment has no child chunks yet
                position=max_position + 1 if max_position else 1,
                index_node_id=index_node_id,
                index_node_hash=index_node_hash,
                content=content,
                word_count=len(content),
                type="customized",
                created_by=current_user.id,
            )
            db.session.add(child_chunk)
            # save vector index
            try:
                VectorService.create_child_chunk_vector(child_chunk, dataset)
            except Exception as e:
                logging.exception("create child chunk index failed")
                db.session.rollback()
                raise ChildChunkIndexingError(str(e))
            db.session.commit()
            return child_chunk
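
    # Usage sketch (editor's note): appends one manually-authored child chunk
    # to a parent segment and indexes it; raises ChildChunkIndexingError (after
    # rolling back) if vector creation fails:
    #
    #   chunk = SegmentService.create_child_chunk("a short child passage", segment, document, dataset)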
    @classmethod
    def update_child_chunks(
        cls,
        child_chunks_update_args: list[ChildChunkUpdateArgs],
        segment: DocumentSegment,
        document: Document,
        dataset: Dataset,
    ) -> list[ChildChunk]:
        child_chunks = (
            db.session.query(ChildChunk)
            .filter(
                ChildChunk.dataset_id == dataset.id,
                ChildChunk.document_id == document.id,
                ChildChunk.segment_id == segment.id,
            )
            .all()
        )
        child_chunks_map = {chunk.id: chunk for chunk in child_chunks}

        new_child_chunks, update_child_chunks, delete_child_chunks, new_child_chunks_args = [], [], [], []

        for child_chunk_update_args in child_chunks_update_args:
            if child_chunk_update_args.id:
                child_chunk = child_chunks_map.pop(child_chunk_update_args.id, None)
                if child_chunk:
                    if child_chunk.content != child_chunk_update_args.content:
                        child_chunk.content = child_chunk_update_args.content
                        child_chunk.word_count = len(child_chunk.content)
                        child_chunk.updated_by = current_user.id
                        child_chunk.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
                        child_chunk.type = "customized"
                        update_child_chunks.append(child_chunk)
            else:
                new_child_chunks_args.append(child_chunk_update_args)
        # anything left in the map was not referenced by the caller: delete it
        if child_chunks_map:
            delete_child_chunks = list(child_chunks_map.values())
        try:
            if update_child_chunks:
                db.session.bulk_save_objects(update_child_chunks)

            if delete_child_chunks:
                for child_chunk in delete_child_chunks:
                    db.session.delete(child_chunk)
            if new_child_chunks_args:
                child_chunk_count = len(child_chunks)
                for position, args in enumerate(new_child_chunks_args, start=child_chunk_count + 1):
                    index_node_id = str(uuid.uuid4())
                    index_node_hash = helper.generate_text_hash(args.content)
                    child_chunk = ChildChunk(
                        tenant_id=current_user.current_tenant_id,
                        dataset_id=dataset.id,
                        document_id=document.id,
                        segment_id=segment.id,
                        position=position,
                        index_node_id=index_node_id,
                        index_node_hash=index_node_hash,
                        content=args.content,
                        word_count=len(args.content),
                        type="customized",
                        created_by=current_user.id,
                    )
                    db.session.add(child_chunk)
                    db.session.flush()
                    new_child_chunks.append(child_chunk)
            VectorService.update_child_chunk_vector(new_child_chunks, update_child_chunks, delete_child_chunks, dataset)
            db.session.commit()
        except Exception as e:
            logging.exception("update child chunk index failed")
            db.session.rollback()
            raise ChildChunkIndexingError(str(e))
        return sorted(new_child_chunks + update_child_chunks, key=lambda x: x.position)
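
    # Usage sketch (editor's note): this method is a full diff against the
    # segment's existing child chunks: args carrying a known id update that
    # chunk, existing chunks whose ids are absent from the args are deleted,
    # and args without an id create new chunks. For example:
    #
    #   SegmentService.update_child_chunks(
    #       [ChildChunkUpdateArgs(id=keep_id, content="edited"),
    #        ChildChunkUpdateArgs(id=None, content="brand new")],
    #       segment, document, dataset,
    #   )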
    @classmethod
    def update_child_chunk(
        cls,
        content: str,
        child_chunk: ChildChunk,
        segment: DocumentSegment,
        document: Document,
        dataset: Dataset,
    ) -> ChildChunk:
        try:
            child_chunk.content = content
            child_chunk.word_count = len(content)
            child_chunk.updated_by = current_user.id
            child_chunk.updated_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
            child_chunk.type = "customized"
            db.session.add(child_chunk)
            VectorService.update_child_chunk_vector([], [child_chunk], [], dataset)
            db.session.commit()
        except Exception as e:
            logging.exception("update child chunk index failed")
            db.session.rollback()
            raise ChildChunkIndexingError(str(e))
        return child_chunk

    @classmethod
    def delete_child_chunk(cls, child_chunk: ChildChunk, dataset: Dataset):
        db.session.delete(child_chunk)
        try:
            VectorService.delete_child_chunk_vector(child_chunk, dataset)
        except Exception as e:
            logging.exception("delete child chunk index failed")
            db.session.rollback()
            raise ChildChunkDeleteIndexError(str(e))
        db.session.commit()
    @classmethod
    def get_child_chunks(
        cls, segment_id: str, document_id: str, dataset_id: str, page: int, limit: int, keyword: Optional[str] = None
    ):
        query = ChildChunk.query.filter_by(
            tenant_id=current_user.current_tenant_id,
            dataset_id=dataset_id,
            document_id=document_id,
            segment_id=segment_id,
        ).order_by(ChildChunk.position.asc())
        if keyword:
            query = query.filter(ChildChunk.content.ilike(f"%{keyword}%"))
        return query.paginate(page=page, per_page=limit, max_per_page=100, error_out=False)
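
    # Usage sketch (editor's note): returns a Flask-SQLAlchemy Pagination
    # object, so callers read `.items`, `.total`, `.page`, and so on:
    #
    #   pagination = SegmentService.get_child_chunks(segment.id, document.id, dataset.id, page=1, limit=20)
    #   chunks = pagination.items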


class DatasetCollectionBindingService:
    @classmethod
    def get_dataset_collection_binding(
        cls, provider_name: str, model_name: str, collection_type: str = "dataset"
    ) -> DatasetCollectionBinding:
        dataset_collection_binding = (
            db.session.query(DatasetCollectionBinding)
            .filter(
                DatasetCollectionBinding.provider_name == provider_name,
                DatasetCollectionBinding.model_name == model_name,
                DatasetCollectionBinding.type == collection_type,
            )
            .order_by(DatasetCollectionBinding.created_at)
            .first()
        )

        if not dataset_collection_binding:
            dataset_collection_binding = DatasetCollectionBinding(
                provider_name=provider_name,
                model_name=model_name,
                collection_name=Dataset.gen_collection_name_by_id(str(uuid.uuid4())),
                type=collection_type,
            )
            db.session.add(dataset_collection_binding)
            db.session.commit()
        return dataset_collection_binding

    @classmethod
    def get_dataset_collection_binding_by_id_and_type(
        cls, collection_binding_id: str, collection_type: str = "dataset"
    ) -> DatasetCollectionBinding:
        dataset_collection_binding = (
            db.session.query(DatasetCollectionBinding)
            .filter(
                DatasetCollectionBinding.id == collection_binding_id, DatasetCollectionBinding.type == collection_type
            )
            .order_by(DatasetCollectionBinding.created_at)
            .first()
        )
        if not dataset_collection_binding:
            raise ValueError("Dataset collection binding not found")

        return dataset_collection_binding


class DatasetPermissionService:
    @classmethod
    def get_dataset_partial_member_list(cls, dataset_id):
        user_list_query = (
            db.session.query(
                DatasetPermission.account_id,
            )
            .filter(DatasetPermission.dataset_id == dataset_id)
            .all()
        )

        user_list = []
        for user in user_list_query:
            user_list.append(user.account_id)

        return user_list

    @classmethod
    def update_partial_member_list(cls, tenant_id, dataset_id, user_list):
        try:
            db.session.query(DatasetPermission).filter(DatasetPermission.dataset_id == dataset_id).delete()
            permissions = []
            for user in user_list:
                permission = DatasetPermission(
                    tenant_id=tenant_id,
                    dataset_id=dataset_id,
                    account_id=user["user_id"],
                )
                permissions.append(permission)

            db.session.add_all(permissions)
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            raise e

    @classmethod
    def check_permission(cls, user, dataset, requested_permission, requested_partial_member_list):
        if not user.is_dataset_editor:
            raise NoPermissionError("User does not have permission to edit this dataset.")

        if user.is_dataset_operator and dataset.permission != requested_permission:
            raise NoPermissionError("Dataset operators cannot change the dataset permissions.")

        if user.is_dataset_operator and requested_permission == "partial_members":
            if not requested_partial_member_list:
                raise ValueError("Partial member list is required when setting to partial members.")

            local_member_list = cls.get_dataset_partial_member_list(dataset.id)
            # use a distinct loop variable so the `user` parameter is not shadowed
            request_member_list = [member["user_id"] for member in requested_partial_member_list]
            if set(local_member_list) != set(request_member_list):
                raise ValueError("Dataset operators cannot change the dataset permissions.")

    @classmethod
    def clear_partial_member_list(cls, dataset_id):
        try:
            db.session.query(DatasetPermission).filter(DatasetPermission.dataset_id == dataset_id).delete()
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            raise e
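
# Usage sketch (editor's note): the expected member payload is a list of dicts
# keyed by "user_id"; the ids below are illustrative only:
#
#   DatasetPermissionService.update_partial_member_list(
#       tenant_id, dataset.id, [{"user_id": "a1b2c3"}, {"user_id": "d4e5f6"}]
#   )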