# indexing_runner.py

import concurrent.futures
import json
import logging
import re
import threading
import time
import uuid
from typing import Any, Optional, cast

from flask import current_app
from sqlalchemy.orm.exc import ObjectDeletedError

from configs import dify_config
from core.entities.knowledge_entities import IndexingEstimate, PreviewDetail, QAPreviewDetail
from core.errors.error import ProviderTokenNotInitError
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.rag.cleaner.clean_processor import CleanProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.docstore.dataset_docstore import DatasetDocumentStore
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.index_processor.constant.index_type import IndexType
from core.rag.index_processor.index_processor_base import BaseIndexProcessor
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.rag.models.document import ChildDocument, Document
from core.rag.splitter.fixed_text_splitter import (
    EnhanceRecursiveCharacterTextSplitter,
    FixedRecursiveCharacterTextSplitter,
)
from core.rag.splitter.text_splitter import TextSplitter
from core.tools.utils.web_reader_tool import get_image_upload_file_ids
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from extensions.ext_storage import storage
from libs import helper
from libs.datetime_utils import naive_utc_now
from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegment
from models.dataset import Document as DatasetDocument
from models.model import UploadFile
from services.feature_service import FeatureService
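

# IndexingRunner drives the full document indexing pipeline for a dataset document:
# extract -> transform (clean/split) -> persist segments -> load into the index.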
class IndexingRunner:
    def __init__(self):
        self.storage = storage
        self.model_manager = ModelManager()

    def run(self, dataset_documents: list[DatasetDocument]):
        """Run the indexing process."""
        for dataset_document in dataset_documents:
            try:
                # get dataset
                dataset = db.session.query(Dataset).filter_by(id=dataset_document.dataset_id).first()
                if not dataset:
                    raise ValueError("no dataset found")
                # get the process rule
                processing_rule = (
                    db.session.query(DatasetProcessRule)
                    .where(DatasetProcessRule.id == dataset_document.dataset_process_rule_id)
                    .first()
                )
                if not processing_rule:
                    raise ValueError("no process rule found")
                index_type = dataset_document.doc_form
                index_processor = IndexProcessorFactory(index_type).init_index_processor()
                # extract
                text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
                # transform
                documents = self._transform(
                    index_processor, dataset, text_docs, dataset_document.doc_language, processing_rule.to_dict()
                )
                # save segment
                self._load_segments(dataset, dataset_document, documents)
                # load
                self._load(
                    index_processor=index_processor,
                    dataset=dataset,
                    dataset_document=dataset_document,
                    documents=documents,
                )
            except DocumentIsPausedError:
                raise DocumentIsPausedError(f"Document paused, document id: {dataset_document.id}")
            except ProviderTokenNotInitError as e:
                dataset_document.indexing_status = "error"
                dataset_document.error = str(e.description)
                dataset_document.stopped_at = naive_utc_now()
                db.session.commit()
            except ObjectDeletedError:
                logging.warning("Document deleted, document id: %s", dataset_document.id)
            except Exception as e:
                logging.exception("consume document failed")
                dataset_document.indexing_status = "error"
                dataset_document.error = str(e)
                dataset_document.stopped_at = naive_utc_now()
                db.session.commit()

    def run_in_splitting_status(self, dataset_document: DatasetDocument):
        """Run the indexing process when the index_status is splitting."""
        try:
            # get dataset
            dataset = db.session.query(Dataset).filter_by(id=dataset_document.dataset_id).first()
            if not dataset:
                raise ValueError("no dataset found")
            # get existing document segments and delete them
            document_segments = (
                db.session.query(DocumentSegment)
                .filter_by(dataset_id=dataset.id, document_id=dataset_document.id)
                .all()
            )
            for document_segment in document_segments:
                db.session.delete(document_segment)
                if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                    # delete child chunks
                    db.session.query(ChildChunk).where(ChildChunk.segment_id == document_segment.id).delete()
            db.session.commit()
            # get the process rule
            processing_rule = (
                db.session.query(DatasetProcessRule)
                .where(DatasetProcessRule.id == dataset_document.dataset_process_rule_id)
                .first()
            )
            if not processing_rule:
                raise ValueError("no process rule found")
            index_type = dataset_document.doc_form
            index_processor = IndexProcessorFactory(index_type).init_index_processor()
            # extract
            text_docs = self._extract(index_processor, dataset_document, processing_rule.to_dict())
            # transform
            documents = self._transform(
                index_processor, dataset, text_docs, dataset_document.doc_language, processing_rule.to_dict()
            )
            # save segment
            self._load_segments(dataset, dataset_document, documents)
            # load
            self._load(
                index_processor=index_processor, dataset=dataset, dataset_document=dataset_document, documents=documents
            )
        except DocumentIsPausedError:
            raise DocumentIsPausedError(f"Document paused, document id: {dataset_document.id}")
        except ProviderTokenNotInitError as e:
            dataset_document.indexing_status = "error"
            dataset_document.error = str(e.description)
            dataset_document.stopped_at = naive_utc_now()
            db.session.commit()
        except Exception as e:
            logging.exception("consume document failed")
            dataset_document.indexing_status = "error"
            dataset_document.error = str(e)
            dataset_document.stopped_at = naive_utc_now()
            db.session.commit()

    def run_in_indexing_status(self, dataset_document: DatasetDocument):
        """Run the indexing process when the index_status is indexing."""
        try:
            # get dataset
            dataset = db.session.query(Dataset).filter_by(id=dataset_document.dataset_id).first()
            if not dataset:
                raise ValueError("no dataset found")
            # get existing document segments
            document_segments = (
                db.session.query(DocumentSegment)
                .filter_by(dataset_id=dataset.id, document_id=dataset_document.id)
                .all()
            )
            documents = []
            if document_segments:
                for document_segment in document_segments:
                    # transform segment to node
                    if document_segment.status != "completed":
                        document = Document(
                            page_content=document_segment.content,
                            metadata={
                                "doc_id": document_segment.index_node_id,
                                "doc_hash": document_segment.index_node_hash,
                                "document_id": document_segment.document_id,
                                "dataset_id": document_segment.dataset_id,
                            },
                        )
                        if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                            child_chunks = document_segment.get_child_chunks()
                            if child_chunks:
                                child_documents = []
                                for child_chunk in child_chunks:
                                    child_document = ChildDocument(
                                        page_content=child_chunk.content,
                                        metadata={
                                            "doc_id": child_chunk.index_node_id,
                                            "doc_hash": child_chunk.index_node_hash,
                                            "document_id": document_segment.document_id,
                                            "dataset_id": document_segment.dataset_id,
                                        },
                                    )
                                    child_documents.append(child_document)
                                document.children = child_documents
                        documents.append(document)
            # build index
            # get the process rule
            processing_rule = (
                db.session.query(DatasetProcessRule)
                .where(DatasetProcessRule.id == dataset_document.dataset_process_rule_id)
                .first()
            )
            index_type = dataset_document.doc_form
            index_processor = IndexProcessorFactory(index_type).init_index_processor()
            self._load(
                index_processor=index_processor, dataset=dataset, dataset_document=dataset_document, documents=documents
            )
        except DocumentIsPausedError:
            raise DocumentIsPausedError(f"Document paused, document id: {dataset_document.id}")
        except ProviderTokenNotInitError as e:
            dataset_document.indexing_status = "error"
            dataset_document.error = str(e.description)
            dataset_document.stopped_at = naive_utc_now()
            db.session.commit()
        except Exception as e:
            logging.exception("consume document failed")
            dataset_document.indexing_status = "error"
            dataset_document.error = str(e)
            dataset_document.stopped_at = naive_utc_now()
            db.session.commit()
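
    # Preview/estimate path: runs extract and transform over the given sources without
    # persisting segments, returning segment counts and up to 10 preview chunks.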
    def indexing_estimate(
        self,
        tenant_id: str,
        extract_settings: list[ExtractSetting],
        tmp_processing_rule: dict,
        doc_form: Optional[str] = None,
        doc_language: str = "English",
        dataset_id: Optional[str] = None,
        indexing_technique: str = "economy",
    ) -> IndexingEstimate:
        """
        Estimate the indexing for the document.
        """
        # check document limit
        features = FeatureService.get_features(tenant_id)
        if features.billing.enabled:
            count = len(extract_settings)
            batch_upload_limit = dify_config.BATCH_UPLOAD_LIMIT
            if count > batch_upload_limit:
                raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")

        embedding_model_instance = None
        if dataset_id:
            dataset = db.session.query(Dataset).filter_by(id=dataset_id).first()
            if not dataset:
                raise ValueError("Dataset not found.")
            if dataset.indexing_technique == "high_quality" or indexing_technique == "high_quality":
                if dataset.embedding_model_provider:
                    embedding_model_instance = self.model_manager.get_model_instance(
                        tenant_id=tenant_id,
                        provider=dataset.embedding_model_provider,
                        model_type=ModelType.TEXT_EMBEDDING,
                        model=dataset.embedding_model,
                    )
                else:
                    embedding_model_instance = self.model_manager.get_default_model_instance(
                        tenant_id=tenant_id,
                        model_type=ModelType.TEXT_EMBEDDING,
                    )
        else:
            if indexing_technique == "high_quality":
                embedding_model_instance = self.model_manager.get_default_model_instance(
                    tenant_id=tenant_id,
                    model_type=ModelType.TEXT_EMBEDDING,
                )

        preview_texts = []  # type: ignore
        total_segments = 0
        index_type = doc_form
        index_processor = IndexProcessorFactory(index_type).init_index_processor()
        for extract_setting in extract_settings:
            # extract
            processing_rule = DatasetProcessRule(
                mode=tmp_processing_rule["mode"], rules=json.dumps(tmp_processing_rule["rules"])
            )
            text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
            documents = index_processor.transform(
                text_docs,
                embedding_model_instance=embedding_model_instance,
                process_rule=processing_rule.to_dict(),
                tenant_id=tenant_id,
                doc_language=doc_language,
                preview=True,
            )
            total_segments += len(documents)
            for document in documents:
                if len(preview_texts) < 10:
                    if doc_form and doc_form == "qa_model":
                        preview_detail = QAPreviewDetail(
                            question=document.page_content, answer=document.metadata.get("answer") or ""
                        )
                        preview_texts.append(preview_detail)
                    else:
                        preview_detail = PreviewDetail(content=document.page_content)  # type: ignore
                        if document.children:
                            preview_detail.child_chunks = [child.page_content for child in document.children]  # type: ignore
                        preview_texts.append(preview_detail)

                # delete image files and related db records
                image_upload_file_ids = get_image_upload_file_ids(document.page_content)
                for upload_file_id in image_upload_file_ids:
                    image_file = db.session.query(UploadFile).where(UploadFile.id == upload_file_id).first()
                    if image_file is None:
                        continue
                    try:
                        storage.delete(image_file.key)
                    except Exception:
                        logging.exception(
                            "Delete image_files failed while indexing_estimate, image_upload_file_id: %s",
                            upload_file_id,
                        )
                    db.session.delete(image_file)

        if doc_form and doc_form == "qa_model":
            return IndexingEstimate(total_segments=total_segments * 20, qa_preview=preview_texts, preview=[])
        return IndexingEstimate(total_segments=total_segments, preview=preview_texts)  # type: ignore
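
    # Dispatches to the configured datasource (upload_file / notion_import / website_crawl),
    # marks the document as "splitting", and stamps each extracted Document with the
    # owning document and dataset ids.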
    def _extract(
        self, index_processor: BaseIndexProcessor, dataset_document: DatasetDocument, process_rule: dict
    ) -> list[Document]:
        # load file
        if dataset_document.data_source_type not in {"upload_file", "notion_import", "website_crawl"}:
            return []

        data_source_info = dataset_document.data_source_info_dict
        text_docs = []
        if dataset_document.data_source_type == "upload_file":
            if not data_source_info or "upload_file_id" not in data_source_info:
                raise ValueError("no upload file found")
            file_detail = (
                db.session.query(UploadFile).where(UploadFile.id == data_source_info["upload_file_id"]).one_or_none()
            )
            if file_detail:
                extract_setting = ExtractSetting(
                    datasource_type="upload_file", upload_file=file_detail, document_model=dataset_document.doc_form
                )
                text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule["mode"])
        elif dataset_document.data_source_type == "notion_import":
            if (
                not data_source_info
                or "notion_workspace_id" not in data_source_info
                or "notion_page_id" not in data_source_info
            ):
                raise ValueError("no notion import info found")
            extract_setting = ExtractSetting(
                datasource_type="notion_import",
                notion_info={
                    "notion_workspace_id": data_source_info["notion_workspace_id"],
                    "notion_obj_id": data_source_info["notion_page_id"],
                    "notion_page_type": data_source_info["type"],
                    "document": dataset_document,
                    "tenant_id": dataset_document.tenant_id,
                },
                document_model=dataset_document.doc_form,
            )
            text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule["mode"])
        elif dataset_document.data_source_type == "website_crawl":
            if (
                not data_source_info
                or "provider" not in data_source_info
                or "url" not in data_source_info
                or "job_id" not in data_source_info
            ):
                raise ValueError("no website import info found")
            extract_setting = ExtractSetting(
                datasource_type="website_crawl",
                website_info={
                    "provider": data_source_info["provider"],
                    "job_id": data_source_info["job_id"],
                    "tenant_id": dataset_document.tenant_id,
                    "url": data_source_info["url"],
                    "mode": data_source_info["mode"],
                    "only_main_content": data_source_info["only_main_content"],
                },
                document_model=dataset_document.doc_form,
            )
            text_docs = index_processor.extract(extract_setting, process_rule_mode=process_rule["mode"])

        # update document status to splitting
        self._update_document_index_status(
            document_id=dataset_document.id,
            after_indexing_status="splitting",
            extra_update_params={
                DatasetDocument.word_count: sum(len(text_doc.page_content) for text_doc in text_docs),
                DatasetDocument.parsing_completed_at: naive_utc_now(),
            },
        )

        # replace doc ids with the document model id
        text_docs = cast(list[Document], text_docs)
        for text_doc in text_docs:
            if text_doc.metadata is not None:
                text_doc.metadata["document_id"] = dataset_document.id
                text_doc.metadata["dataset_id"] = dataset_document.dataset_id

        return text_docs
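
    # Normalizes "<|" / "|>" sequences and strips control and invalid Unicode characters.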
    @staticmethod
    def filter_string(text):
        text = re.sub(r"<\|", "<", text)
        text = re.sub(r"\|>", ">", text)
        text = re.sub(r"[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\xEF\xBF\xBE]", "", text)
        # Unicode U+FFFE
        text = re.sub("\ufffe", "", text)
        return text

    @staticmethod
    def _get_splitter(
        processing_rule_mode: str,
        max_tokens: int,
        chunk_overlap: int,
        separator: str,
        embedding_model_instance: Optional[ModelInstance],
    ) -> TextSplitter:
        """
        Get the NodeParser object according to the processing rule.
        """
        if processing_rule_mode in ["custom", "hierarchical"]:
            # The user-defined segmentation rule
            max_segmentation_tokens_length = dify_config.INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH
            if max_tokens < 50 or max_tokens > max_segmentation_tokens_length:
                raise ValueError(f"Custom segment length should be between 50 and {max_segmentation_tokens_length}.")
            if separator:
                separator = separator.replace("\\n", "\n")
            character_splitter = FixedRecursiveCharacterTextSplitter.from_encoder(
                chunk_size=max_tokens,
                chunk_overlap=chunk_overlap,
                fixed_separator=separator,
                separators=["\n\n", "。", ". ", " ", ""],
                embedding_model_instance=embedding_model_instance,
            )
        else:
            # Automatic segmentation
            automatic_rules: dict[str, Any] = dict(DatasetProcessRule.AUTOMATIC_RULES["segmentation"])
            character_splitter = EnhanceRecursiveCharacterTextSplitter.from_encoder(
                chunk_size=automatic_rules["max_tokens"],
                chunk_overlap=automatic_rules["chunk_overlap"],
                separators=["\n\n", "。", ". ", " ", ""],
                embedding_model_instance=embedding_model_instance,
            )
        return character_splitter  # type: ignore
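
    # Cleans and splits documents for the estimate path, assigning a fresh doc_id and
    # content hash to every resulting chunk.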
    def _split_to_documents_for_estimate(
        self, text_docs: list[Document], splitter: TextSplitter, processing_rule: DatasetProcessRule
    ) -> list[Document]:
        """
        Split the text documents into nodes.
        """
        all_documents: list[Document] = []
        for text_doc in text_docs:
            # document clean
            document_text = self._document_clean(text_doc.page_content, processing_rule)
            text_doc.page_content = document_text

            # parse document to nodes
            documents = splitter.split_documents([text_doc])
            split_documents = []
            for document in documents:
                if document.page_content is None or not document.page_content.strip():
                    continue
                if document.metadata is not None:
                    doc_id = str(uuid.uuid4())
                    hash = helper.generate_text_hash(document.page_content)
                    document.metadata["doc_id"] = doc_id
                    document.metadata["doc_hash"] = hash
                split_documents.append(document)
            all_documents.extend(split_documents)
        return all_documents

    @staticmethod
    def _document_clean(text: str, processing_rule: DatasetProcessRule) -> str:
        """
        Clean the document text according to the processing rules.
        """
        if processing_rule.mode == "automatic":
            rules = DatasetProcessRule.AUTOMATIC_RULES
        else:
            rules = json.loads(processing_rule.rules) if processing_rule.rules else {}
        document_text = CleanProcessor.clean(text, {"rules": rules})
        return document_text
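
    # Parses generated Q&A text of the form "Q1: ... A1: ..." into question/answer pairs.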
    @staticmethod
    def format_split_text(text: str) -> list[QAPreviewDetail]:
        regex = r"Q\d+:\s*(.*?)\s*A\d+:\s*([\s\S]*?)(?=Q\d+:|$)"
        matches = re.findall(regex, text, re.UNICODE)
        return [QAPreviewDetail(question=q, answer=re.sub(r"\n\s*", "\n", a.strip())) for q, a in matches if q and a]
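
    # Indexing fan-out: economy datasets build a keyword index on a side thread, while
    # high-quality datasets embed chunk groups concurrently in a thread pool.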
    def _load(
        self,
        index_processor: BaseIndexProcessor,
        dataset: Dataset,
        dataset_document: DatasetDocument,
        documents: list[Document],
    ) -> None:
        """
        insert index and update document/segment status to completed
        """
        embedding_model_instance = None
        if dataset.indexing_technique == "high_quality":
            embedding_model_instance = self.model_manager.get_model_instance(
                tenant_id=dataset.tenant_id,
                provider=dataset.embedding_model_provider,
                model_type=ModelType.TEXT_EMBEDDING,
                model=dataset.embedding_model,
            )

        # chunk nodes by chunk size
        indexing_start_at = time.perf_counter()
        tokens = 0
        if dataset_document.doc_form != IndexType.PARENT_CHILD_INDEX and dataset.indexing_technique == "economy":
            # create keyword index
            create_keyword_thread = threading.Thread(
                target=self._process_keyword_index,
                args=(current_app._get_current_object(), dataset.id, dataset_document.id, documents),  # type: ignore
            )
            create_keyword_thread.start()

        max_workers = 10
        if dataset.indexing_technique == "high_quality":
            with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
                futures = []

                # Distribute documents into groups based on the hash of their page_content,
                # so that no two threads process the same document and potential database
                # insertion deadlocks are avoided.
                document_groups: list[list[Document]] = [[] for _ in range(max_workers)]
                for document in documents:
                    hash = helper.generate_text_hash(document.page_content)
                    group_index = int(hash, 16) % max_workers
                    document_groups[group_index].append(document)
                for chunk_documents in document_groups:
                    if len(chunk_documents) == 0:
                        continue
                    futures.append(
                        executor.submit(
                            self._process_chunk,
                            current_app._get_current_object(),  # type: ignore
                            index_processor,
                            chunk_documents,
                            dataset,
                            dataset_document,
                            embedding_model_instance,
                        )
                    )
                for future in futures:
                    tokens += future.result()

        if dataset_document.doc_form != IndexType.PARENT_CHILD_INDEX and dataset.indexing_technique == "economy":
            create_keyword_thread.join()
        indexing_end_at = time.perf_counter()

        # update document status to completed
        self._update_document_index_status(
            document_id=dataset_document.id,
            after_indexing_status="completed",
            extra_update_params={
                DatasetDocument.tokens: tokens,
                DatasetDocument.completed_at: naive_utc_now(),
                DatasetDocument.indexing_latency: indexing_end_at - indexing_start_at,
                DatasetDocument.error: None,
            },
        )
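
    # Runs on a worker thread with its own app context: builds the keyword index and,
    # for non-high-quality datasets, marks the affected segments as completed.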
    @staticmethod
    def _process_keyword_index(flask_app, dataset_id, document_id, documents):
        with flask_app.app_context():
            dataset = db.session.query(Dataset).filter_by(id=dataset_id).first()
            if not dataset:
                raise ValueError("no dataset found")
            keyword = Keyword(dataset)
            keyword.create(documents)
            if dataset.indexing_technique != "high_quality":
                document_ids = [document.metadata["doc_id"] for document in documents]
                db.session.query(DocumentSegment).where(
                    DocumentSegment.document_id == document_id,
                    DocumentSegment.dataset_id == dataset_id,
                    DocumentSegment.index_node_id.in_(document_ids),
                    DocumentSegment.status == "indexing",
                ).update(
                    {
                        DocumentSegment.status: "completed",
                        DocumentSegment.enabled: True,
                        DocumentSegment.completed_at: naive_utc_now(),
                    }
                )
                db.session.commit()
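
    # Thread-pool worker: aborts if the document was paused, counts embedding tokens,
    # loads one group of chunks into the vector index, and marks their segments completed.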
    def _process_chunk(
        self, flask_app, index_processor, chunk_documents, dataset, dataset_document, embedding_model_instance
    ):
        with flask_app.app_context():
            # check document is paused
            self._check_document_paused_status(dataset_document.id)

            tokens = 0
            if embedding_model_instance:
                page_content_list = [document.page_content for document in chunk_documents]
                tokens += sum(embedding_model_instance.get_text_embedding_num_tokens(page_content_list))

            # load index
            index_processor.load(dataset, chunk_documents, with_keywords=False)

            document_ids = [document.metadata["doc_id"] for document in chunk_documents]
            db.session.query(DocumentSegment).where(
                DocumentSegment.document_id == dataset_document.id,
                DocumentSegment.dataset_id == dataset.id,
                DocumentSegment.index_node_id.in_(document_ids),
                DocumentSegment.status == "indexing",
            ).update(
                {
                    DocumentSegment.status: "completed",
                    DocumentSegment.enabled: True,
                    DocumentSegment.completed_at: naive_utc_now(),
                }
            )
            db.session.commit()

            return tokens
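
    # Pausing is signaled via a Redis flag; raising here aborts the current indexing step.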
    @staticmethod
    def _check_document_paused_status(document_id: str):
        indexing_cache_key = f"document_{document_id}_is_paused"
        result = redis_client.get(indexing_cache_key)
        if result:
            raise DocumentIsPausedError()

    @staticmethod
    def _update_document_index_status(
        document_id: str, after_indexing_status: str, extra_update_params: Optional[dict] = None
    ) -> None:
        """
        Update the document indexing status.
        """
        count = db.session.query(DatasetDocument).filter_by(id=document_id, is_paused=True).count()
        if count > 0:
            raise DocumentIsPausedError()
        document = db.session.query(DatasetDocument).filter_by(id=document_id).first()
        if not document:
            raise DocumentIsDeletedPausedError()

        update_params = {DatasetDocument.indexing_status: after_indexing_status}
        if extra_update_params:
            update_params.update(extra_update_params)
        db.session.query(DatasetDocument).filter_by(id=document_id).update(update_params)  # type: ignore
        db.session.commit()

    @staticmethod
    def _update_segments_by_document(dataset_document_id: str, update_params: dict) -> None:
        """
        Update the document segments by document id.
        """
        db.session.query(DocumentSegment).filter_by(document_id=dataset_document_id).update(update_params)
        db.session.commit()
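
    # Resolves the embedding model (dataset-specific provider or tenant default) and
    # delegates cleaning and chunking to the index processor.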
    def _transform(
        self,
        index_processor: BaseIndexProcessor,
        dataset: Dataset,
        text_docs: list[Document],
        doc_language: str,
        process_rule: dict,
    ) -> list[Document]:
        # get embedding model instance
        embedding_model_instance = None
        if dataset.indexing_technique == "high_quality":
            if dataset.embedding_model_provider:
                embedding_model_instance = self.model_manager.get_model_instance(
                    tenant_id=dataset.tenant_id,
                    provider=dataset.embedding_model_provider,
                    model_type=ModelType.TEXT_EMBEDDING,
                    model=dataset.embedding_model,
                )
            else:
                embedding_model_instance = self.model_manager.get_default_model_instance(
                    tenant_id=dataset.tenant_id,
                    model_type=ModelType.TEXT_EMBEDDING,
                )

        documents = index_processor.transform(
            text_docs,
            embedding_model_instance=embedding_model_instance,
            process_rule=process_rule,
            tenant_id=dataset.tenant_id,
            doc_language=doc_language,
        )
        return documents
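
    # Persists transformed chunks as DocumentSegment rows through the docstore, then
    # advances both the document and its segments to "indexing" status.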
    def _load_segments(self, dataset, dataset_document, documents):
        # save nodes to document segments
        doc_store = DatasetDocumentStore(
            dataset=dataset, user_id=dataset_document.created_by, document_id=dataset_document.id
        )

        # add document segments
        doc_store.add_documents(docs=documents, save_child=dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX)

        # update document status to indexing
        cur_time = naive_utc_now()
        self._update_document_index_status(
            document_id=dataset_document.id,
            after_indexing_status="indexing",
            extra_update_params={
                DatasetDocument.cleaning_completed_at: cur_time,
                DatasetDocument.splitting_completed_at: cur_time,
            },
        )

        # update segment status to indexing
        self._update_segments_by_document(
            dataset_document_id=dataset_document.id,
            update_params={
                DocumentSegment.status: "indexing",
                DocumentSegment.indexing_at: naive_utc_now(),
            },
        )


class DocumentIsPausedError(Exception):
    pass


class DocumentIsDeletedPausedError(Exception):
    pass