
document_indexing_sync_task.py

import datetime
import logging
import time

import click
from celery import shared_task  # type: ignore

from core.indexing_runner import DocumentIsPausedError, IndexingRunner
from core.rag.extractor.notion_extractor import NotionExtractor
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from models.dataset import Dataset, Document, DocumentSegment
from models.source import DataSourceOauthBinding

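# Celery task: re-syncs a Notion-imported document when its source page has changed.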
@shared_task(queue="dataset")
def document_indexing_sync_task(dataset_id: str, document_id: str):
    """
    Async task to re-sync a document with its external data source.

    :param dataset_id: ID of the dataset the document belongs to
    :param document_id: ID of the document to sync

    Usage: document_indexing_sync_task.delay(dataset_id, document_id)
    """
    logging.info(click.style("Start sync document: {}".format(document_id), fg="green"))
    start_at = time.perf_counter()

    document = db.session.query(Document).filter(Document.id == document_id, Document.dataset_id == dataset_id).first()

    if not document:
        logging.info(click.style("Document not found: {}".format(document_id), fg="red"))
        db.session.close()
        return
    data_source_info = document.data_source_info_dict
    if document.data_source_type == "notion_import":
        if (
            not data_source_info
            or "notion_page_id" not in data_source_info
            or "notion_workspace_id" not in data_source_info
        ):
            raise ValueError("no notion page found")

        workspace_id = data_source_info["notion_workspace_id"]
        page_id = data_source_info["notion_page_id"]
        page_type = data_source_info["type"]
        page_edited_time = data_source_info["last_edited_time"]
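        # source_info is a JSON column, so the extracted workspace_id is still
        # JSON-encoded; it is therefore compared against the quoted form of the raw id.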
        data_source_binding = (
            db.session.query(DataSourceOauthBinding)
            .filter(
                db.and_(
                    DataSourceOauthBinding.tenant_id == document.tenant_id,
                    DataSourceOauthBinding.provider == "notion",
                    DataSourceOauthBinding.disabled == False,
                    DataSourceOauthBinding.source_info["workspace_id"] == f'"{workspace_id}"',
                )
            )
            .first()
        )
        if not data_source_binding:
            raise ValueError("Data source binding not found.")
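        # Re-read the page through the workspace's stored OAuth access token.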
        loader = NotionExtractor(
            notion_workspace_id=workspace_id,
            notion_obj_id=page_id,
            notion_page_type=page_type,
            notion_access_token=data_source_binding.access_token,
            tenant_id=document.tenant_id,
        )

        last_edited_time = loader.get_notion_last_edited_time()
        # re-index only if the page has been edited since the last sync
        if last_edited_time != page_edited_time:
            document.indexing_status = "parsing"
            document.processing_started_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
            db.session.commit()
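            # Persist the status change before the (potentially slow) cleanup
            # and re-index below, so the document already reads as "parsing".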
            # delete all existing document segments and their index entries
            try:
                dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
                if not dataset:
                    raise Exception("Dataset not found")
                index_type = document.doc_form
                index_processor = IndexProcessorFactory(index_type).init_index_processor()

                segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
                index_node_ids = [segment.index_node_id for segment in segments]

                # delete from the vector index
                index_processor.clean(dataset, index_node_ids, with_keywords=True, delete_child_chunks=True)

                for segment in segments:
                    db.session.delete(segment)
                end_at = time.perf_counter()
                logging.info(
                    click.style(
                        "Cleaned document {} on data source update, latency: {}".format(
                            document_id, end_at - start_at
                        ),
                        fg="green",
                    )
                )
            except Exception:
                logging.exception("Cleaning document on data source update failed")

            try:
                indexing_runner = IndexingRunner()
                indexing_runner.run([document])
                end_at = time.perf_counter()
                logging.info(
                    click.style("update document: {} latency: {}".format(document.id, end_at - start_at), fg="green")
                )
            except DocumentIsPausedError as ex:
                logging.info(click.style(str(ex), fg="yellow"))
            except Exception:
                logging.exception("document_indexing_sync_task failed, document_id: {}".format(document_id))
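
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): as the
# docstring notes, the task is enqueued with Celery's standard `delay`.
# A caller such as a document-update handler (the function name here is
# hypothetical) would look like:
#
#     def on_document_source_updated(dataset_id: str, document_id: str):
#         document_indexing_sync_task.delay(dataset_id, document_id)
# ---------------------------------------------------------------------------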