
sync_website_document_indexing_task.py

import logging
import time

import click
from celery import shared_task

from core.indexing_runner import IndexingRunner
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, Document, DocumentSegment
from services.feature_service import FeatureService

logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def sync_website_document_indexing_task(dataset_id: str, document_id: str):
    """
    Re-index a website-synced document: clean its old segments and vector data, then run indexing again.

    :param dataset_id: ID of the dataset the document belongs to
    :param document_id: ID of the document to re-index

    Usage: sync_website_document_indexing_task.delay(dataset_id, document_id)
    """
    start_at = time.perf_counter()

    dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
    if dataset is None:
        raise ValueError("Dataset not found")

    sync_indexing_cache_key = f"document_{document_id}_is_sync"
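    # This key is a "sync in progress" flag, assumed to be set by the caller
    # before dispatch; the billing-check, success, and error paths below all
    # clear it when the task exits.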
    # check document limit
    features = FeatureService.get_features(dataset.tenant_id)
    try:
        if features.billing.enabled:
            vector_space = features.vector_space
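            # limit == 0 means unlimited; otherwise fail once usage has
            # reached the subscription's vector-space quota.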
            if 0 < vector_space.limit <= vector_space.size:
                raise ValueError(
                    "Your total number of documents plus the number of uploads is over the limit of "
                    "your subscription."
                )
    except Exception as e:
        document = (
            db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()
        )
        if document:
            document.indexing_status = "error"
            document.error = str(e)
            document.stopped_at = naive_utc_now()
            db.session.add(document)
            db.session.commit()
        redis_client.delete(sync_indexing_cache_key)
        return

    logger.info(click.style(f"Start sync website document: {document_id}", fg="green"))
    document = db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()
    if not document:
        logger.info(click.style(f"Document not found: {document_id}", fg="yellow"))
        return

    try:
        # clean old data
        index_processor = IndexProcessorFactory(document.doc_form).init_index_processor()
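        # doc_form selects the processor that matches how the document was
        # indexed (e.g. paragraph, QA, or parent-child), so cleanup removes
        # the same kind of index entries that were created.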
        segments = db.session.query(DocumentSegment).where(DocumentSegment.document_id == document_id).all()
        if segments:
            index_node_ids = [segment.index_node_id for segment in segments]
            # delete from vector index
            index_processor.clean(dataset, index_node_ids, with_keywords=True, delete_child_chunks=True)
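
            # Deleting the ORM objects one by one (rather than a single bulk
            # query.delete()) keeps SQLAlchemy's ORM-level delete events and
            # cascades in play, at the cost of one DELETE per segment.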
            for segment in segments:
                db.session.delete(segment)
            db.session.commit()

        document.indexing_status = "parsing"
        document.processing_started_at = naive_utc_now()
        db.session.add(document)
        db.session.commit()

        indexing_runner = IndexingRunner()
        indexing_runner.run([document])
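        # IndexingRunner re-runs the full extract -> transform -> index
        # pipeline for the document, updating its indexing_status as it
        # progresses.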
        redis_client.delete(sync_indexing_cache_key)
    except Exception as ex:
        document.indexing_status = "error"
        document.error = str(ex)
        document.stopped_at = naive_utc_now()
        db.session.add(document)
        db.session.commit()
        logger.info(click.style(str(ex), fg="yellow"))
        redis_client.delete(sync_indexing_cache_key)
        logger.exception("sync_website_document_indexing_task failed, document_id: %s", document_id)

    end_at = time.perf_counter()
    logger.info(click.style(f"Sync document: {document_id} latency: {end_at - start_at}", fg="green"))
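
For context, a minimal dispatch sketch. The task itself only ever deletes the `document_{id}_is_sync` flag, so this sketch assumes the caller sets it before enqueueing; the `trigger_website_document_sync` helper, the module path in the import, and the 600-second TTL are illustrative assumptions, not taken from this file.

from extensions.ext_redis import redis_client

# Module path assumed from the filename; adjust to where the task lives.
from tasks.sync_website_document_indexing_task import sync_website_document_indexing_task


def trigger_website_document_sync(dataset_id: str, document_id: str) -> None:
    """Hypothetical caller: mark the document as syncing, then enqueue the task."""
    # The task clears this key when it finishes or fails; the 600s TTL is an
    # assumed safety net in case the worker never picks the job up.
    redis_client.setex(f"document_{document_id}_is_sync", 600, 1)
    # Runs on the "dataset" queue declared in the @shared_task decorator.
    sync_website_document_indexing_task.delay(dataset_id, document_id)

Clearing the flag inside the task, rather than in the caller, keeps the "in progress" state accurate whether indexing succeeds, hits the billing limit, or raises.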