
clean_unused_datasets_task.py

import datetime
import time

import click
from sqlalchemy import func, select
from werkzeug.exceptions import NotFound

import app
from configs import dify_config
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import Dataset, DatasetAutoDisableLog, DatasetQuery, Document
from services.feature_service import FeatureService

@app.celery.task(queue="dataset")
def clean_unused_datasets_task():
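    """Disable documents and clean vector indexes for datasets that have sat idle
    past the retention windows configured for the sandbox and pro plans.
    """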
    click.echo(click.style("Start clean unused datasets indexes.", fg="green"))
    plan_sandbox_clean_day_setting = dify_config.PLAN_SANDBOX_CLEAN_DAY_SETTING
    plan_pro_clean_day_setting = dify_config.PLAN_PRO_CLEAN_DAY_SETTING
    start_at = time.perf_counter()
    plan_sandbox_clean_day = datetime.datetime.now() - datetime.timedelta(days=plan_sandbox_clean_day_setting)
    plan_pro_clean_day = datetime.datetime.now() - datetime.timedelta(days=plan_pro_clean_day_setting)
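
    # First pass: datasets created before the sandbox cutoff whose documents and
    # queries are all older than the sandbox retention window.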
    while True:
        try:
            # Subquery for counting new documents
            document_subquery_new = (
                db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
                .filter(
                    Document.indexing_status == "completed",
                    Document.enabled == True,
                    Document.archived == False,
                    Document.updated_at > plan_sandbox_clean_day,
                )
                .group_by(Document.dataset_id)
                .subquery()
            )

            # Subquery for counting old documents
            document_subquery_old = (
                db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
                .filter(
                    Document.indexing_status == "completed",
                    Document.enabled == True,
                    Document.archived == False,
                    Document.updated_at < plan_sandbox_clean_day,
                )
                .group_by(Document.dataset_id)
                .subquery()
            )

            # Main query with join and filter
            stmt = (
                select(Dataset)
                .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id)
                .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id)
                .filter(
                    Dataset.created_at < plan_sandbox_clean_day,
                    func.coalesce(document_subquery_new.c.document_count, 0) == 0,
                    func.coalesce(document_subquery_old.c.document_count, 0) > 0,
                )
                .order_by(Dataset.created_at.desc())
            )

            datasets = db.paginate(stmt, page=1, per_page=50)
        except NotFound:
            break
        if datasets.items is None or len(datasets.items) == 0:
            break
        for dataset in datasets:
            dataset_query = (
                db.session.query(DatasetQuery)
                .filter(DatasetQuery.created_at > plan_sandbox_clean_day, DatasetQuery.dataset_id == dataset.id)
                .all()
            )
            if not dataset_query or len(dataset_query) == 0:
                try:
                    # add auto disable log
                    documents = (
                        db.session.query(Document)
                        .filter(
                            Document.dataset_id == dataset.id,
                            Document.enabled == True,
                            Document.archived == False,
                        )
                        .all()
                    )
                    for document in documents:
                        dataset_auto_disable_log = DatasetAutoDisableLog(
                            tenant_id=dataset.tenant_id,
                            dataset_id=dataset.id,
                            document_id=document.id,
                        )
                        db.session.add(dataset_auto_disable_log)

                    # remove index
                    index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor()
                    index_processor.clean(dataset, None)

                    # update document
                    update_params = {Document.enabled: False}
                    db.session.query(Document).filter_by(dataset_id=dataset.id).update(update_params)
                    db.session.commit()
                    click.echo(click.style("Cleaned unused dataset {} from db success!".format(dataset.id), fg="green"))
                except Exception as e:
                    click.echo(
                        click.style("clean dataset index error: {} {}".format(e.__class__.__name__, str(e)), fg="red")
                    )
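
    # Second pass: repeat against the pro-plan retention window; indexes are cleaned
    # and documents disabled only for tenants whose current plan resolves to "sandbox".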
    while True:
        try:
            # Subquery for counting new documents
            document_subquery_new = (
                db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
                .filter(
                    Document.indexing_status == "completed",
                    Document.enabled == True,
                    Document.archived == False,
                    Document.updated_at > plan_pro_clean_day,
                )
                .group_by(Document.dataset_id)
                .subquery()
            )

            # Subquery for counting old documents
            document_subquery_old = (
                db.session.query(Document.dataset_id, func.count(Document.id).label("document_count"))
                .filter(
                    Document.indexing_status == "completed",
                    Document.enabled == True,
                    Document.archived == False,
                    Document.updated_at < plan_pro_clean_day,
                )
                .group_by(Document.dataset_id)
                .subquery()
            )

            # Main query with join and filter
            stmt = (
                select(Dataset)
                .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id)
                .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id)
                .filter(
                    Dataset.created_at < plan_pro_clean_day,
                    func.coalesce(document_subquery_new.c.document_count, 0) == 0,
                    func.coalesce(document_subquery_old.c.document_count, 0) > 0,
                )
                .order_by(Dataset.created_at.desc())
            )

            datasets = db.paginate(stmt, page=1, per_page=50)
        except NotFound:
            break
        if datasets.items is None or len(datasets.items) == 0:
            break
        for dataset in datasets:
            dataset_query = (
                db.session.query(DatasetQuery)
                .filter(DatasetQuery.created_at > plan_pro_clean_day, DatasetQuery.dataset_id == dataset.id)
                .all()
            )
            if not dataset_query or len(dataset_query) == 0:
                try:
                    features_cache_key = f"features:{dataset.tenant_id}"
                    plan_cache = redis_client.get(features_cache_key)
                    if plan_cache is None:
                        features = FeatureService.get_features(dataset.tenant_id)
                        redis_client.setex(features_cache_key, 600, features.billing.subscription.plan)
                        plan = features.billing.subscription.plan
                    else:
                        plan = plan_cache.decode()
                    if plan == "sandbox":
                        # remove index
                        index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor()
                        index_processor.clean(dataset, None)

                        # update document
                        update_params = {Document.enabled: False}
                        db.session.query(Document).filter_by(dataset_id=dataset.id).update(update_params)
                        db.session.commit()
                        click.echo(
                            click.style("Cleaned unused dataset {} from db success!".format(dataset.id), fg="green")
                        )
                except Exception as e:
                    click.echo(
                        click.style("clean dataset index error: {} {}".format(e.__class__.__name__, str(e)), fg="red")
                    )
    end_at = time.perf_counter()
    click.echo(click.style("Cleaned unused dataset from db success latency: {}".format(end_at - start_at), fg="green"))
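

# Illustrative sketch (an assumption, not part of this file): a periodic task like the
# one above is typically driven by a Celery beat schedule. The module path, interval,
# and the "celery_app" handle below are placeholders showing how it could be wired up.
#
#   from celery.schedules import crontab
#
#   celery_app.conf.beat_schedule = {
#       "clean_unused_datasets_task": {
#           "task": "schedule.clean_unused_datasets_task.clean_unused_datasets_task",
#           "schedule": crontab(minute="0", hour="*"),  # e.g. hourly; the interval is an assumption
#       },
#   }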