You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

task_service.py 7.3KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import os
  17. import random
  18. from api.db.db_utils import bulk_insert_into_db
  19. from deepdoc.parser import PdfParser
  20. from peewee import JOIN
  21. from api.db.db_models import DB, File2Document, File
  22. from api.db import StatusEnum, FileType, TaskStatus
  23. from api.db.db_models import Task, Document, Knowledgebase, Tenant
  24. from api.db.services.common_service import CommonService
  25. from api.db.services.document_service import DocumentService
  26. from api.utils import current_timestamp, get_uuid
  27. from deepdoc.parser.excel_parser import RAGFlowExcelParser
  28. from rag.settings import SVR_QUEUE_NAME
  29. from rag.utils.storage_factory import STORAGE_IMPL
  30. from rag.utils.redis_conn import REDIS_CONN
class TaskService(CommonService):
    """Service-layer helpers for the ``Task`` table (document parsing jobs).

    Inherits generic CRUD helpers from :class:`CommonService`; every method
    here runs inside a peewee ``DB.connection_context()``.
    """

    # peewee model this service operates on (consumed by CommonService).
    model = Task

    @classmethod
    @DB.connection_context()
    def get_task(cls, task_id):
        """Fetch one task by id, joined with its document / KB / tenant info.

        Side effect: each successful lookup appends a progress message and
        increments ``retry_count`` on the task row.

        Returns:
            dict with task + document + knowledgebase + tenant fields, or
            ``None`` when the task does not exist or has already been
            attempted 3+ times (in which case it is marked abandoned by
            setting ``progress`` to -1 before returning).
        """
        fields = [
            cls.model.id,
            cls.model.doc_id,
            cls.model.from_page,
            cls.model.to_page,
            cls.model.retry_count,
            Document.kb_id,
            Document.parser_id,
            Document.parser_config,
            Document.name,
            Document.type,
            Document.location,
            Document.size,
            Knowledgebase.tenant_id,
            Knowledgebase.language,
            Knowledgebase.embd_id,
            Knowledgebase.pagerank,
            Tenant.img2txt_id,
            Tenant.asr_id,
            Tenant.llm_id,
            cls.model.update_time,
        ]
        docs = (
            cls.model.select(*fields)
            .join(Document, on=(cls.model.doc_id == Document.id))
            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id))
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
            .where(cls.model.id == task_id)
        )
        docs = list(docs.dicts())
        if not docs:
            return None

        msg = "\nTask has been received."
        # Small non-zero random progress so the UI shows the task as started.
        prog = random.random() / 10.0
        if docs[0]["retry_count"] >= 3:
            msg = "\nERROR: Task is abandoned after 3 times attempts."
            # Negative progress is the abandonment/failure marker
            # (see do_cancel, which treats progress < 0 as cancelled).
            prog = -1
        cls.model.update(
            progress_msg=cls.model.progress_msg + msg,
            progress=prog,
            retry_count=docs[0]["retry_count"] + 1,
        ).where(cls.model.id == docs[0]["id"]).execute()
        # Abandoned tasks are updated (marked failed) but never handed out.
        if docs[0]["retry_count"] >= 3:
            return None

        return docs[0]

    @classmethod
    @DB.connection_context()
    def get_ongoing_doc_name(cls):
        """List unique (bucket-ish id, location) pairs for in-flight documents.

        A document counts as in-flight when it is valid, its run state is
        RUNNING, it is not a virtual file, the task progress is below 1,
        and the task was created within the last 1000 * 600 time units
        (presumably milliseconds, i.e. the last 10 minutes — confirm the
        unit of ``current_timestamp()``).

        Returns:
            list of 2-tuples ``(parent_id or kb_id, location)`` — the file's
            parent folder id when one is linked via File2Document, otherwise
            the knowledgebase id. Empty list when nothing is running.
        """
        # NOTE(review): lock is named "get_task" — shared with other callers
        # of that lock name rather than specific to this method; confirm
        # this is intentional.
        with DB.lock("get_task", -1):
            docs = (
                cls.model.select(
                    *[Document.id, Document.kb_id, Document.location, File.parent_id]
                )
                .join(Document, on=(cls.model.doc_id == Document.id))
                # LEFT OUTER joins: a document may have no File2Document /
                # File link, in which case parent_id comes back NULL.
                .join(
                    File2Document,
                    on=(File2Document.document_id == Document.id),
                    join_type=JOIN.LEFT_OUTER,
                )
                .join(
                    File,
                    on=(File2Document.file_id == File.id),
                    join_type=JOIN.LEFT_OUTER,
                )
                .where(
                    Document.status == StatusEnum.VALID.value,
                    Document.run == TaskStatus.RUNNING.value,
                    ~(Document.type == FileType.VIRTUAL.value),
                    cls.model.progress < 1,
                    cls.model.create_time >= current_timestamp() - 1000 * 600,
                )
            )
            docs = list(docs.dicts())
            if not docs:
                return []

        # Deduplicate: several tasks may reference the same document.
        return list(
            set(
                [
                    (
                        d["parent_id"] if d["parent_id"] else d["kb_id"],
                        d["location"],
                    )
                    for d in docs
                ]
            )
        )

    @classmethod
    @DB.connection_context()
    def do_cancel(cls, id):
        """Return True when the task's document was cancelled or failed.

        Best-effort check: any lookup error (e.g. task row already deleted)
        is deliberately swallowed and reported as "not cancelled".
        """
        try:
            task = cls.model.get_by_id(id)
            _, doc = DocumentService.get_by_id(task.doc_id)
            # progress < 0 is the failure/abandonment marker set in get_task.
            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
        except Exception:
            pass
        return False

    @classmethod
    @DB.connection_context()
    def update_progress(cls, id, info):
        """Append ``info["progress_msg"]`` and/or set ``info["progress"]`` on a task.

        On macOS (``MACOS`` env var set) the update is performed without the
        distributed ``DB.lock`` — presumably because the lock is unavailable
        or unnecessary in local dev setups; confirm before relying on it.
        """
        if os.environ.get("MACOS"):
            if info["progress_msg"]:
                cls.model.update(
                    progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]
                ).where(cls.model.id == id).execute()
            if "progress" in info:
                cls.model.update(progress=info["progress"]).where(
                    cls.model.id == id
                ).execute()
            return

        with DB.lock("update_progress", -1):
            if info["progress_msg"]:
                cls.model.update(
                    progress_msg=cls.model.progress_msg + "\n" + info["progress_msg"]
                ).where(cls.model.id == id).execute()
            if "progress" in info:
                cls.model.update(progress=info["progress"]).where(
                    cls.model.id == id
                ).execute()
  154. def queue_tasks(doc: dict, bucket: str, name: str):
  155. def new_task():
  156. return {"id": get_uuid(), "doc_id": doc["id"]}
  157. tsks = []
  158. if doc["type"] == FileType.PDF.value:
  159. file_bin = STORAGE_IMPL.get(bucket, name)
  160. do_layout = doc["parser_config"].get("layout_recognize", True)
  161. pages = PdfParser.total_page_number(doc["name"], file_bin)
  162. page_size = doc["parser_config"].get("task_page_size", 12)
  163. if doc["parser_id"] == "paper":
  164. page_size = doc["parser_config"].get("task_page_size", 22)
  165. if doc["parser_id"] in ["one", "knowledge_graph"] or not do_layout:
  166. page_size = 10**9
  167. page_ranges = doc["parser_config"].get("pages") or [(1, 10**5)]
  168. for s, e in page_ranges:
  169. s -= 1
  170. s = max(0, s)
  171. e = min(e - 1, pages)
  172. for p in range(s, e, page_size):
  173. task = new_task()
  174. task["from_page"] = p
  175. task["to_page"] = min(p + page_size, e)
  176. tsks.append(task)
  177. elif doc["parser_id"] == "table":
  178. file_bin = STORAGE_IMPL.get(bucket, name)
  179. rn = RAGFlowExcelParser.row_number(doc["name"], file_bin)
  180. for i in range(0, rn, 3000):
  181. task = new_task()
  182. task["from_page"] = i
  183. task["to_page"] = min(i + 3000, rn)
  184. tsks.append(task)
  185. else:
  186. tsks.append(new_task())
  187. bulk_insert_into_db(Task, tsks, True)
  188. DocumentService.begin2parse(doc["id"])
  189. for t in tsks:
  190. assert REDIS_CONN.queue_product(
  191. SVR_QUEUE_NAME, message=t
  192. ), "Can't access Redis. Please check the Redis' status."