
task_service.py 11KB

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import random

import xxhash
from datetime import datetime

from api.db.db_utils import bulk_insert_into_db
from deepdoc.parser import PdfParser
from peewee import JOIN
from api.db.db_models import DB, File2Document, File
from api.db import StatusEnum, FileType, TaskStatus
from api.db.db_models import Task, Document, Knowledgebase, Tenant
from api.db.services.common_service import CommonService
from api.db.services.document_service import DocumentService
from api.utils import current_timestamp, get_uuid
from deepdoc.parser.excel_parser import RAGFlowExcelParser
from rag.settings import SVR_QUEUE_NAME
from rag.utils.storage_factory import STORAGE_IMPL
from rag.utils.redis_conn import REDIS_CONN
from api import settings
from rag.nlp import search


def trim_header_by_lines(text: str, max_length) -> str:
    len_text = len(text)
    if len_text <= max_length:
        return text
    for i in range(len_text):
        if text[i] == '\n' and len_text - i <= max_length:
            return text[i + 1:]
    return text
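# Illustration (not from the original file): trim_header_by_lines drops whole
# leading lines until the remainder fits within max_length characters, e.g.
#     trim_header_by_lines("line1\nline2\nline3", 12)  ->  "line2\nline3"
# Text that already fits is returned unchanged.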


class TaskService(CommonService):
    model = Task

    @classmethod
    @DB.connection_context()
    def get_task(cls, task_id):
        fields = [
            cls.model.id,
            cls.model.doc_id,
            cls.model.from_page,
            cls.model.to_page,
            cls.model.retry_count,
            Document.kb_id,
            Document.parser_id,
            Document.parser_config,
            Document.name,
            Document.type,
            Document.location,
            Document.size,
            Knowledgebase.tenant_id,
            Knowledgebase.language,
            Knowledgebase.embd_id,
            Knowledgebase.pagerank,
            Knowledgebase.parser_config.alias("kb_parser_config"),
            Tenant.img2txt_id,
            Tenant.asr_id,
            Tenant.llm_id,
            cls.model.update_time,
        ]
        docs = (
            cls.model.select(*fields)
            .join(Document, on=(cls.model.doc_id == Document.id))
            .join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id))
            .join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
            .where(cls.model.id == task_id)
        )
        docs = list(docs.dicts())
        if not docs:
            return None

        msg = f"\n{datetime.now().strftime('%H:%M:%S')} Task has been received."
        prog = random.random() / 10.0
        if docs[0]["retry_count"] >= 3:
            msg = "\nERROR: Task is abandoned after 3 times attempts."
            prog = -1

        cls.model.update(
            progress_msg=cls.model.progress_msg + msg,
            progress=prog,
            retry_count=docs[0]["retry_count"] + 1,
        ).where(cls.model.id == docs[0]["id"]).execute()

        if docs[0]["retry_count"] >= 3:
            return None

        return docs[0]

    @classmethod
    @DB.connection_context()
    def get_tasks(cls, doc_id: str):
        fields = [
            cls.model.id,
            cls.model.from_page,
            cls.model.progress,
            cls.model.digest,
            cls.model.chunk_ids,
        ]
        tasks = (
            cls.model.select(*fields).order_by(cls.model.from_page.asc(), cls.model.create_time.desc())
            .where(cls.model.doc_id == doc_id)
        )
        tasks = list(tasks.dicts())
        if not tasks:
            return None
        return tasks

    @classmethod
    @DB.connection_context()
    def update_chunk_ids(cls, id: str, chunk_ids: str):
        cls.model.update(chunk_ids=chunk_ids).where(cls.model.id == id).execute()

    @classmethod
    @DB.connection_context()
    def get_ongoing_doc_name(cls):
        with DB.lock("get_task", -1):
            docs = (
                cls.model.select(
                    *[Document.id, Document.kb_id, Document.location, File.parent_id]
                )
                .join(Document, on=(cls.model.doc_id == Document.id))
                .join(
                    File2Document,
                    on=(File2Document.document_id == Document.id),
                    join_type=JOIN.LEFT_OUTER,
                )
                .join(
                    File,
                    on=(File2Document.file_id == File.id),
                    join_type=JOIN.LEFT_OUTER,
                )
                .where(
                    Document.status == StatusEnum.VALID.value,
                    Document.run == TaskStatus.RUNNING.value,
                    ~(Document.type == FileType.VIRTUAL.value),
                    cls.model.progress < 1,
                    cls.model.create_time >= current_timestamp() - 1000 * 600,
                )
            )
            docs = list(docs.dicts())
            if not docs:
                return []

            return list(
                set(
                    [
                        (
                            d["parent_id"] if d["parent_id"] else d["kb_id"],
                            d["location"],
                        )
                        for d in docs
                    ]
                )
            )

    @classmethod
    @DB.connection_context()
    def do_cancel(cls, id):
        task = cls.model.get_by_id(id)
        _, doc = DocumentService.get_by_id(task.doc_id)
        return doc.run == TaskStatus.CANCEL.value or doc.progress < 0

    @classmethod
    @DB.connection_context()
    def update_progress(cls, id, info):
        if os.environ.get("MACOS"):
            if info["progress_msg"]:
                task = cls.model.get_by_id(id)
                progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
                cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
            if "progress" in info:
                cls.model.update(progress=info["progress"]).where(
                    cls.model.id == id
                ).execute()
            return

        with DB.lock("update_progress", -1):
            if info["progress_msg"]:
                task = cls.model.get_by_id(id)
                progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
                cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
            if "progress" in info:
                cls.model.update(progress=info["progress"]).where(
                    cls.model.id == id
                ).execute()


def queue_tasks(doc: dict, bucket: str, name: str):
    def new_task():
        return {"id": get_uuid(), "doc_id": doc["id"], "progress": 0.0, "from_page": 0, "to_page": 100000000}

    parse_task_array = []

    if doc["type"] == FileType.PDF.value:
        file_bin = STORAGE_IMPL.get(bucket, name)
        do_layout = doc["parser_config"].get("layout_recognize", "DeepDOC")
        pages = PdfParser.total_page_number(doc["name"], file_bin)
        page_size = doc["parser_config"].get("task_page_size", 12)
        if doc["parser_id"] == "paper":
            page_size = doc["parser_config"].get("task_page_size", 22)
        if doc["parser_id"] in ["one", "knowledge_graph"] or do_layout != "DeepDOC":
            page_size = 10 ** 9
        page_ranges = doc["parser_config"].get("pages") or [(1, 10 ** 5)]
        for s, e in page_ranges:
            s -= 1
            s = max(0, s)
            e = min(e - 1, pages)
            for p in range(s, e, page_size):
                task = new_task()
                task["from_page"] = p
                task["to_page"] = min(p + page_size, e)
                parse_task_array.append(task)

    elif doc["parser_id"] == "table":
        file_bin = STORAGE_IMPL.get(bucket, name)
        rn = RAGFlowExcelParser.row_number(doc["name"], file_bin)
        for i in range(0, rn, 3000):
            task = new_task()
            task["from_page"] = i
            task["to_page"] = min(i + 3000, rn)
            parse_task_array.append(task)
    else:
        parse_task_array.append(new_task())

    chunking_config = DocumentService.get_chunking_config(doc["id"])
    for task in parse_task_array:
        hasher = xxhash.xxh64()
        for field in sorted(chunking_config.keys()):
            if field == "parser_config":
                for k in ["raptor", "graphrag"]:
                    if k in chunking_config[field]:
                        del chunking_config[field][k]
            hasher.update(str(chunking_config[field]).encode("utf-8"))
        for field in ["doc_id", "from_page", "to_page"]:
            hasher.update(str(task.get(field, "")).encode("utf-8"))
        task_digest = hasher.hexdigest()
        task["digest"] = task_digest
        task["progress"] = 0.0

    prev_tasks = TaskService.get_tasks(doc["id"])
    ck_num = 0
    if prev_tasks:
        for task in parse_task_array:
            ck_num += reuse_prev_task_chunks(task, prev_tasks, chunking_config)
        TaskService.filter_delete([Task.doc_id == doc["id"]])
        chunk_ids = []
        for task in prev_tasks:
            if task["chunk_ids"]:
                chunk_ids.extend(task["chunk_ids"].split())
        if chunk_ids:
            settings.docStoreConn.delete({"id": chunk_ids}, search.index_name(chunking_config["tenant_id"]),
                                         chunking_config["kb_id"])
    DocumentService.update_by_id(doc["id"], {"chunk_num": ck_num})

    bulk_insert_into_db(Task, parse_task_array, True)
    DocumentService.begin2parse(doc["id"])

    unfinished_task_array = [task for task in parse_task_array if task["progress"] < 1.0]
    for unfinished_task in unfinished_task_array:
        assert REDIS_CONN.queue_product(
            SVR_QUEUE_NAME, message=unfinished_task
        ), "Can't access Redis. Please check the Redis' status."
def reuse_prev_task_chunks(task: dict, prev_tasks: list[dict], chunking_config: dict):
    idx = 0
    while idx < len(prev_tasks):
        prev_task = prev_tasks[idx]
        if prev_task.get("from_page", 0) == task.get("from_page", 0) \
                and prev_task.get("digest", 0) == task.get("digest", ""):
            break
        idx += 1

    if idx >= len(prev_tasks):
        return 0

    prev_task = prev_tasks[idx]
    if prev_task["progress"] < 1.0 or not prev_task["chunk_ids"]:
        return 0

    task["chunk_ids"] = prev_task["chunk_ids"]
    task["progress"] = 1.0
    if "from_page" in task and "to_page" in task and int(task['to_page']) - int(task['from_page']) >= 10 ** 6:
        task["progress_msg"] = f"Page({task['from_page']}~{task['to_page']}): "
    else:
        task["progress_msg"] = ""
    task["progress_msg"] = " ".join(
        [datetime.now().strftime("%H:%M:%S"), task["progress_msg"], "Reused previous task's chunks."])
    prev_task["chunk_ids"] = ""

    return len(task["chunk_ids"].split())
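
A minimal caller sketch, not part of task_service.py: the document dict, bucket, and object name below are hypothetical placeholders. In RAGFlow they come from the document upload/parse flow, and the file is assumed to have already been stored under (bucket, name) via STORAGE_IMPL.

# Hypothetical usage sketch -- values are placeholders, not RAGFlow defaults.
from api.db import FileType
from api.db.services.task_service import queue_tasks

doc = {
    "id": "0f3e...",                          # document id (placeholder)
    "name": "manual.pdf",
    "type": FileType.PDF.value,               # selects the PDF page-splitting branch
    "parser_id": "naive",
    "parser_config": {"task_page_size": 12},  # pages per parsing task
}

# Splits the document into page-range tasks, reuses chunks from unchanged
# previous tasks (matched by from_page and xxhash digest), bulk-inserts the
# tasks, and pushes the unfinished ones onto the Redis queue SVR_QUEUE_NAME.
queue_tasks(doc, bucket="kb-bucket-id", name="manual.pdf")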