
task_executor.py (11 KB)

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import json
import logging
import os
import hashlib
import copy
import re
import sys
import time
import traceback
from functools import partial
from api.db.services.file2document_service import File2DocumentService
from rag.utils import MINIO
from api.db.db_models import close_connection
from rag.settings import database_logger
from rag.settings import cron_logger, DOC_MAXIMUM_SIZE
from multiprocessing import Pool
import numpy as np
from elasticsearch_dsl import Q
from multiprocessing.context import TimeoutError
from api.db.services.task_service import TaskService
from rag.utils import ELASTICSEARCH
from timeit import default_timer as timer
from rag.utils import rmSpace, findMaxTm
from rag.nlp import search
from io import BytesIO
import pandas as pd
from rag.app import laws, paper, presentation, manual, qa, table, book, resume, picture, naive, one
from api.db import LLMType, ParserType
from api.db.services.document_service import DocumentService
from api.db.services.llm_service import LLMBundle
from api.utils.file_utils import get_project_base_directory
from rag.utils.redis_conn import REDIS_CONN

BATCH_SIZE = 64
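
# Map each parser_id to the chunking module that implements it.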
FACTORY = {
    "general": naive,
    ParserType.NAIVE.value: naive,
    ParserType.PAPER.value: paper,
    ParserType.BOOK.value: book,
    ParserType.PRESENTATION.value: presentation,
    ParserType.MANUAL.value: manual,
    ParserType.LAWS.value: laws,
    ParserType.QA.value: qa,
    ParserType.TABLE.value: table,
    ParserType.RESUME.value: resume,
    ParserType.PICTURE.value: picture,
    ParserType.ONE.value: one,
}

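# Write progress/status messages back to the task record; exits the worker
# process if the task has been cancelled.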
def set_progress(task_id, from_page=0, to_page=-1,
                 prog=None, msg="Processing..."):
    if prog is not None and prog < 0:
        msg = "[ERROR]" + msg
    cancel = TaskService.do_cancel(task_id)
    if cancel:
        msg += " [Canceled]"
        prog = -1
    if to_page > 0:
        if msg:
            msg = f"Page({from_page + 1}~{to_page + 1}): " + msg
    d = {"progress_msg": msg}
    if prog is not None:
        d["progress"] = prog
    try:
        TaskService.update_progress(task_id, d)
    except Exception as e:
        cron_logger.error("set_progress:({}), {}".format(task_id, str(e)))

    if cancel:
        sys.exit()

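# Fetch the pending tasks newer than `tm` that are assigned to this consumer,
# as a pandas DataFrame (empty if nothing is pending).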
def collect(comm, mod, tm):
    tasks = TaskService.get_tasks(tm, mod, comm)
    if len(tasks) == 0:
        time.sleep(1)
        return pd.DataFrame()
    tasks = pd.DataFrame(tasks)
    mtm = tasks["update_time"].max()
    cron_logger.info("TOTAL:{}, To:{}".format(len(tasks), mtm))
    return tasks

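# Prefer the Redis cache, polling up to ~30s for the producer to fill it,
# then fall back to fetching the object directly from MinIO.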
def get_minio_binary(bucket, name):
    global MINIO
    if REDIS_CONN.is_alive():
        try:
            for _ in range(30):
                if REDIS_CONN.exist("{}/{}".format(bucket, name)):
                    time.sleep(1)
                    break
                time.sleep(1)
            r = REDIS_CONN.get("{}/{}".format(bucket, name))
            if r:
                return r
            cron_logger.warning("Cache missing: {}".format(name))
        except Exception as e:
            cron_logger.warning("Get redis[EXCEPTION]:" + str(e))
    return MINIO.get(bucket, name)

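# Download a document, split it into chunks with the configured parser, and
# upload any chunk images to MinIO. Returns the list of chunk dicts, [] for
# oversized files, or None on failure.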
def build(row):
    if row["size"] > DOC_MAXIMUM_SIZE:
        set_progress(row["id"], prog=-1, msg="File size exceeds the limit (<= %d MB)." %
                     (int(DOC_MAXIMUM_SIZE / 1024 / 1024)))
        return []

    callback = partial(
        set_progress,
        row["id"],
        row["from_page"],
        row["to_page"])
    chunker = FACTORY[row["parser_id"].lower()]
    pool = Pool(processes=1)
    try:
        st = timer()
        bucket, name = File2DocumentService.get_minio_address(doc_id=row["doc_id"])
        thr = pool.apply_async(get_minio_binary, args=(bucket, name))
        binary = thr.get(timeout=90)
        pool.terminate()
        cron_logger.info(
            "From minio({}) {}/{}".format(timer() - st, row["location"], row["name"]))
        cks = chunker.chunk(row["name"], binary=binary, from_page=row["from_page"],
                            to_page=row["to_page"], lang=row["language"], callback=callback,
                            kb_id=row["kb_id"], parser_config=row["parser_config"],
                            tenant_id=row["tenant_id"])
        cron_logger.info(
            "Chunking({}) {}/{}".format(timer() - st, row["location"], row["name"]))
    except TimeoutError:
        callback(-1, "Internal server error: Fetch file timeout. Could you try it again?")
        cron_logger.error(
            "Chunking {}/{}: Fetch file timeout.".format(row["location"], row["name"]))
        return
    except Exception as e:
        if re.search("(No such file|not found)", str(e)):
            callback(-1, "Can not find file <%s>" % row["name"])
        else:
            callback(-1, "Internal server error: %s" % str(e).replace("'", ""))
        pool.terminate()
        traceback.print_exc()
        cron_logger.error(
            "Chunking {}/{}: {}".format(row["location"], row["name"], str(e)))
        return

    docs = []
    doc = {
        "doc_id": row["doc_id"],
        "kb_id": [str(row["kb_id"])]
    }
    el = 0
    for ck in cks:
        d = copy.deepcopy(doc)
        d.update(ck)
        md5 = hashlib.md5()
        md5.update((ck["content_with_weight"] +
                    str(d["doc_id"])).encode("utf-8"))
        d["_id"] = md5.hexdigest()
        d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
        d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
        if not d.get("image"):
            docs.append(d)
            continue

        output_buffer = BytesIO()
        if isinstance(d["image"], bytes):
            output_buffer = BytesIO(d["image"])
        else:
            d["image"].save(output_buffer, format='JPEG')

        st = timer()
        MINIO.put(row["kb_id"], d["_id"], output_buffer.getvalue())
        el += timer() - st
        d["img_id"] = "{}-{}".format(row["kb_id"], d["_id"])
        del d["image"]
        docs.append(d)
    cron_logger.info("MINIO PUT({}):{}".format(row["name"], el))

    return docs

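# Create the tenant's Elasticsearch index from conf/mapping.json if it does
# not exist yet.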
def init_kb(row):
    idxnm = search.index_name(row["tenant_id"])
    if ELASTICSEARCH.indexExist(idxnm):
        return
    return ELASTICSEARCH.createIdx(idxnm, json.load(
        open(os.path.join(get_project_base_directory(), "conf", "mapping.json"), "r")))

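# Embed chunk contents (and, when available, filename/title tokens) in
# batches, then attach the weighted vector to each doc as "q_<dim>_vec".
# Returns the number of tokens consumed by the embedding model.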
def embedding(docs, mdl, parser_config={}, callback=None):
    batch_size = 32
    tts, cnts = [rmSpace(d["title_tks"]) for d in docs if d.get("title_tks")], [
        re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", d["content_with_weight"]) for d in docs]
    tk_count = 0
    if len(tts) == len(cnts):
        tts_ = np.array([])
        for i in range(0, len(tts), batch_size):
            vts, c = mdl.encode(tts[i: i + batch_size])
            if len(tts_) == 0:
                tts_ = vts
            else:
                tts_ = np.concatenate((tts_, vts), axis=0)
            tk_count += c
            callback(prog=0.6 + 0.1 * (i + 1) / len(tts), msg="")
        tts = tts_

    cnts_ = np.array([])
    for i in range(0, len(cnts), batch_size):
        vts, c = mdl.encode(cnts[i: i + batch_size])
        if len(cnts_) == 0:
            cnts_ = vts
        else:
            cnts_ = np.concatenate((cnts_, vts), axis=0)
        tk_count += c
        callback(prog=0.7 + 0.2 * (i + 1) / len(cnts), msg="")
    cnts = cnts_

    title_w = float(parser_config.get("filename_embd_weight", 0.1))
    vects = (title_w * tts + (1 - title_w) *
             cnts) if len(tts) == len(cnts) else cnts

    assert len(vects) == len(docs)
    for i, d in enumerate(docs):
        v = vects[i].tolist()
        d["q_%d_vec" % len(v)] = v
    return tk_count

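# One polling pass: collect pending tasks, then chunk, embed, and index each
# one, appending the task's update_time to the .tm checkpoint file on success.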
def main(comm, mod):
    tm_fnm = os.path.join(
        get_project_base_directory(),
        "rag/res",
        f"{comm}-{mod}.tm")
    tm = findMaxTm(tm_fnm)
    rows = collect(comm, mod, tm)
    if len(rows) == 0:
        return

    tmf = open(tm_fnm, "a+")
    for _, r in rows.iterrows():
        callback = partial(set_progress, r["id"], r["from_page"], r["to_page"])
        try:
            embd_mdl = LLMBundle(r["tenant_id"], LLMType.EMBEDDING, llm_name=r["embd_id"], lang=r["language"])
        except Exception as e:
            traceback.print_exc()
            callback(prog=-1, msg=str(e))
            continue

        st = timer()
        cks = build(r)
        cron_logger.info("Build chunks({}): {}".format(r["name"], timer() - st))
        if cks is None:
            continue
        if not cks:
            tmf.write(str(r["update_time"]) + "\n")
            callback(1., "No chunk! Done!")
            continue
        # TODO: exception handler
        callback(
            msg="Finished slicing files (%d). Start to embed the content." %
                len(cks))
        st = timer()
        try:
            tk_count = embedding(cks, embd_mdl, r["parser_config"], callback)
        except Exception as e:
            callback(-1, "Embedding error:{}".format(str(e)))
            cron_logger.error(str(e))
            tk_count = 0
        cron_logger.info("Embedding elapsed({}): {}".format(r["name"], timer() - st))

        callback(msg="Finished embedding({})! Start to build the index!".format(timer() - st))
        init_kb(r)
        chunk_count = len(set([c["_id"] for c in cks]))
        st = timer()
        es_r = ELASTICSEARCH.bulk(cks, search.index_name(r["tenant_id"]))
        cron_logger.info("Indexing elapsed({}): {}".format(r["name"], timer() - st))
        if es_r:
            callback(-1, "Index failure!")
            ELASTICSEARCH.deleteByQuery(
                Q("match", doc_id=r["doc_id"]), idxnm=search.index_name(r["tenant_id"]))
            cron_logger.error(str(es_r))
        else:
            if TaskService.do_cancel(r["id"]):
                ELASTICSEARCH.deleteByQuery(
                    Q("match", doc_id=r["doc_id"]), idxnm=search.index_name(r["tenant_id"]))
                continue
            callback(1., "Done!")
            DocumentService.increment_chunk_num(
                r["doc_id"], r["kb_id"], tk_count, chunk_count, 0)
            cron_logger.info(
                "Chunk doc({}), token({}), chunks({}), elapsed:{}".format(
                    r["id"], tk_count, len(cks), timer() - st))
        tmf.write(str(r["update_time"]) + "\n")
    tmf.close()

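# Worker entry point: the two CLI arguments identify this consumer for task
# polling (they are passed through to collect()).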
if __name__ == "__main__":
    peewee_logger = logging.getLogger('peewee')
    peewee_logger.propagate = False
    peewee_logger.addHandler(database_logger.handlers[0])
    peewee_logger.setLevel(database_logger.level)

    while True:
        main(int(sys.argv[2]), int(sys.argv[1]))
        close_connection()