
task_executor.py

#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from beartype import BeartypeConf
# from beartype.claw import beartype_all  # <-- you didn't sign up for this
# beartype_all(conf=BeartypeConf(violation_type=UserWarning))  # <-- emit warnings from all code
import logging
import sys
from api.utils.log_utils import initRootLogger
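
# The consumer number comes from the first CLI argument so that several
# executor processes can share one queue; the root logger is initialized
# before the heavyweight imports below so they log through it.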
CONSUMER_NO = "0" if len(sys.argv) < 2 else sys.argv[1]
CONSUMER_NAME = "task_executor_" + CONSUMER_NO
initRootLogger(CONSUMER_NAME)
for module in ["pdfminer"]:
    module_logger = logging.getLogger(module)
    module_logger.setLevel(logging.WARNING)
for module in ["peewee"]:
    module_logger = logging.getLogger(module)
    module_logger.handlers.clear()
    module_logger.propagate = True

from datetime import datetime
import json
import os
import hashlib
import copy
import re
import sys
import time
import threading
from functools import partial
from io import BytesIO
from multiprocessing.context import TimeoutError
from timeit import default_timer as timer
import tracemalloc

import numpy as np

from api.db import LLMType, ParserType
from api.db.services.dialog_service import keyword_extraction, question_proposal
from api.db.services.document_service import DocumentService
from api.db.services.llm_service import LLMBundle
from api.db.services.task_service import TaskService
from api.db.services.file2document_service import File2DocumentService
from api import settings
from api.db.db_models import close_connection
from rag.app import laws, paper, presentation, manual, qa, table, book, resume, picture, naive, one, audio, \
    knowledge_graph, email
from rag.nlp import search, rag_tokenizer
from rag.raptor import RecursiveAbstractiveProcessing4TreeOrganizedRetrieval as Raptor
from rag.settings import DOC_MAXIMUM_SIZE, SVR_QUEUE_NAME
from rag.utils import rmSpace, num_tokens_from_string
from rag.utils.redis_conn import REDIS_CONN, Payload
from rag.utils.storage_factory import STORAGE_IMPL

BATCH_SIZE = 64
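
# Dispatch table: map a task's parser_id to the chunker module that splits the
# document; "general" is an alias for the naive parser.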
FACTORY = {
    "general": naive,
    ParserType.NAIVE.value: naive,
    ParserType.PAPER.value: paper,
    ParserType.BOOK.value: book,
    ParserType.PRESENTATION.value: presentation,
    ParserType.MANUAL.value: manual,
    ParserType.LAWS.value: laws,
    ParserType.QA.value: qa,
    ParserType.TABLE.value: table,
    ParserType.RESUME.value: resume,
    ParserType.PICTURE.value: picture,
    ParserType.ONE.value: one,
    ParserType.AUDIO.value: audio,
    ParserType.EMAIL.value: email,
    ParserType.KG.value: knowledge_graph
}
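
# Shared state between the worker loop and the heartbeat thread; the task
# counters and CURRENT_TASK are guarded by mt_lock.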
CONSUMER_NAME = "task_consumer_" + CONSUMER_NO
PAYLOAD: Payload | None = None
BOOT_AT = datetime.now().isoformat()
PENDING_TASKS = 0
LAG_TASKS = 0

mt_lock = threading.Lock()
DONE_TASKS = 0
FAILED_TASKS = 0
CURRENT_TASK = None
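

# Push a progress update (fraction and/or message) for a task; a negative
# `prog` marks an error. If the task has been canceled, acknowledge the queue
# message and terminate this process immediately.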
def set_progress(task_id, from_page=0, to_page=-1, prog=None, msg="Processing..."):
    global PAYLOAD
    if prog is not None and prog < 0:
        msg = "[ERROR]" + msg
    cancel = TaskService.do_cancel(task_id)
    if cancel:
        msg += " [Canceled]"
        prog = -1

    if to_page > 0:
        if msg:
            msg = f"Page({from_page + 1}~{to_page + 1}): " + msg
    d = {"progress_msg": msg}
    if prog is not None:
        d["progress"] = prog
    try:
        TaskService.update_progress(task_id, d)
    except Exception:
        logging.exception(f"set_progress({task_id}) got exception")

    close_connection()
    if cancel:
        if PAYLOAD:
            PAYLOAD.ack()
            PAYLOAD = None
        os._exit(0)
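

# Fetch the next task from the Redis queue: first retry any message this
# consumer left unacknowledged, then consume a fresh one. Returns the task
# row, or None if the queue is empty or the task was canceled or has vanished.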
def collect():
    global CONSUMER_NAME, PAYLOAD, DONE_TASKS, FAILED_TASKS
    try:
        PAYLOAD = REDIS_CONN.get_unacked_for(CONSUMER_NAME, SVR_QUEUE_NAME, "rag_flow_svr_task_broker")
        if not PAYLOAD:
            PAYLOAD = REDIS_CONN.queue_consumer(SVR_QUEUE_NAME, "rag_flow_svr_task_broker", CONSUMER_NAME)
        if not PAYLOAD:
            time.sleep(1)
            return None
    except Exception:
        logging.exception("Get task event from queue exception")
        return None

    msg = PAYLOAD.get_message()
    if not msg:
        return None

    if TaskService.do_cancel(msg["id"]):
        with mt_lock:
            DONE_TASKS += 1
        logging.info("Task {} has been canceled.".format(msg["id"]))
        return None
    task = TaskService.get_task(msg["id"])
    if not task:
        with mt_lock:
            DONE_TASKS += 1
        logging.warning("{} empty task!".format(msg["id"]))
        return None

    if msg.get("type", "") == "raptor":
        task["task_type"] = "raptor"
    return task


def get_storage_binary(bucket, name):
    return STORAGE_IMPL.get(bucket, name)
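

# Fetch the document from object storage, chunk it with the parser selected by
# parser_id, store each chunk's image, and optionally enrich chunks with
# auto-generated keywords and questions. Returns the list of chunk documents.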
def build(row):
    if row["size"] > DOC_MAXIMUM_SIZE:
        set_progress(row["id"], prog=-1, msg="File size exceeds the limit (<= %dMB)" %
                     (int(DOC_MAXIMUM_SIZE / 1024 / 1024)))
        return []

    callback = partial(
        set_progress,
        row["id"],
        row["from_page"],
        row["to_page"])
    chunker = FACTORY[row["parser_id"].lower()]
    try:
        st = timer()
        bucket, name = File2DocumentService.get_storage_address(doc_id=row["doc_id"])
        binary = get_storage_binary(bucket, name)
        logging.info(
            "From minio({}) {}/{}".format(timer() - st, row["location"], row["name"]))
    except TimeoutError:
        callback(-1, "Internal server error: Fetching the file from minio timed out. Could you try again?")
        logging.exception(
            "Minio {}/{} got timeout: Fetch file from minio timeout.".format(row["location"], row["name"]))
        raise
    except Exception as e:
        if re.search("(No such file|not found)", str(e)):
            callback(-1, "Can not find file <%s> from minio. Could you try again?" % row["name"])
        else:
            callback(-1, "Get file from minio: %s" % str(e).replace("'", ""))
        logging.exception("Chunking {}/{} got exception".format(row["location"], row["name"]))
        raise

    try:
        cks = chunker.chunk(row["name"], binary=binary, from_page=row["from_page"],
                            to_page=row["to_page"], lang=row["language"], callback=callback,
                            kb_id=row["kb_id"], parser_config=row["parser_config"], tenant_id=row["tenant_id"])
        logging.info("Chunking({}) {}/{} done".format(timer() - st, row["location"], row["name"]))
    except Exception as e:
        callback(-1, "Internal server error while chunking: %s" %
                 str(e).replace("'", ""))
        logging.exception("Chunking {}/{} got exception".format(row["location"], row["name"]))
        raise

    docs = []
    doc = {
        "doc_id": row["doc_id"],
        "kb_id": str(row["kb_id"])
    }
    el = 0
    for ck in cks:
        d = copy.deepcopy(doc)
        d.update(ck)
        md5 = hashlib.md5()
        md5.update((ck["content_with_weight"] +
                    str(d["doc_id"])).encode("utf-8"))
        d["id"] = md5.hexdigest()
        d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
        d["create_timestamp_flt"] = datetime.now().timestamp()
        if not d.get("image"):
            _ = d.pop("image", None)
            d["img_id"] = ""
            d["page_num_list"] = json.dumps([])
            d["position_list"] = json.dumps([])
            d["top_list"] = json.dumps([])
            docs.append(d)
            continue

        try:
            output_buffer = BytesIO()
            if isinstance(d["image"], bytes):
                output_buffer = BytesIO(d["image"])
            else:
                d["image"].save(output_buffer, format='JPEG')

            st = timer()
            STORAGE_IMPL.put(row["kb_id"], d["id"], output_buffer.getvalue())
            el += timer() - st
        except Exception:
            # Note: the chunk id is stored under "id" (see above), not "_id".
            logging.exception(
                "Saving image of chunk {}/{}/{} got exception".format(row["location"], row["name"], d["id"]))
            raise

        d["img_id"] = "{}-{}".format(row["kb_id"], d["id"])
        del d["image"]
        docs.append(d)
    logging.info("MINIO PUT({}):{}".format(row["name"], el))

    if row["parser_config"].get("auto_keywords", 0):
        st = timer()
        callback(msg="Start to generate keywords for every chunk ...")
        chat_mdl = LLMBundle(row["tenant_id"], LLMType.CHAT, llm_name=row["llm_id"], lang=row["language"])
        for d in docs:
            d["important_kwd"] = keyword_extraction(chat_mdl, d["content_with_weight"],
                                                    row["parser_config"]["auto_keywords"]).split(",")
            d["important_tks"] = rag_tokenizer.tokenize(" ".join(d["important_kwd"]))
        callback(msg="Keywords generation completed in {:.2f}s".format(timer() - st))

    if row["parser_config"].get("auto_questions", 0):
        st = timer()
        callback(msg="Start to generate questions for every chunk ...")
        chat_mdl = LLMBundle(row["tenant_id"], LLMType.CHAT, llm_name=row["llm_id"], lang=row["language"])
        for d in docs:
            qst = question_proposal(chat_mdl, d["content_with_weight"], row["parser_config"]["auto_questions"])
            d["content_with_weight"] = f"Question: \n{qst}\n\nAnswer:\n" + d["content_with_weight"]
            qst = rag_tokenizer.tokenize(qst)
            if "content_ltks" in d:
                d["content_ltks"] += " " + qst
            if "content_sm_ltks" in d:
                d["content_sm_ltks"] += " " + rag_tokenizer.fine_grained_tokenize(qst)
        callback(msg="Question generation completed in {:.2f}s".format(timer() - st))

    return docs


def init_kb(row, vector_size: int):
    idxnm = search.index_name(row["tenant_id"])
    return settings.docStoreConn.createIdx(idxnm, row["kb_id"], vector_size)
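

# Embed every chunk in batches. The final vector is a weighted mix of the
# filename (title) embedding and the content embedding, controlled by
# parser_config["filename_embd_weight"] (default 0.1). Returns the number of
# tokens consumed and the embedding dimension.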
def embedding(docs, mdl, parser_config=None, callback=None):
    if parser_config is None:
        parser_config = {}
    batch_size = 32
    tts, cnts = [rmSpace(d["title_tks"]) for d in docs if d.get("title_tks")], [
        re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", d["content_with_weight"]) for d in docs]
    tk_count = 0
    if len(tts) == len(cnts):
        tts_ = np.array([])
        for i in range(0, len(tts), batch_size):
            vts, c = mdl.encode(tts[i: i + batch_size])
            if len(tts_) == 0:
                tts_ = vts
            else:
                tts_ = np.concatenate((tts_, vts), axis=0)
            tk_count += c
            callback(prog=0.6 + 0.1 * (i + 1) / len(tts), msg="")
        tts = tts_

    cnts_ = np.array([])
    for i in range(0, len(cnts), batch_size):
        vts, c = mdl.encode(cnts[i: i + batch_size])
        if len(cnts_) == 0:
            cnts_ = vts
        else:
            cnts_ = np.concatenate((cnts_, vts), axis=0)
        tk_count += c
        callback(prog=0.7 + 0.2 * (i + 1) / len(cnts), msg="")
    cnts = cnts_

    title_w = float(parser_config.get("filename_embd_weight", 0.1))
    vects = (title_w * tts + (1 - title_w) *
             cnts) if len(tts) == len(cnts) else cnts

    assert len(vects) == len(docs)
    vector_size = 0
    for i, d in enumerate(docs):
        v = vects[i].tolist()
        vector_size = len(v)
        d["q_%d_vec" % len(v)] = v
    return tk_count, vector_size
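

# RAPTOR pass: pull the document's existing chunks and vectors from the doc
# store, cluster and summarize them recursively, and return the newly produced
# summary chunks together with their token count and vector size.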
def run_raptor(row, chat_mdl, embd_mdl, callback=None):
    # Probe the embedding model once to learn the vector dimension.
    vts, _ = embd_mdl.encode(["ok"])
    vector_size = len(vts[0])
    vctr_nm = "q_%d_vec" % vector_size
    chunks = []
    for d in settings.retrievaler.chunk_list(row["doc_id"], row["tenant_id"], [str(row["kb_id"])],
                                             fields=["content_with_weight", vctr_nm]):
        chunks.append((d["content_with_weight"], np.array(d[vctr_nm])))

    raptor = Raptor(
        row["parser_config"]["raptor"].get("max_cluster", 64),
        chat_mdl,
        embd_mdl,
        row["parser_config"]["raptor"]["prompt"],
        row["parser_config"]["raptor"]["max_token"],
        row["parser_config"]["raptor"]["threshold"]
    )
    original_length = len(chunks)
    raptor(chunks, row["parser_config"]["raptor"]["random_seed"], callback)
    doc = {
        "doc_id": row["doc_id"],
        "kb_id": [str(row["kb_id"])],
        "docnm_kwd": row["name"],
        "title_tks": rag_tokenizer.tokenize(row["name"])
    }
    res = []
    tk_count = 0
    # Everything appended beyond the original length is a new summary chunk.
    for content, vctr in chunks[original_length:]:
        d = copy.deepcopy(doc)
        md5 = hashlib.md5()
        md5.update((content + str(d["doc_id"])).encode("utf-8"))
        d["id"] = md5.hexdigest()
        d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
        d["create_timestamp_flt"] = datetime.now().timestamp()
        d[vctr_nm] = vctr.tolist()
        d["content_with_weight"] = content
        d["content_ltks"] = rag_tokenizer.tokenize(content)
        d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
        res.append(d)
        tk_count += num_tokens_from_string(content)
    return res, tk_count, vector_size
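

# Execute one task end to end: build (or RAPTOR-summarize) the chunks, embed
# them, create the index if needed, and bulk-insert into the doc store,
# rolling back the inserted chunks on failure or cancellation.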
def do_handle_task(r):
    callback = partial(set_progress, r["id"], r["from_page"], r["to_page"])
    try:
        embd_mdl = LLMBundle(r["tenant_id"], LLMType.EMBEDDING, llm_name=r["embd_id"], lang=r["language"])
    except Exception as e:
        callback(-1, msg=str(e))
        raise
    if r.get("task_type", "") == "raptor":
        try:
            chat_mdl = LLMBundle(r["tenant_id"], LLMType.CHAT, llm_name=r["llm_id"], lang=r["language"])
            cks, tk_count, vector_size = run_raptor(r, chat_mdl, embd_mdl, callback)
        except Exception as e:
            callback(-1, msg=str(e))
            raise
    else:
        st = timer()
        cks = build(r)
        logging.info("Build chunks({}): {}".format(r["name"], timer() - st))
        if cks is None:
            return
        if not cks:
            callback(1., "No chunk! Done!")
            return
        # TODO: exception handler
        ## set_progress(r["did"], -1, "ERROR: ")
        callback(
            msg="Finished slicing files ({} chunks in {:.2f}s). Start to embed the content.".format(len(cks),
                                                                                                    timer() - st)
        )
        st = timer()
        try:
            tk_count, vector_size = embedding(cks, embd_mdl, r["parser_config"], callback)
        except Exception as e:
            callback(-1, "Embedding error:{}".format(str(e)))
            logging.exception("run_embedding got exception")
            tk_count = 0
            raise
        logging.info("Embedding elapsed({}): {:.2f}".format(r["name"], timer() - st))
        callback(msg="Finished embedding (in {:.2f}s)! Start to build index!".format(timer() - st))

    # logging.info(f"task_executor init_kb index {search.index_name(r['tenant_id'])} embd_mdl {embd_mdl.llm_name} vector length {vector_size}")
    init_kb(r, vector_size)
    chunk_count = len(set([c["id"] for c in cks]))
    st = timer()
    es_r = ""
    es_bulk_size = 4
    for b in range(0, len(cks), es_bulk_size):
        es_r = settings.docStoreConn.insert(cks[b:b + es_bulk_size], search.index_name(r["tenant_id"]), r["kb_id"])
        if b % 128 == 0:
            callback(prog=0.8 + 0.1 * (b + 1) / len(cks), msg="")
    logging.info("Indexing elapsed({}): {:.2f}".format(r["name"], timer() - st))
    if es_r:
        callback(-1, "Insert chunk error. Please check the log file and the Elasticsearch/Infinity status!")
        settings.docStoreConn.delete({"doc_id": r["doc_id"]}, search.index_name(r["tenant_id"]), r["kb_id"])
        logging.error('Insert chunk error: ' + str(es_r))
        raise Exception('Insert chunk error: ' + str(es_r))

    if TaskService.do_cancel(r["id"]):
        settings.docStoreConn.delete({"doc_id": r["doc_id"]}, search.index_name(r["tenant_id"]), r["kb_id"])
        return

    callback(msg="Indexing elapsed in {:.2f}s.".format(timer() - st))
    callback(1., "Done!")
    DocumentService.increment_chunk_num(
        r["doc_id"], r["kb_id"], tk_count, chunk_count, 0)
    logging.info(
        "Chunk doc({}), token({}), chunks({}), elapsed:{:.2f}".format(
            r["id"], tk_count, len(cks), timer() - st))
def handle_task():
    global PAYLOAD, mt_lock, DONE_TASKS, FAILED_TASKS, CURRENT_TASK
    task = collect()
    if task:
        try:
            logging.info(f"handle_task begin for task {json.dumps(task)}")
            with mt_lock:
                CURRENT_TASK = copy.deepcopy(task)
            do_handle_task(task)
            with mt_lock:
                DONE_TASKS += 1
                CURRENT_TASK = None
            logging.info(f"handle_task done for task {json.dumps(task)}")
        except Exception:
            with mt_lock:
                FAILED_TASKS += 1
                CURRENT_TASK = None
            logging.exception(f"handle_task got exception for task {json.dumps(task)}")
    if PAYLOAD:
        PAYLOAD.ack()
        PAYLOAD = None
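

# Daemon thread: every 30 seconds, publish a JSON heartbeat (queue depth, task
# counters, current task) to a per-consumer Redis sorted set keyed by
# timestamp, and prune entries older than 30 minutes.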
def report_status():
    global CONSUMER_NAME, BOOT_AT, PENDING_TASKS, LAG_TASKS, mt_lock, DONE_TASKS, FAILED_TASKS, CURRENT_TASK
    REDIS_CONN.sadd("TASKEXE", CONSUMER_NAME)
    while True:
        try:
            now = datetime.now()
            group_info = REDIS_CONN.queue_info(SVR_QUEUE_NAME, "rag_flow_svr_task_broker")
            if group_info is not None:
                PENDING_TASKS = int(group_info["pending"])
                LAG_TASKS = int(group_info["lag"])

            with mt_lock:
                heartbeat = json.dumps({
                    "name": CONSUMER_NAME,
                    "now": now.isoformat(),
                    "boot_at": BOOT_AT,
                    "pending": PENDING_TASKS,
                    "lag": LAG_TASKS,
                    "done": DONE_TASKS,
                    "failed": FAILED_TASKS,
                    "current": CURRENT_TASK,
                })
            REDIS_CONN.zadd(CONSUMER_NAME, heartbeat, now.timestamp())
            logging.info(f"{CONSUMER_NAME} reported heartbeat: {heartbeat}")

            expired = REDIS_CONN.zcount(CONSUMER_NAME, 0, now.timestamp() - 60 * 30)
            if expired > 0:
                REDIS_CONN.zpopmin(CONSUMER_NAME, expired)
        except Exception:
            logging.exception("report_status got exception")
        time.sleep(30)
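

# Log the line-by-line memory growth between two tracemalloc snapshots;
# `dump_full` additionally dumps the absolute top-10 consumers of the new
# snapshot.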
def analyze_heap(snapshot1: tracemalloc.Snapshot, snapshot2: tracemalloc.Snapshot, snapshot_id: int, dump_full: bool):
    msg = ""
    if dump_full:
        stats2 = snapshot2.statistics('lineno')
        msg += f"{CONSUMER_NAME} memory usage of snapshot {snapshot_id}:\n"
        for stat in stats2[:10]:
            msg += f"{stat}\n"
    stats1_vs_2 = snapshot2.compare_to(snapshot1, 'lineno')
    msg += f"{CONSUMER_NAME} memory usage increase from snapshot {snapshot_id - 1} to snapshot {snapshot_id}:\n"
    for stat in stats1_vs_2[:10]:
        msg += f"{stat}\n"
    msg += f"{CONSUMER_NAME} detailed traceback for the top memory consumers:\n"
    for stat in stats1_vs_2[:3]:
        msg += '\n'.join(stat.traceback.format())
    logging.info(msg)
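

# Entry point: start the heartbeat thread, optionally enable tracemalloc
# sampling (a heap snapshot every TRACE_MALLOC_DELTA completed tasks, a full
# dump every TRACE_MALLOC_FULL), then run the worker loop forever.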
def main():
    settings.init_settings()
    background_thread = threading.Thread(target=report_status)
    background_thread.daemon = True
    background_thread.start()

    TRACE_MALLOC_DELTA = int(os.environ.get('TRACE_MALLOC_DELTA', "0"))
    TRACE_MALLOC_FULL = int(os.environ.get('TRACE_MALLOC_FULL', "0"))
    if TRACE_MALLOC_DELTA > 0:
        if TRACE_MALLOC_FULL < TRACE_MALLOC_DELTA:
            TRACE_MALLOC_FULL = TRACE_MALLOC_DELTA
        tracemalloc.start()
        snapshot1 = tracemalloc.take_snapshot()

    while True:
        handle_task()
        num_tasks = DONE_TASKS + FAILED_TASKS
        if TRACE_MALLOC_DELTA > 0 and num_tasks > 0 and num_tasks % TRACE_MALLOC_DELTA == 0:
            snapshot2 = tracemalloc.take_snapshot()
            analyze_heap(snapshot1, snapshot2, int(num_tasks / TRACE_MALLOC_DELTA), num_tasks % TRACE_MALLOC_FULL == 0)
            snapshot1 = snapshot2
            snapshot2 = None


if __name__ == "__main__":
    main()
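
# Usage sketch (inferred from the argv handling at the top of this file, not
# documented here): run one executor per consumer number so that several
# workers share the same Redis queue group, e.g.
#
#   python task_executor.py 0
#   python task_executor.py 1
#
# Each process registers as "task_consumer_<no>" and reports a heartbeat to
# Redis every 30 seconds via report_status().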