
# pdf_parser.py

#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import random
import re
import sys
import threading
from copy import deepcopy
from io import BytesIO
from timeit import default_timer as timer

import numpy as np
import pdfplumber
import trio
import xgboost as xgb
from huggingface_hub import snapshot_download
from PIL import Image
from pypdf import PdfReader as pdf2_read

from api import settings
from api.utils.file_utils import get_project_base_directory
from deepdoc.vision import OCR, LayoutRecognizer, Recognizer, TableStructureRecognizer
from rag.app.picture import vision_llm_chunk as picture_vision_llm_chunk
from rag.nlp import rag_tokenizer
from rag.prompts import vision_llm_describe_prompt
from rag.settings import PARALLEL_DEVICES

LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
if LOCK_KEY_pdfplumber not in sys.modules:
    sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()


class RAGFlowPdfParser:
    def __init__(self, **kwargs):
        """
        If you have trouble downloading HuggingFace models, -_^ this might help!!

        For Linux:
        export HF_ENDPOINT=https://hf-mirror.com

        For Windows:
        Good luck
        ^_-
        """
        self.ocr = OCR()
        self.parallel_limiter = None
        if PARALLEL_DEVICES is not None and PARALLEL_DEVICES > 1:
            self.parallel_limiter = [trio.CapacityLimiter(1) for _ in range(PARALLEL_DEVICES)]

        if hasattr(self, "model_speciess"):
            self.layouter = LayoutRecognizer("layout." + self.model_speciess)
        else:
            self.layouter = LayoutRecognizer("layout")
        self.tbl_det = TableStructureRecognizer()

        self.updown_cnt_mdl = xgb.Booster()
        if not settings.LIGHTEN:
            try:
                import torch.cuda
                if torch.cuda.is_available():
                    self.updown_cnt_mdl.set_param({"device": "cuda"})
            except Exception:
                logging.exception("RAGFlowPdfParser __init__")
        try:
            model_dir = os.path.join(
                get_project_base_directory(),
                "rag/res/deepdoc")
            self.updown_cnt_mdl.load_model(os.path.join(
                model_dir, "updown_concat_xgb.model"))
        except Exception:
            model_dir = snapshot_download(
                repo_id="InfiniFlow/text_concat_xgb_v1.0",
                local_dir=os.path.join(get_project_base_directory(), "rag/res/deepdoc"),
                local_dir_use_symlinks=False)
            self.updown_cnt_mdl.load_model(os.path.join(
                model_dir, "updown_concat_xgb.model"))

        self.page_from = 0

    def __char_width(self, c):
        return (c["x1"] - c["x0"]) // max(len(c["text"]), 1)

    def __height(self, c):
        return c["bottom"] - c["top"]

    def _x_dis(self, a, b):
        return min(abs(a["x1"] - b["x0"]), abs(a["x0"] - b["x1"]),
                   abs(a["x0"] + a["x1"] - b["x0"] - b["x1"]) / 2)

    def _y_dis(
            self, a, b):
        return (
            b["top"] + b["bottom"] - a["top"] - a["bottom"]) / 2

    def _match_proj(self, b):
        proj_patt = [
            r"第[零一二三四五六七八九十百]+章",
            r"第[零一二三四五六七八九十百]+[条节]",
            r"[零一二三四五六七八九十百]+[、是  ]",
            r"[\((][零一二三四五六七八九十百]+[)\)]",
            r"[\((][0-9]+[)\)]",
            r"[0-9]+(、|\.[  ]|\.[^0-9./a-zA-Z_%><-]{4,})",
            r"[0-9]+\.[0-9.]+(、|\.[  ])",
            r"[⚫•➢①② ]",
        ]
        return any([re.match(p, b["text"]) for p in proj_patt])

    def _updown_concat_features(self, up, down):
        w = max(self.__char_width(up), self.__char_width(down))
        h = max(self.__height(up), self.__height(down))
        y_dis = self._y_dis(up, down)
        LEN = 6
        tks_down = rag_tokenizer.tokenize(down["text"][:LEN]).split()
        tks_up = rag_tokenizer.tokenize(up["text"][-LEN:]).split()
        tks_all = up["text"][-LEN:].strip() \
            + (" " if re.match(r"[a-zA-Z0-9]+",
                               up["text"][-1] + down["text"][0]) else "") \
            + down["text"][:LEN].strip()
        tks_all = rag_tokenizer.tokenize(tks_all).split()
        fea = [
            up.get("R", -1) == down.get("R", -1),
            y_dis / h,
            down["page_number"] - up["page_number"],
            up["layout_type"] == down["layout_type"],
            up["layout_type"] == "text",
            down["layout_type"] == "text",
            up["layout_type"] == "table",
            down["layout_type"] == "table",
            True if re.search(
                r"([。?!;!?;+))]|[a-z]\.)$",
                up["text"]) else False,
            True if re.search(r"[,:‘“、0-9(+-]$", up["text"]) else False,
            True if re.search(
                r"(^.?[/,?;:\],。;:’”?!》】)-])",
                down["text"]) else False,
            True if re.match(r"[\((][^\(\)()]+[)\)]$", up["text"]) else False,
            True if re.search(r"[,,][^。.]+$", up["text"]) else False,
            True if re.search(r"[,,][^。.]+$", up["text"]) else False,
            True if re.search(r"[\((][^\))]+$", up["text"])
            and re.search(r"[\))]", down["text"]) else False,
            self._match_proj(down),
            True if re.match(r"[A-Z]", down["text"]) else False,
            True if re.match(r"[A-Z]", up["text"][-1]) else False,
            True if re.match(r"[a-z0-9]", up["text"][-1]) else False,
            True if re.match(r"[0-9.%,-]+$", down["text"]) else False,
            up["text"].strip()[-2:] == down["text"].strip()[-2:] if len(
                up["text"].strip()) > 1 and len(down["text"].strip()) > 1 else False,
            up["x0"] > down["x1"],
            abs(self.__height(up) - self.__height(down)) / min(self.__height(up),
                                                               self.__height(down)),
            self._x_dis(up, down) / max(w, 0.000001),
            (len(up["text"]) - len(down["text"])) /
            max(len(up["text"]), len(down["text"])),
            len(tks_all) - len(tks_up) - len(tks_down),
            len(tks_down) - len(tks_up),
            tks_down[-1] == tks_up[-1] if tks_down and tks_up else False,
            max(down["in_row"], up["in_row"]),
            abs(down["in_row"] - up["in_row"]),
            len(tks_down) == 1 and rag_tokenizer.tag(tks_down[0]).find("n") >= 0,
            len(tks_up) == 1 and rag_tokenizer.tag(tks_up[0]).find("n") >= 0
        ]
        return fea

    @staticmethod
    def sort_X_by_page(arr, threashold):
        # sort by page number, then x0, then top
        arr = sorted(arr, key=lambda r: (r["page_number"], r["x0"], r["top"]))
        for i in range(len(arr) - 1):
            for j in range(i, -1, -1):
                # restore the vertical order within the same column (x0 within threshold)
                if abs(arr[j + 1]["x0"] - arr[j]["x0"]) < threashold \
                        and arr[j + 1]["top"] < arr[j]["top"] \
                        and arr[j + 1]["page_number"] == arr[j]["page_number"]:
                    tmp = arr[j]
                    arr[j] = arr[j + 1]
                    arr[j + 1] = tmp
        return arr

    def _has_color(self, o):
        if o.get("ncs", "") == "DeviceGray":
            if o["stroking_color"] and o["stroking_color"][0] == 1 and o["non_stroking_color"] and \
                    o["non_stroking_color"][0] == 1:
                if re.match(r"[a-zT_\[\]\(\)-]+", o.get("text", "")):
                    return False
        return True

    def _table_transformer_job(self, ZM):
        logging.debug("Table processing...")
        imgs, pos = [], []
        tbcnt = [0]
        MARGIN = 10
        self.tb_cpns = []
        assert len(self.page_layout) == len(self.page_images)
        for p, tbls in enumerate(self.page_layout):  # for page
            tbls = [f for f in tbls if f["type"] == "table"]
            tbcnt.append(len(tbls))
            if not tbls:
                continue
            for tb in tbls:  # for table
                left, top, right, bott = tb["x0"] - MARGIN, tb["top"] - MARGIN, \
                    tb["x1"] + MARGIN, tb["bottom"] + MARGIN
                left *= ZM
                top *= ZM
                right *= ZM
                bott *= ZM
                pos.append((left, top))
                imgs.append(self.page_images[p].crop((left, top, right, bott)))

        assert len(self.page_images) == len(tbcnt) - 1
        if not imgs:
            return
        recos = self.tbl_det(imgs)
        tbcnt = np.cumsum(tbcnt)
        for i in range(len(tbcnt) - 1):  # for page
            pg = []
            for j, tb_items in enumerate(
                    recos[tbcnt[i]: tbcnt[i + 1]]):  # for table
                poss = pos[tbcnt[i]: tbcnt[i + 1]]
                for it in tb_items:  # for table components
                    it["x0"] = (it["x0"] + poss[j][0])
                    it["x1"] = (it["x1"] + poss[j][0])
                    it["top"] = (it["top"] + poss[j][1])
                    it["bottom"] = (it["bottom"] + poss[j][1])
                    for n in ["x0", "x1", "top", "bottom"]:
                        it[n] /= ZM
                    it["top"] += self.page_cum_height[i]
                    it["bottom"] += self.page_cum_height[i]
                    it["pn"] = i
                    it["layoutno"] = j
                    pg.append(it)
            self.tb_cpns.extend(pg)

        def gather(kwd, fzy=10, ption=0.6):
            eles = Recognizer.sort_Y_firstly(
                [r for r in self.tb_cpns if re.match(kwd, r["label"])], fzy)
            eles = Recognizer.layouts_cleanup(self.boxes, eles, 5, ption)
            return Recognizer.sort_Y_firstly(eles, 0)

        # add R,H,C,SP tag to boxes within table layout
        headers = gather(r".*header$")
        rows = gather(r".* (row|header)")
        spans = gather(r".*spanning")
        clmns = sorted([r for r in self.tb_cpns if re.match(
            r"table column$", r["label"])], key=lambda x: (x["pn"], x["layoutno"], x["x0"]))
        clmns = Recognizer.layouts_cleanup(self.boxes, clmns, 5, 0.5)

        for b in self.boxes:
            if b.get("layout_type", "") != "table":
                continue
            ii = Recognizer.find_overlapped_with_threashold(b, rows, thr=0.3)
            if ii is not None:
                b["R"] = ii
                b["R_top"] = rows[ii]["top"]
                b["R_bott"] = rows[ii]["bottom"]

            ii = Recognizer.find_overlapped_with_threashold(
                b, headers, thr=0.3)
            if ii is not None:
                b["H_top"] = headers[ii]["top"]
                b["H_bott"] = headers[ii]["bottom"]
                b["H_left"] = headers[ii]["x0"]
                b["H_right"] = headers[ii]["x1"]
                b["H"] = ii

            ii = Recognizer.find_horizontally_tightest_fit(b, clmns)
            if ii is not None:
                b["C"] = ii
                b["C_left"] = clmns[ii]["x0"]
                b["C_right"] = clmns[ii]["x1"]

            ii = Recognizer.find_overlapped_with_threashold(b, spans, thr=0.3)
            if ii is not None:
                b["H_top"] = spans[ii]["top"]
                b["H_bott"] = spans[ii]["bottom"]
                b["H_left"] = spans[ii]["x0"]
                b["H_right"] = spans[ii]["x1"]
                b["SP"] = ii

    def __ocr(self, pagenum, img, chars, ZM=3, device_id: int | None = None):
        start = timer()
        bxs = self.ocr.detect(np.array(img), device_id)
        logging.info(f"__ocr detecting boxes of an image cost ({timer() - start}s)")

        start = timer()
        if not bxs:
            self.boxes.append([])
            return
        bxs = [(line[0], line[1][0]) for line in bxs]
        bxs = Recognizer.sort_Y_firstly(
            [{"x0": b[0][0] / ZM, "x1": b[1][0] / ZM,
              "top": b[0][1] / ZM, "text": "", "txt": t,
              "bottom": b[-1][1] / ZM,
              "page_number": pagenum} for b, t in bxs if b[0][0] <= b[1][0] and b[0][1] <= b[-1][1]],
            self.mean_height[-1] / 3
        )

        # merge chars in the same rect
        for c in Recognizer.sort_Y_firstly(
                chars, self.mean_height[pagenum - 1] // 4):
            ii = Recognizer.find_overlapped(c, bxs)
            if ii is None:
                self.lefted_chars.append(c)
                continue
            ch = c["bottom"] - c["top"]
            bh = bxs[ii]["bottom"] - bxs[ii]["top"]
            if abs(ch - bh) / max(ch, bh) >= 0.7 and c["text"] != ' ':
                self.lefted_chars.append(c)
                continue
            if c["text"] == " " and bxs[ii]["text"]:
                if re.match(r"[0-9a-zA-Zа-яА-Я,.?;:!%%]", bxs[ii]["text"][-1]):
                    bxs[ii]["text"] += " "
            else:
                bxs[ii]["text"] += c["text"]
        logging.info(f"__ocr sorting {len(chars)} chars cost {timer() - start}s")

        start = timer()
        boxes_to_reg = []
        img_np = np.array(img)
        for b in bxs:
            if not b["text"]:
                left, right, top, bott = b["x0"] * ZM, b["x1"] * \
                    ZM, b["top"] * ZM, b["bottom"] * ZM
                b["box_image"] = self.ocr.get_rotate_crop_image(
                    img_np, np.array([[left, top], [right, top], [right, bott], [left, bott]], dtype=np.float32))
                boxes_to_reg.append(b)
            del b["txt"]
        texts = self.ocr.recognize_batch([b["box_image"] for b in boxes_to_reg], device_id)
        for i in range(len(boxes_to_reg)):
            boxes_to_reg[i]["text"] = texts[i]
            del boxes_to_reg[i]["box_image"]
        logging.info(f"__ocr recognize {len(bxs)} boxes cost {timer() - start}s")

        bxs = [b for b in bxs if b["text"]]
        if self.mean_height[-1] == 0:
            self.mean_height[-1] = np.median([b["bottom"] - b["top"]
                                              for b in bxs])
        self.boxes.append(bxs)

    def _layouts_rec(self, ZM, drop=True):
        assert len(self.page_images) == len(self.boxes)
        self.boxes, self.page_layout = self.layouter(
            self.page_images, self.boxes, ZM, drop=drop)
        # cumulative Y
        for i in range(len(self.boxes)):
            self.boxes[i]["top"] += \
                self.page_cum_height[self.boxes[i]["page_number"] - 1]
            self.boxes[i]["bottom"] += \
                self.page_cum_height[self.boxes[i]["page_number"] - 1]

    def _text_merge(self):
        # merge adjacent boxes
        bxs = self.boxes

        def end_with(b, txt):
            txt = txt.strip()
            tt = b.get("text", "").strip()
            return tt and tt.find(txt) == len(tt) - len(txt)

        def start_with(b, txts):
            tt = b.get("text", "").strip()
            return tt and any([tt.find(t.strip()) == 0 for t in txts])

        # horizontally merge adjacent boxes with the same layout
        i = 0
        while i < len(bxs) - 1:
            b = bxs[i]
            b_ = bxs[i + 1]
            if b.get("layoutno", "0") != b_.get("layoutno", "1") or b.get("layout_type", "") in ["table", "figure",
                                                                                                 "equation"]:
                i += 1
                continue
            if abs(self._y_dis(b, b_)
                   ) < self.mean_height[bxs[i]["page_number"] - 1] / 3:
                # merge
                bxs[i]["x1"] = b_["x1"]
                bxs[i]["top"] = (b["top"] + b_["top"]) / 2
                bxs[i]["bottom"] = (b["bottom"] + b_["bottom"]) / 2
                bxs[i]["text"] += b_["text"]
                bxs.pop(i + 1)
                continue
            i += 1
            continue

            dis_thr = 1
            dis = b["x1"] - b_["x0"]
            if b.get("layout_type", "") != "text" or b_.get(
                    "layout_type", "") != "text":
                if end_with(b, ",") or start_with(b_, "(,"):
                    dis_thr = -8
                else:
                    i += 1
                    continue

            if abs(self._y_dis(b, b_)) < self.mean_height[bxs[i]["page_number"] - 1] / 5 \
                    and dis >= dis_thr and b["x1"] < b_["x1"]:
                # merge
                bxs[i]["x1"] = b_["x1"]
                bxs[i]["top"] = (b["top"] + b_["top"]) / 2
                bxs[i]["bottom"] = (b["bottom"] + b_["bottom"]) / 2
                bxs[i]["text"] += b_["text"]
                bxs.pop(i + 1)
                continue
            i += 1
        self.boxes = bxs

    def _naive_vertical_merge(self):
        bxs = Recognizer.sort_Y_firstly(
            self.boxes, np.median(
                self.mean_height) / 3)
        i = 0
        while i + 1 < len(bxs):
            b = bxs[i]
            b_ = bxs[i + 1]
            if b["page_number"] < b_["page_number"] and re.match(
                    r"[0-9 •一—-]+$", b["text"]):
                bxs.pop(i)
                continue
            if not b["text"].strip():
                bxs.pop(i)
                continue
            concatting_feats = [
                b["text"].strip()[-1] in ",;:'\",、‘“;:-",
                len(b["text"].strip()) > 1 and b["text"].strip(
                )[-2] in ",;:'\",‘“、;:",
                b_["text"].strip() and b_["text"].strip()[0] in "。;?!?”)),,、:",
            ]
            # features for not concatenating
            feats = [
                b.get("layoutno", 0) != b_.get("layoutno", 0),
                b["text"].strip()[-1] in "。?!?",
                self.is_english and b["text"].strip()[-1] in ".!?",
                b["page_number"] == b_["page_number"] and b_["top"] -
                b["bottom"] > self.mean_height[b["page_number"] - 1] * 1.5,
                b["page_number"] < b_["page_number"] and abs(
                    b["x0"] - b_["x0"]) > self.mean_width[b["page_number"] - 1] * 4,
            ]
            # split features
            detach_feats = [b["x1"] < b_["x0"],
                            b["x0"] > b_["x1"]]
            if (any(feats) and not any(concatting_feats)) or any(detach_feats):
                logging.debug("{} {} {} {}".format(
                    b["text"],
                    b_["text"],
                    any(feats),
                    any(concatting_feats),
                ))
                i += 1
                continue
            # merge up and down
            b["bottom"] = b_["bottom"]
            b["text"] += b_["text"]
            b["x0"] = min(b["x0"], b_["x0"])
            b["x1"] = max(b["x1"], b_["x1"])
            bxs.pop(i + 1)
        self.boxes = bxs

    def _concat_downward(self, concat_between_pages=True):
        # count boxes in the same row as a feature
        for i in range(len(self.boxes)):
            mh = self.mean_height[self.boxes[i]["page_number"] - 1]
            self.boxes[i]["in_row"] = 0
            j = max(0, i - 12)
            while j < min(i + 12, len(self.boxes)):
                if j == i:
                    j += 1
                    continue
                ydis = self._y_dis(self.boxes[i], self.boxes[j]) / mh
                if abs(ydis) < 1:
                    self.boxes[i]["in_row"] += 1
                elif ydis > 0:
                    break
                j += 1

        # concat between rows
        boxes = deepcopy(self.boxes)
        blocks = []
        while boxes:
            chunks = []

            def dfs(up, dp):
                chunks.append(up)
                i = dp
                while i < min(dp + 12, len(boxes)):
                    ydis = self._y_dis(up, boxes[i])
                    smpg = up["page_number"] == boxes[i]["page_number"]
                    mh = self.mean_height[up["page_number"] - 1]
                    mw = self.mean_width[up["page_number"] - 1]
                    if smpg and ydis > mh * 4:
                        break
                    if not smpg and ydis > mh * 16:
                        break
                    down = boxes[i]
                    if not concat_between_pages and down["page_number"] > up["page_number"]:
                        break

                    if up.get("R", "") != down.get(
                            "R", "") and up["text"][-1] != ",":
                        i += 1
                        continue

                    if re.match(r"[0-9]{2,3}/[0-9]{3}$", up["text"]) \
                            or re.match(r"[0-9]{2,3}/[0-9]{3}$", down["text"]) \
                            or not down["text"].strip():
                        i += 1
                        continue

                    if not down["text"].strip() or not up["text"].strip():
                        i += 1
                        continue

                    if up["x1"] < down["x0"] - 10 * \
                            mw or up["x0"] > down["x1"] + 10 * mw:
                        i += 1
                        continue

                    if i - dp < 5 and up.get("layout_type") == "text":
                        if up.get("layoutno", "1") == down.get(
                                "layoutno", "2"):
                            dfs(down, i + 1)
                            boxes.pop(i)
                            return
                        i += 1
                        continue

                    fea = self._updown_concat_features(up, down)
                    if self.updown_cnt_mdl.predict(
                            xgb.DMatrix([fea]))[0] <= 0.5:
                        i += 1
                        continue
                    dfs(down, i + 1)
                    boxes.pop(i)
                    return

            dfs(boxes[0], 1)
            boxes.pop(0)
            if chunks:
                blocks.append(chunks)

        # concat within each block
        boxes = []
        for b in blocks:
            if len(b) == 1:
                boxes.append(b[0])
                continue
            t = b[0]
            for c in b[1:]:
                t["text"] = t["text"].strip()
                c["text"] = c["text"].strip()
                if not c["text"]:
                    continue
                if t["text"] and re.match(
                        r"[0-9\.a-zA-Z]+$", t["text"][-1] + c["text"][-1]):
                    t["text"] += " "
                t["text"] += c["text"]
                t["x0"] = min(t["x0"], c["x0"])
                t["x1"] = max(t["x1"], c["x1"])
                t["page_number"] = min(t["page_number"], c["page_number"])
                t["bottom"] = c["bottom"]
                if not t["layout_type"] \
                        and c["layout_type"]:
                    t["layout_type"] = c["layout_type"]
            boxes.append(t)

        self.boxes = Recognizer.sort_Y_firstly(boxes, 0)

    def _filter_forpages(self):
        if not self.boxes:
            return
        findit = False
        i = 0
        while i < len(self.boxes):
            if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
                            re.sub(r"( | |\u3000)+", "", self.boxes[i]["text"].lower())):
                i += 1
                continue
            findit = True
            eng = re.match(
                r"[0-9a-zA-Z :'.-]{5,}",
                self.boxes[i]["text"].strip())
            self.boxes.pop(i)
            if i >= len(self.boxes):
                break
            prefix = self.boxes[i]["text"].strip()[:3] if not eng else " ".join(
                self.boxes[i]["text"].strip().split()[:2])
            while not prefix:
                self.boxes.pop(i)
                if i >= len(self.boxes):
                    break
                prefix = self.boxes[i]["text"].strip()[:3] if not eng else " ".join(
                    self.boxes[i]["text"].strip().split()[:2])
            self.boxes.pop(i)
            if i >= len(self.boxes) or not prefix:
                break
            for j in range(i, min(i + 128, len(self.boxes))):
                if not re.match(prefix, self.boxes[j]["text"]):
                    continue
                for k in range(i, j):
                    self.boxes.pop(i)
                break
        if findit:
            return

        page_dirty = [0] * len(self.page_images)
        for b in self.boxes:
            if re.search(r"(··|··|··)", b["text"]):
                page_dirty[b["page_number"] - 1] += 1
        page_dirty = set([i + 1 for i, t in enumerate(page_dirty) if t > 3])
        if not page_dirty:
            return
        i = 0
        while i < len(self.boxes):
            if self.boxes[i]["page_number"] in page_dirty:
                self.boxes.pop(i)
                continue
            i += 1

    def _merge_with_same_bullet(self):
        i = 0
        while i + 1 < len(self.boxes):
            b = self.boxes[i]
            b_ = self.boxes[i + 1]
            if not b["text"].strip():
                self.boxes.pop(i)
                continue
            if not b_["text"].strip():
                self.boxes.pop(i + 1)
                continue

            if b["text"].strip()[0] != b_["text"].strip()[0] \
                    or b["text"].strip()[0].lower() in set("qwertyuopasdfghjklzxcvbnm") \
                    or rag_tokenizer.is_chinese(b["text"].strip()[0]) \
                    or b["top"] > b_["bottom"]:
                i += 1
                continue
            b_["text"] = b["text"] + "\n" + b_["text"]
            b_["x0"] = min(b["x0"], b_["x0"])
            b_["x1"] = max(b["x1"], b_["x1"])
            b_["top"] = b["top"]
            self.boxes.pop(i)

    def _extract_table_figure(self, need_image, ZM,
                              return_html, need_position):
        tables = {}
        figures = {}
        # extract figure and table boxes
        i = 0
        lst_lout_no = ""
        nomerge_lout_no = []
        while i < len(self.boxes):
            if "layoutno" not in self.boxes[i]:
                i += 1
                continue
            lout_no = str(self.boxes[i]["page_number"]) + \
                "-" + str(self.boxes[i]["layoutno"])
            if TableStructureRecognizer.is_caption(self.boxes[i]) or self.boxes[i]["layout_type"] in ["table caption",
                                                                                                      "title",
                                                                                                      "figure caption",
                                                                                                      "reference"]:
                nomerge_lout_no.append(lst_lout_no)
            if self.boxes[i]["layout_type"] == "table":
                if re.match(r"(数据|资料|图表)*来源[:: ]", self.boxes[i]["text"]):
                    self.boxes.pop(i)
                    continue
                if lout_no not in tables:
                    tables[lout_no] = []
                tables[lout_no].append(self.boxes[i])
                self.boxes.pop(i)
                lst_lout_no = lout_no
                continue
            if need_image and self.boxes[i]["layout_type"] == "figure":
                if re.match(r"(数据|资料|图表)*来源[:: ]", self.boxes[i]["text"]):
                    self.boxes.pop(i)
                    continue
                if lout_no not in figures:
                    figures[lout_no] = []
                figures[lout_no].append(self.boxes[i])
                self.boxes.pop(i)
                lst_lout_no = lout_no
                continue
            i += 1

        # merge table on different pages
        nomerge_lout_no = set(nomerge_lout_no)
        tbls = sorted([(k, bxs) for k, bxs in tables.items()],
                      key=lambda x: (x[1][0]["top"], x[1][0]["x0"]))

        i = len(tbls) - 1
        while i - 1 >= 0:
            k0, bxs0 = tbls[i - 1]
            k, bxs = tbls[i]
            i -= 1
            if k0 in nomerge_lout_no:
                continue
            if bxs[0]["page_number"] == bxs0[0]["page_number"]:
                continue
            if bxs[0]["page_number"] - bxs0[0]["page_number"] > 1:
                continue
            mh = self.mean_height[bxs[0]["page_number"] - 1]
            if self._y_dis(bxs0[-1], bxs[0]) > mh * 23:
                continue
            tables[k0].extend(tables[k])
            del tables[k]

        def x_overlapped(a, b):
            return not any([a["x1"] < b["x0"], a["x0"] > b["x1"]])

        # find captions and pop out
        i = 0
        while i < len(self.boxes):
            c = self.boxes[i]
            # mh = self.mean_height[c["page_number"]-1]
            if not TableStructureRecognizer.is_caption(c):
                i += 1
                continue

            # find the nearest layouts
            def nearest(tbls):
                nonlocal c
                mink = ""
                minv = 1000000000
                for k, bxs in tbls.items():
                    for b in bxs:
                        if b.get("layout_type", "").find("caption") >= 0:
                            continue
                        y_dis = self._y_dis(c, b)
                        x_dis = self._x_dis(
                            c, b) if not x_overlapped(
                            c, b) else 0
                        dis = y_dis * y_dis + x_dis * x_dis
                        if dis < minv:
                            mink = k
                            minv = dis
                return mink, minv

            tk, tv = nearest(tables)
            fk, fv = nearest(figures)
            # if min(tv, fv) > 2000:
            #     i += 1
            #     continue
            if tv < fv and tk:
                tables[tk].insert(0, c)
                logging.debug(
                    "TABLE:" +
                    self.boxes[i]["text"] +
                    "; Cap: " +
                    tk)
            elif fk:
                figures[fk].insert(0, c)
                logging.debug(
                    "FIGURE:" +
                    self.boxes[i]["text"] +
                    "; Cap: " +
                    fk)
            self.boxes.pop(i)

        res = []
        positions = []

        def cropout(bxs, ltype, poss):
            nonlocal ZM
            pn = set([b["page_number"] - 1 for b in bxs])
            if len(pn) < 2:
                pn = list(pn)[0]
                ht = self.page_cum_height[pn]
                b = {
                    "x0": np.min([b["x0"] for b in bxs]),
                    "top": np.min([b["top"] for b in bxs]) - ht,
                    "x1": np.max([b["x1"] for b in bxs]),
                    "bottom": np.max([b["bottom"] for b in bxs]) - ht
                }
                louts = [layout for layout in self.page_layout[pn] if layout["type"] == ltype]
                ii = Recognizer.find_overlapped(b, louts, naive=True)
                if ii is not None:
                    b = louts[ii]
                else:
                    logging.warning(
                        f"Missing layout match: {pn + 1}, {bxs[0].get('layoutno', '')}")

                left, top, right, bott = b["x0"], b["top"], b["x1"], b["bottom"]
                if right < left:
                    right = left + 1
                poss.append((pn + self.page_from, left, right, top, bott))
                return self.page_images[pn] \
                    .crop((left * ZM, top * ZM,
                           right * ZM, bott * ZM))
            pn = {}
            for b in bxs:
                p = b["page_number"] - 1
                if p not in pn:
                    pn[p] = []
                pn[p].append(b)
            pn = sorted(pn.items(), key=lambda x: x[0])
            imgs = [cropout(arr, ltype, poss) for p, arr in pn]
            pic = Image.new("RGB",
                            (int(np.max([i.size[0] for i in imgs])),
                             int(np.sum([m.size[1] for m in imgs]))),
                            (245, 245, 245))
            height = 0
            for img in imgs:
                pic.paste(img, (0, int(height)))
                height += img.size[1]
            return pic

        # crop figure out and add caption
        for k, bxs in figures.items():
            txt = "\n".join([b["text"] for b in bxs])
            if not txt:
                continue

            poss = []
            res.append(
                (cropout(
                    bxs,
                    "figure", poss),
                 [txt]))
            positions.append(poss)

        for k, bxs in tables.items():
            if not bxs:
                continue
            bxs = Recognizer.sort_Y_firstly(bxs, np.mean(
                [(b["bottom"] - b["top"]) / 2 for b in bxs]))
            poss = []

            res.append((cropout(bxs, "table", poss),
                        self.tbl_det.construct_table(bxs, html=return_html, is_english=self.is_english)))
            positions.append(poss)

        assert len(positions) == len(res)

        if need_position:
            return list(zip(res, positions))
        return res

    def proj_match(self, line):
        if len(line) <= 2:
            return
        if re.match(r"[0-9 ().,%%+/-]+$", line):
            return False
        for p, j in [
            (r"第[零一二三四五六七八九十百]+章", 1),
            (r"第[零一二三四五六七八九十百]+[条节]", 2),
            (r"[零一二三四五六七八九十百]+[、  ]", 3),
            (r"[\((][零一二三四五六七八九十百]+[)\)]", 4),
            (r"[0-9]+(、|\.[  ]|\.[^0-9])", 5),
            (r"[0-9]+\.[0-9]+(、|[.  ]|[^0-9])", 6),
            (r"[0-9]+\.[0-9]+\.[0-9]+(、|[  ]|[^0-9])", 7),
            (r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(、|[  ]|[^0-9])", 8),
            (r".{,48}[::??]$", 9),
            (r"[0-9]+\)", 10),
            (r"[\((][0-9]+[)\)]", 11),
            (r"[零一二三四五六七八九十百]+是", 12),
            (r"[⚫•➢✓]", 12)
        ]:
            if re.match(p, line):
                return j
        return

    def _line_tag(self, bx, ZM):
        pn = [bx["page_number"]]
        top = bx["top"] - self.page_cum_height[pn[0] - 1]
        bott = bx["bottom"] - self.page_cum_height[pn[0] - 1]
        page_images_cnt = len(self.page_images)
        if pn[-1] - 1 >= page_images_cnt:
            return ""
        while bott * ZM > self.page_images[pn[-1] - 1].size[1]:
            bott -= self.page_images[pn[-1] - 1].size[1] / ZM
            pn.append(pn[-1] + 1)
            if pn[-1] - 1 >= page_images_cnt:
                return ""

        return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##" \
            .format("-".join([str(p) for p in pn]),
                    bx["x0"], bx["x1"], top, bott)

    def __filterout_scraps(self, boxes, ZM):

        def width(b):
            return b["x1"] - b["x0"]

        def height(b):
            return b["bottom"] - b["top"]

        def usefull(b):
            if b.get("layout_type"):
                return True
            if width(
                    b) > self.page_images[b["page_number"] - 1].size[0] / ZM / 3:
                return True
            if b["bottom"] - b["top"] > self.mean_height[b["page_number"] - 1]:
                return True
            return False

        res = []
        while boxes:
            lines = []
            widths = []
            pw = self.page_images[boxes[0]["page_number"] - 1].size[0] / ZM
            mh = self.mean_height[boxes[0]["page_number"] - 1]
            mj = self.proj_match(
                boxes[0]["text"]) or boxes[0].get(
                "layout_type",
                "") == "title"

            def dfs(line, st):
                nonlocal mh, pw, lines, widths
                lines.append(line)
                widths.append(width(line))
                mmj = self.proj_match(
                    line["text"]) or line.get(
                    "layout_type",
                    "") == "title"
                for i in range(st + 1, min(st + 20, len(boxes))):
                    if (boxes[i]["page_number"] - line["page_number"]) > 0:
                        break
                    if not mmj and self._y_dis(
                            line, boxes[i]) >= 3 * mh and height(line) < 1.5 * mh:
                        break
                    if not usefull(boxes[i]):
                        continue
                    if mmj or \
                            (self._x_dis(boxes[i], line) < pw / 10):
                        # and abs(width(boxes[i])-width_mean)/max(width(boxes[i]),width_mean)<0.5):
                        # concat following
                        dfs(boxes[i], i)
                        boxes.pop(i)
                        break

            try:
                if usefull(boxes[0]):
                    dfs(boxes[0], 0)
                else:
                    logging.debug("WASTE: " + boxes[0]["text"])
            except Exception:
                pass
            boxes.pop(0)
            mw = np.mean(widths)
            if mj or mw / pw >= 0.35 or mw > 200:
                res.append(
                    "\n".join([c["text"] + self._line_tag(c, ZM) for c in lines]))
            else:
                logging.debug("REMOVED: " +
                              "<<".join([c["text"] for c in lines]))

        return "\n\n".join(res)

    @staticmethod
    def total_page_number(fnm, binary=None):
        try:
            with sys.modules[LOCK_KEY_pdfplumber]:
                pdf = pdfplumber.open(
                    fnm) if not binary else pdfplumber.open(BytesIO(binary))
                total_page = len(pdf.pages)
                pdf.close()
                return total_page
        except Exception:
            logging.exception("total_page_number")

    def __images__(self, fnm, zoomin=3, page_from=0,
                   page_to=299, callback=None):
        self.lefted_chars = []
        self.mean_height = []
        self.mean_width = []
        self.boxes = []
        self.garbages = {}
        self.page_cum_height = [0]
        self.page_layout = []
        self.page_from = page_from
        start = timer()
        try:
            with sys.modules[LOCK_KEY_pdfplumber]:
                self.pdf = pdfplumber.open(fnm) if isinstance(
                    fnm, str) else pdfplumber.open(BytesIO(fnm))
                self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
                                    enumerate(self.pdf.pages[page_from:page_to])]
                try:
                    self.page_chars = [[c for c in page.dedupe_chars().chars if self._has_color(c)]
                                       for page in self.pdf.pages[page_from:page_to]]
                except Exception as e:
                    logging.warning(f"Failed to extract characters for pages {page_from}-{page_to}: {str(e)}")
                    # if character extraction fails, fall back to empty lists
                    self.page_chars = [[] for _ in range(page_to - page_from)]
                self.total_page = len(self.pdf.pages)
        except Exception:
            logging.exception("RAGFlowPdfParser __images__")
        logging.info(f"__images__ dedupe_chars cost {timer() - start}s")

        self.outlines = []
        try:
            self.pdf = pdf2_read(fnm if isinstance(fnm, str) else BytesIO(fnm))
            outlines = self.pdf.outline

            def dfs(arr, depth):
                for a in arr:
                    if isinstance(a, dict):
                        self.outlines.append((a["/Title"], depth))
                        continue
                    dfs(a, depth + 1)

            dfs(outlines, 0)
        except Exception as e:
            logging.warning(f"Outlines exception: {e}")
        finally:
            self.pdf.close()
        if not self.outlines:
            logging.warning("Miss outlines")

        logging.debug("Images converted.")
        self.is_english = [re.search(r"[a-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join(
            random.choices([c["text"] for c in self.page_chars[i]], k=min(100, len(self.page_chars[i]))))) for i in
            range(len(self.page_chars))]
        if sum([1 if e else 0 for e in self.is_english]) > len(
                self.page_images) / 2:
            self.is_english = True
        else:
            self.is_english = False

        async def __img_ocr(i, id, img, chars, limiter):
            j = 0
            while j + 1 < len(chars):
                if chars[j]["text"] and chars[j + 1]["text"] \
                        and re.match(r"[0-9a-zA-Z,.:;!%]+", chars[j]["text"] + chars[j + 1]["text"]) \
                        and chars[j + 1]["x0"] - chars[j]["x1"] >= min(chars[j + 1]["width"],
                                                                       chars[j]["width"]) / 2:
                    chars[j]["text"] += " "
                j += 1

            if limiter:
                async with limiter:
                    await trio.to_thread.run_sync(lambda: self.__ocr(i + 1, img, chars, zoomin, id))
            else:
                self.__ocr(i + 1, img, chars, zoomin, id)

            if callback and i % 6 == 5:
                callback(prog=(i + 1) * 0.6 / len(self.page_images), msg="")

        async def __img_ocr_launcher():
            def __ocr_preprocess():
                chars = self.page_chars[i] if not self.is_english else []
                self.mean_height.append(
                    np.median(sorted([c["height"] for c in chars])) if chars else 0
                )
                self.mean_width.append(
                    np.median(sorted([c["width"] for c in chars])) if chars else 8
                )
                self.page_cum_height.append(img.size[1] / zoomin)
                return chars

            if self.parallel_limiter:
                async with trio.open_nursery() as nursery:
                    for i, img in enumerate(self.page_images):
                        chars = __ocr_preprocess()
                        nursery.start_soon(__img_ocr, i, i % PARALLEL_DEVICES, img, chars,
                                           self.parallel_limiter[i % PARALLEL_DEVICES])
                        await trio.sleep(0.1)
            else:
                for i, img in enumerate(self.page_images):
                    chars = __ocr_preprocess()
                    await __img_ocr(i, 0, img, chars, None)

        start = timer()
        trio.run(__img_ocr_launcher)
        logging.info(f"__images__ {len(self.page_images)} pages cost {timer() - start}s")

        if not self.is_english and not any(
                [c for c in self.page_chars]) and self.boxes:
            bxes = [b for bxs in self.boxes for b in bxs]
            self.is_english = re.search(r"[\na-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}",
                                        "".join([b["text"] for b in random.choices(bxes, k=min(30, len(bxes)))]))
        logging.debug(f"Is it English: {self.is_english}")

        self.page_cum_height = np.cumsum(self.page_cum_height)
        assert len(self.page_cum_height) == len(self.page_images) + 1
        if len(self.boxes) == 0 and zoomin < 9:
            self.__images__(fnm, zoomin * 3, page_from, page_to, callback)

    def __call__(self, fnm, need_image=True, zoomin=3, return_html=False):
        self.__images__(fnm, zoomin)
        self._layouts_rec(zoomin)
        self._table_transformer_job(zoomin)
        self._text_merge()
        self._concat_downward()
        self._filter_forpages()
        tbls = self._extract_table_figure(
            need_image, zoomin, return_html, False)
        return self.__filterout_scraps(deepcopy(self.boxes), zoomin), tbls

    def remove_tag(self, txt):
        return re.sub(r"@@[\t0-9.-]+?##", "", txt)

    def crop(self, text, ZM=3, need_position=False):
        imgs = []
        poss = []
        for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", text):
            pn, left, right, top, bottom = tag.strip(
                "#").strip("@").split("\t")
            left, right, top, bottom = float(left), float(
                right), float(top), float(bottom)
            poss.append(([int(p) - 1 for p in pn.split("-")],
                         left, right, top, bottom))
        if not poss:
            if need_position:
                return None, None
            return

        max_width = max(
            np.max([right - left for (_, left, right, _, _) in poss]), 6)
        GAP = 6
        pos = poss[0]
        poss.insert(0, ([pos[0][0]], pos[1], pos[2], max(
            0, pos[3] - 120), max(pos[3] - GAP, 0)))
        pos = poss[-1]
        poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1] / ZM, pos[4] + GAP),
                     min(self.page_images[pos[0][-1]].size[1] / ZM, pos[4] + 120)))

        positions = []
        for ii, (pns, left, right, top, bottom) in enumerate(poss):
            right = left + max_width
            bottom *= ZM
            for pn in pns[1:]:
                bottom += self.page_images[pn - 1].size[1]
            imgs.append(
                self.page_images[pns[0]].crop((left * ZM, top * ZM,
                                               right * ZM,
                                               min(bottom, self.page_images[pns[0]].size[1])))
            )
            if 0 < ii < len(poss) - 1:
                positions.append((pns[0] + self.page_from, left, right, top, min(
                    bottom, self.page_images[pns[0]].size[1]) / ZM))
            bottom -= self.page_images[pns[0]].size[1]
            for pn in pns[1:]:
                imgs.append(
                    self.page_images[pn].crop((left * ZM, 0,
                                               right * ZM,
                                               min(bottom, self.page_images[pn].size[1])))
                )
                if 0 < ii < len(poss) - 1:
                    positions.append((pn + self.page_from, left, right, 0, min(
                        bottom, self.page_images[pn].size[1]) / ZM))
                bottom -= self.page_images[pn].size[1]

        if not imgs:
            if need_position:
                return None, None
            return
        height = 0
        for img in imgs:
            height += img.size[1] + GAP
        height = int(height)
        width = int(np.max([i.size[0] for i in imgs]))
        pic = Image.new("RGB",
                        (width, height),
                        (245, 245, 245))
        height = 0
        for ii, img in enumerate(imgs):
            if ii == 0 or ii + 1 == len(imgs):
                img = img.convert('RGBA')
                overlay = Image.new('RGBA', img.size, (0, 0, 0, 0))
                overlay.putalpha(128)
                img = Image.alpha_composite(img, overlay).convert("RGB")
            pic.paste(img, (0, int(height)))
            height += img.size[1] + GAP

        if need_position:
            return pic, positions
        return pic

    def get_position(self, bx, ZM):
        poss = []
        pn = bx["page_number"]
        top = bx["top"] - self.page_cum_height[pn - 1]
        bott = bx["bottom"] - self.page_cum_height[pn - 1]
        poss.append((pn, bx["x0"], bx["x1"], top, min(
            bott, self.page_images[pn - 1].size[1] / ZM)))
        while bott * ZM > self.page_images[pn - 1].size[1]:
            bott -= self.page_images[pn - 1].size[1] / ZM
            top = 0
            pn += 1
            poss.append((pn, bx["x0"], bx["x1"], top, min(
                bott, self.page_images[pn - 1].size[1] / ZM)))
        return poss


class PlainParser:
    def __call__(self, filename, from_page=0, to_page=100000, **kwargs):
        self.outlines = []
        lines = []
        try:
            self.pdf = pdf2_read(
                filename if isinstance(
                    filename, str) else BytesIO(filename))
            for page in self.pdf.pages[from_page:to_page]:
                lines.extend([t for t in page.extract_text().split("\n")])

            outlines = self.pdf.outline

            def dfs(arr, depth):
                for a in arr:
                    if isinstance(a, dict):
                        self.outlines.append((a["/Title"], depth))
                        continue
                    dfs(a, depth + 1)

            dfs(outlines, 0)
        except Exception:
            logging.exception("Outlines exception")
        if not self.outlines:
            logging.warning("Miss outlines")

        return [(line, "") for line in lines], []

    def crop(self, ck, need_position):
        raise NotImplementedError

    @staticmethod
    def remove_tag(txt):
        raise NotImplementedError


class VisionParser(RAGFlowPdfParser):
    def __init__(self, vision_model, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.vision_model = vision_model

    def __images__(self, fnm, zoomin=3, page_from=0, page_to=299, callback=None):
        try:
            with sys.modules[LOCK_KEY_pdfplumber]:
                self.pdf = pdfplumber.open(fnm) if isinstance(
                    fnm, str) else pdfplumber.open(BytesIO(fnm))
                self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
                                    enumerate(self.pdf.pages[page_from:page_to])]
                self.total_page = len(self.pdf.pages)
        except Exception:
            self.page_images = None
            self.total_page = 0
            logging.exception("VisionParser __images__")

    def __call__(self, filename, from_page=0, to_page=100000, **kwargs):
        callback = kwargs.get("callback", lambda prog, msg: None)
        self.__images__(fnm=filename, zoomin=3, page_from=from_page, page_to=to_page, **kwargs)

        total_pdf_pages = self.total_page

        start_page = max(0, from_page)
        end_page = min(to_page, total_pdf_pages)

        all_docs = []
        for idx, img_binary in enumerate(self.page_images or []):
            pdf_page_num = idx  # 0-based
            if pdf_page_num < start_page or pdf_page_num >= end_page:
                continue

            docs = picture_vision_llm_chunk(
                binary=img_binary,
                vision_model=self.vision_model,
                prompt=vision_llm_describe_prompt(page=pdf_page_num + 1),
                callback=callback,
            )
            if docs:
                all_docs.append(docs)
        return [(doc, "") for doc in all_docs], []


if __name__ == "__main__":
    pass
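    # Minimal usage sketch (an assumption for illustration, not part of the upstream
    # module): run `python pdf_parser.py /path/to/file.pdf` to parse a PDF with
    # RAGFlowPdfParser and print the extracted text plus the number of cropped
    # tables/figures. Requires the deepdoc OCR/layout models to be available locally.
    if len(sys.argv) > 1:
        pdf_parser = RAGFlowPdfParser()
        extracted_text, extracted_tables = pdf_parser(
            sys.argv[1], need_image=False, zoomin=3, return_html=True)
        print(extracted_text)
        print(f"{len(extracted_tables)} tables/figures extracted")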