
paper.py

import copy
import re
from collections import Counter

import numpy as np

from rag.parser import tokenize
from rag.nlp import huqie
from rag.parser.pdf_parser import HuParser
from rag.utils import num_tokens_from_string
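
# Chunk an academic paper for retrieval: run OCR and layout analysis over the
# PDF, pull out title/authors/abstract/tables, then split the remaining body
# text into token-bounded chunks.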


class Pdf(HuParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page)
        callback(0.2, "OCR finished.")

        from timeit import default_timer as timer
        start = timer()
        self._layouts_paddle(zoomin)
        callback(0.47, "Layout analysis finished.")
        print("paddle layouts:", timer() - start)
        self._table_transformer_job(zoomin)
        callback(0.68, "Table analysis finished.")
        self._text_merge()
        column_width = np.median([b["x1"] - b["x0"] for b in self.boxes])
        self._concat_downward(concat_between_pages=False)
        self._filter_forpages()
        callback(0.75, "Text merging finished.")
        tbls = self._extract_table_figure(True, zoomin, False)

        # Clean up the mess: if the median box width is less than half the
        # page width, assume a two-column layout and re-sort boxes column-wise.
        if column_width < self.page_images[0].size[0] / zoomin / 2:
            print("two_column...................", column_width,
                  self.page_images[0].size[0] / zoomin / 2)
            self.boxes = self.sort_X_by_page(self.boxes, column_width / 2)
        for b in self.boxes:
            b["text"] = re.sub(r"([\t  ]|\u3000){2,}", " ", b["text"].strip())

        # Drop garbage boxes: text repeated on most pages (headers/footers),
        # bare alphanumeric fragments without a layout label, and exact
        # duplicates; merge consecutive boxes from the same layout element.
        freq = Counter([b["text"] for b in self.boxes])
        garbage = set([k for k, v in freq.items() if v > self.total_page * 0.6])
        i = 0
        while i < len(self.boxes):
            if self.boxes[i]["text"] in garbage \
                    or (re.match(r"[a-zA-Z0-9]+$", self.boxes[i]["text"]) and not self.boxes[i].get("layoutno")) \
                    or (i + 1 < len(self.boxes) and self.boxes[i]["text"] == self.boxes[i + 1]["text"]):
                self.boxes.pop(i)
            elif i + 1 < len(self.boxes) \
                    and self.boxes[i].get("layoutno", '0') == self.boxes[i + 1].get("layoutno", '1'):
                # merge boxes within the same layout element
                self.boxes[i + 1]["top"] = self.boxes[i]["top"]
                self.boxes[i + 1]["x0"] = min(self.boxes[i]["x0"], self.boxes[i + 1]["x0"])
                self.boxes[i + 1]["x1"] = max(self.boxes[i]["x1"], self.boxes[i + 1]["x1"])
                self.boxes[i + 1]["text"] = self.boxes[i]["text"] + " " + self.boxes[i + 1]["text"]
                self.boxes.pop(i)
            else:
                i += 1
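
        # Heuristic used below: a line "begins" a front-matter section when it
        # matches introduction/abstract/keywords/contents etc. (English or
        # Chinese), optionally prefixed by numbering such as "1." or "一、".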
        def _begin(txt):
            return re.match(
                "[0-9. 一、i]*(introduction|abstract|摘要|引言|keywords|key words|关键词|background|背景|目录|前言|contents)",
                txt.lower().strip())

        # Get title and authors: take the first title-labelled box as the
        # title, then collect the following lines as author lines until a
        # section heading shows up.
        title = ""
        authors = []
        i = 0
        while i < min(32, len(self.boxes)):
            b = self.boxes[i]
            i += 1
            if b.get("layoutno", "").find("title") >= 0:
                title = b["text"]
                if _begin(title):
                    title = ""
                    break
                for j in range(3):
                    if _begin(self.boxes[i + j]["text"]):
                        break
                    authors.append(self.boxes[i + j]["text"])
                break
            break
        # Get the abstract: look for a box starting with "abstract"/"摘要";
        # use the box itself if it is long enough, otherwise the box after it.
        abstr = ""
        i = 0
        while i + 1 < min(32, len(self.boxes)):
            b = self.boxes[i]
            i += 1
            txt = b["text"].lower().strip()
            if re.match("(abstract|摘要)", txt):
                if len(txt.split(" ")) > 32 or len(txt) > 64:
                    abstr = txt + self._line_tag(b, zoomin)
                    i += 1
                    break
                txt = self.boxes[i + 1]["text"].lower().strip()
                if len(txt.split(" ")) > 32 or len(txt) > 64:
                    abstr = txt + self._line_tag(self.boxes[i + 1], zoomin)
                    i += 1
                break
        if not abstr:
            i = 0

        callback(0.8, "Page {}~{}: Text merging finished.".format(from_page, min(to_page, self.total_page)))
        for b in self.boxes:
            print(b["text"], b.get("layoutno"))
        print(tbls)

        return {
            "title": title if title else filename,
            "authors": " ".join(authors),
            "abstract": abstr,
            "lines": [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno", ""))
                      for b in self.boxes[i:]
                      if re.match(r"(text|title)", b.get("layoutno", "text"))],
            "tables": tbls
        }
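

# Pdf.__call__ returns a plain dict:
#   title    - text of the first title-labelled box ("" means not found)
#   authors  - author lines joined by spaces
#   abstract - abstract text with a position tag appended
#   lines    - (text + position tag, layout label) pairs for text/title boxes
#   tables   - (image, [row_text, ...]) pairs from table/figure extraction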
def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
    pdf_parser = None
    paper = {}
    if re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf()
        paper = pdf_parser(filename if not binary else binary,
                           from_page=from_page, to_page=to_page, callback=callback)
    else:
        raise NotImplementedError("file type not supported yet (pdf supported)")

    doc = {
        "docnm_kwd": paper["title"] if paper["title"] else filename,
        "authors_tks": paper["authors"]
    }
    doc["title_tks"] = huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", doc["docnm_kwd"]))
    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
    doc["authors_sm_tks"] = huqie.qieqie(doc["authors_tks"])
    # is the paper English?
    eng = pdf_parser.is_english
    print("It's English.....", eng)
    res = []
    # Add tables: batch rows ten at a time and attach the cropped table image
    # to every resulting chunk.
    for img, rows in paper["tables"]:
        bs = 10
        de = ";" if eng else "；"
        for i in range(0, len(rows), bs):
            d = copy.deepcopy(doc)
            r = de.join(rows[i:i + bs])
            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
            tokenize(d, r)
            d["image"] = img
            res.append(d)

    # The abstract gets its own chunk, boosted with retrieval keywords.
    if paper["abstract"]:
        d = copy.deepcopy(doc)
        txt = pdf_parser.remove_tag(paper["abstract"])
        d["important_kwd"] = ["abstract", "总结", "概括", "summary", "summarize"]
        d["important_tks"] = " ".join(d["important_kwd"])
        d["image"] = pdf_parser.crop(paper["abstract"])
        tokenize(d, txt, eng)
        res.append(d)
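
    # The body text is consumed in two passes: an enumeration pass below, then
    # a greedy token-count pass.  `readed` marks lines already emitted so the
    # second pass can skip them.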
    readed = [0] * len(paper["lines"])
    # First pass: find a colon-terminated lead-in, then gather the enumeration
    # that follows it (lines sharing the same leading character).
    i = 0
    while i + 1 < len(paper["lines"]):
        txt = pdf_parser.remove_tag(paper["lines"][i][0])
        j = i
        if txt.strip("\n").strip()[-1] not in ":：":
            i += 1
            continue
        i += 1
        while i < len(paper["lines"]) and not paper["lines"][i][0]:
            i += 1
        if i >= len(paper["lines"]):
            break
        proj = [paper["lines"][i][0].strip()]
        i += 1
        while i < len(paper["lines"]) and paper["lines"][i][0].strip()[0] == proj[-1][0]:
            proj.append(paper["lines"][i][0].strip())
            i += 1
        for k in range(j, i):
            readed[k] = True
        # Keep only the last sentence of the lead-in: reverse the string and
        # search up to the first sentence terminator.
        txt = txt[::-1]
        if eng:
            r = re.search(r"(.*?) ([\.;?!]|$)", txt)
            txt = r.group(1)[::-1] if r else txt[::-1]
        else:
            r = re.search(r"(.*?) ([。?;!]|$)", txt)
            txt = r.group(1)[::-1] if r else txt[::-1]
        for p in proj:
            d = copy.deepcopy(doc)
            txt += "\n" + pdf_parser.remove_tag(p)
            d["image"] = pdf_parser.crop(p)
            tokenize(d, txt)
            res.append(d)
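
    # Second pass: greedily pack the remaining lines into chunks of roughly
    # 128 tokens, starting a fresh chunk at a section title or once the token
    # budget is exceeded.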
    i = 0
    chunk = []
    tk_cnt = 0

    def add_chunk():
        # Flush the accumulated lines as one chunk, cropping its image region.
        nonlocal chunk, res, doc, pdf_parser, tk_cnt
        d = copy.deepcopy(doc)
        ck = "\n".join(chunk)
        tokenize(d, pdf_parser.remove_tag(ck), pdf_parser.is_english)
        d["image"] = pdf_parser.crop(ck)
        res.append(d)
        chunk = []
        tk_cnt = 0

    while i < len(paper["lines"]):
        if tk_cnt > 128:
            add_chunk()
        if readed[i]:
            i += 1
            continue
        readed[i] = True
        txt, layouts = paper["lines"][i]
        txt_ = pdf_parser.remove_tag(txt)
        i += 1
        cnt = num_tokens_from_string(txt_)
        if any([
            layouts.find("title") >= 0 and chunk,
            cnt + tk_cnt > 128 and tk_cnt > 32,
        ]):
            add_chunk()
            chunk = [txt]
            tk_cnt = cnt
        else:
            chunk.append(txt)
            tk_cnt += cnt
    if chunk:
        add_chunk()

    for i, d in enumerate(res):
        print(d)
        # d["image"].save(f"./logs/{i}.jpg")
    return res


if __name__ == "__main__":
    import sys

    def dummy(a, b):
        pass

    chunk(sys.argv[1], callback=dummy)
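
# A progress callback only needs the (progress, message) signature used above.
# A minimal sketch, assuming stdout is an acceptable sink:
#
#     def progress(prog, msg):
#         print(f"[{prog * 100:.0f}%] {msg}")
#
#     chunk("paper.pdf", callback=progress)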