
manual.py

#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import copy
import re
from io import BytesIO

from api.db import ParserType
from rag.nlp import rag_tokenizer, tokenize, tokenize_table, bullets_category, title_frequency, tokenize_chunks, docx_question_level
from rag.utils import num_tokens_from_string
from deepdoc.parser import PdfParser, PlainParser, DocxParser
from docx import Document
from PIL import Image
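

# PDF manuals: OCR each page, run layout and table analysis, and return
# (text, layout label, position) triples plus the extracted tables.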
class Pdf(PdfParser):
    def __init__(self):
        # NOTE: "model_speciess" (sic) is the spelling used elsewhere in this
        # code base, so it is kept as-is.
        self.model_speciess = ParserType.MANUAL.value
        super().__init__()

    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        from timeit import default_timer as timer
        start = timer()
        callback(msg="OCR is running...")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished.")
        # for bb in self.boxes:
        #     for b in bb:
        #         print(b)
        print("OCR:", timer() - start)

        self._layouts_rec(zoomin)
        callback(0.65, "Layout analysis finished.")
        print("layouts:", timer() - start)
        self._table_transformer_job(zoomin)
        callback(0.67, "Table analysis finished.")
        self._text_merge()
        tbls = self._extract_table_figure(True, zoomin, True, True)
        self._concat_downward()
        self._filter_forpages()
        callback(0.68, "Text merging finished.")

        # Clean up whitespace: collapse runs of tabs/spaces (including
        # full-width spaces) into a single space.
        for b in self.boxes:
            b["text"] = re.sub(r"([\t ]|\u3000){2,}", " ", b["text"].strip())

        return [(b["text"], b.get("layout_no", ""), self.get_position(b, zoomin))
                for b in self.boxes], tbls
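

# DOCX manuals: walk the paragraphs while keeping a stack of the headings
# above the current text, and pair each block of body text (plus any inline
# images) with its heading "breadcrumb".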
class Docx(DocxParser):
    def __init__(self):
        pass

    def get_picture(self, document, paragraph):
        # Pull the first inline image out of a paragraph, if any.
        img = paragraph._element.xpath('.//pic:pic')
        if not img:
            return None
        img = img[0]
        embed = img.xpath('.//a:blip/@r:embed')[0]
        related_part = document.part.related_parts[embed]
        image = related_part.image
        return Image.open(BytesIO(image.blob))

    def concat_img(self, img1, img2):
        # Stack two images vertically; either argument may be None.
        if img1 and not img2:
            return img1
        if not img1 and img2:
            return img2
        if not img1 and not img2:
            return None
        width1, height1 = img1.size
        width2, height2 = img2.size
        new_width = max(width1, width2)
        new_height = height1 + height2
        new_image = Image.new('RGB', (new_width, new_height))
        new_image.paste(img1, (0, 0))
        new_image.paste(img2, (0, height1))
        return new_image
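
    # question_stack/level_stack hold the chain of headings above the current
    # paragraph: when a new heading arrives, headings at the same or a deeper
    # level are popped, so the stack always spells the current heading path.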
    def __call__(self, filename, binary=None, from_page=0, to_page=100000, callback=None):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        last_answer, last_image = "", None
        question_stack, level_stack = [], []
        ti_list = []
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            question_level, p_text = 0, ''
            if from_page <= pn < to_page and p.text.strip():
                question_level, p_text = docx_question_level(p)
            if not question_level or question_level > 6:  # not a question: accumulate body text and images
                last_answer = f'{last_answer}\n{p_text}'
                current_image = self.get_picture(self.doc, p)
                last_image = self.concat_img(last_image, current_image)
            else:  # a question: flush the accumulated answer, then update the heading stack
                if last_answer or last_image:
                    sum_question = '\n'.join(question_stack)
                    if sum_question:
                        ti_list.append((f'{sum_question}\n{last_answer}', last_image))
                    last_answer, last_image = '', None
                i = question_level
                while question_stack and i <= level_stack[-1]:
                    question_stack.pop()
                    level_stack.pop()
                question_stack.append(p_text)
                level_stack.append(question_level)
            # Track the page number via rendered/explicit page breaks in the runs.
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        # Flush whatever is left after the last paragraph.
        if last_answer:
            sum_question = '\n'.join(question_stack)
            if sum_question:
                ti_list.append((f'{sum_question}\n{last_answer}', last_image))
        # Render each table as HTML, merging runs of identical adjacent cells
        # into a colspan (python-docx repeats the same cell across a merge).
        tbls = []
        for tb in self.doc.tables:
            html = "<table>"
            for r in tb.rows:
                html += "<tr>"
                i = 0
                while i < len(r.cells):
                    span = 1
                    c = r.cells[i]
                    for j in range(i + 1, len(r.cells)):
                        if c.text != r.cells[j].text:
                            break  # only merge *adjacent* identical cells
                        span += 1
                        i = j
                    i += 1
                    html += f"<td>{c.text}</td>" if span == 1 else f"<td colspan='{span}'>{c.text}</td>"
                html += "</tr>"
            html += "</table>"
            tbls.append(((None, html), ""))
        return ti_list, tbls
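

# Entry point for the ingestion pipeline: dispatch on the file extension,
# split the manual into position-tagged chunks, and tokenize them for indexing.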
def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Only pdf and docx are supported.
    """
    pdf_parser = None
    doc = {
        "docnm_kwd": filename
    }
    doc["title_tks"] = rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", doc["docnm_kwd"]))
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    # is it English?
    eng = lang.lower() == "english"  # pdf_parser.is_english
    if re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf() if kwargs.get(
            "parser_config", {}).get(
            "layout_recognize", True) else PlainParser()
        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)
        if sections and len(sections[0]) < 3:
            sections = [(t, l, [[0] * 5]) for t, l in sections]
        # Set the pivot using the most frequent title level,
        # then merge the sections between two pivots.
        if len(sections) > 0 and len(pdf_parser.outlines) / len(sections) > 0.1:
            # The PDF has a usable outline: match each section against the
            # outline entries by character-bigram overlap to infer its level.
            max_lvl = max([lvl for _, lvl in pdf_parser.outlines])
            most_level = max(0, max_lvl - 1)
            levels = []
            for txt, _, _ in sections:
                for t, lvl in pdf_parser.outlines:
                    tks = set([t[i] + t[i + 1] for i in range(len(t) - 1)])
                    tks_ = set([txt[i] + txt[i + 1]
                                for i in range(min(len(t), len(txt) - 1))])
                    if len(tks & tks_) / max([len(tks), len(tks_), 1]) > 0.8:
                        levels.append(lvl)
                        break
                else:
                    levels.append(max_lvl + 1)
        else:
            # No outline: fall back to bullet/numbering patterns in the text.
            bull = bullets_category([txt for txt, _, _ in sections])
            most_level, levels = title_frequency(
                bull, [(txt, l) for txt, l, _ in sections])
        assert len(sections) == len(levels)
        # Assign section ids: a new id starts whenever a title at or above
        # the pivot level appears.
        sec_ids = []
        sid = 0
        for i, lvl in enumerate(levels):
            if lvl <= most_level and i > 0 and lvl != levels[i - 1]:
                sid += 1
            sec_ids.append(sid)
            # print(lvl, self.boxes[i]["text"], most_level, sid)

        sections = [(txt, sec_ids[i], poss)
                    for i, (txt, _, poss) in enumerate(sections)]
        # Append tables as their own sections (sec_id -1) with page-relative positions.
        for (img, rows), poss in tbls:
            if not rows:
                continue
            sections.append((rows if isinstance(rows, str) else rows[0], -1,
                             [(p[0] + 1 - from_page, p[1], p[2], p[3], p[4]) for p in poss]))
        def tag(pn, left, right, top, bottom):
            if pn + left + right + top + bottom == 0:
                return ""
            return "@@{}\t{:.1f}\t{:.1f}\t{:.1f}\t{:.1f}##" \
                .format(pn, left, right, top, bottom)

        # Merge sections into chunks in reading order (page, top, left):
        # grow the current chunk until it has at least 32 tokens, and keep
        # growing up to 1024 tokens while the section id stays the same
        # (tables, sec_id -1, may always be merged in).
        chunks = []
        last_sid = -2
        tk_cnt = 0
        for txt, sec_id, poss in sorted(sections, key=lambda x: (
                x[-1][0][0], x[-1][0][3], x[-1][0][1])):
            poss = "\t".join([tag(*pos) for pos in poss])
            if tk_cnt < 32 or (tk_cnt < 1024 and (sec_id == last_sid or sec_id == -1)):
                if chunks:
                    chunks[-1] += "\n" + txt + poss
                    tk_cnt += num_tokens_from_string(txt)
                    continue
            chunks.append(txt + poss)
            tk_cnt = num_tokens_from_string(txt)
            if sec_id > -1:
                last_sid = sec_id

        res = tokenize_table(tbls, doc, eng)
        res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
        return res
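
    # DOCX branch: no page positions are available, so every (text, image)
    # pair produced by the heading walk becomes one chunk.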
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        docx_parser = Docx()
        ti_list, tbls = docx_parser(filename, binary,
                                    from_page=0, to_page=10000, callback=callback)
        res = tokenize_table(tbls, doc, eng)
        for text, image in ti_list:
            d = copy.deepcopy(doc)
            d['image'] = image
            tokenize(d, text, eng)
            res.append(d)
        return res

    raise NotImplementedError("file type not supported yet (pdf and docx supported)")
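

# Minimal smoke test: chunk a file given on the command line, with a no-op
# progress callback.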
if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], callback=dummy)