
naive.py

#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from tika import parser
from io import BytesIO
from docx import Document
from timeit import default_timer as timer
import re
from deepdoc.parser.pdf_parser import PlainParser
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec, concat_img, \
    naive_merge_docx, tokenize_chunks_docx
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser, JsonParser, MarkdownParser, TxtParser
from rag.utils import num_tokens_from_string
from PIL import Image
from functools import reduce
from markdown import markdown
from docx.image.exceptions import UnrecognizedImageError, UnexpectedEndOfFileError, InvalidImageStreamError


class Docx(DocxParser):
    def __init__(self):
        pass

    def get_picture(self, document, paragraph):
        # Return the first embedded image of the paragraph as an RGB PIL
        # image, or None if there is no image or it cannot be decoded.
        img = paragraph._element.xpath('.//pic:pic')
        if not img:
            return None
        img = img[0]
        embed = img.xpath('.//a:blip/@r:embed')[0]
        related_part = document.part.related_parts[embed]
        try:
            image_blob = related_part.image.blob
        except UnrecognizedImageError:
            logging.info("Unrecognized image format. Skipping image.")
            return None
        except UnexpectedEndOfFileError:
            logging.info("EOF was unexpectedly encountered while reading an image stream. Skipping image.")
            return None
        except InvalidImageStreamError:
            logging.info("The recognized image stream appears to be corrupted. Skipping image.")
            return None
        try:
            image = Image.open(BytesIO(image_blob)).convert('RGB')
            return image
        except Exception:
            return None

    def __clean(self, line):
        line = re.sub(r"\u3000", " ", line).strip()
        return line

    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        lines = []
        last_image = None
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page:
                if p.text.strip():
                    if p.style and p.style.name == 'Caption':
                        # Attach the caption to the image it describes:
                        # either the last image of the previous line or a
                        # pending image that had no text of its own.
                        former_image = None
                        if lines and lines[-1][1] and lines[-1][2] != 'Caption':
                            former_image = lines[-1][1].pop()
                        elif last_image:
                            former_image = last_image
                            last_image = None
                        lines.append((self.__clean(p.text), [former_image], p.style.name))
                    else:
                        current_image = self.get_picture(self.doc, p)
                        image_list = [current_image]
                        if last_image:
                            image_list.insert(0, last_image)
                            last_image = None
                        lines.append((self.__clean(p.text), image_list, p.style.name if p.style else ""))
                else:
                    # Image-only paragraph: append the image to the previous
                    # line, or hold it until a text line arrives.
                    if current_image := self.get_picture(self.doc, p):
                        if lines:
                            lines[-1][1].append(current_image)
                        else:
                            last_image = current_image
            # Advance the page counter on rendered or explicit page breaks
            # so that from_page/to_page are honored.
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        new_line = [(line[0], reduce(concat_img, line[1]) if line[1] else None) for line in lines]

        # Render each table as HTML, merging runs of cells that carry the
        # same text into a single <td> with a colspan.
        tbls = []
        for tb in self.doc.tables:
            html = "<table>"
            for r in tb.rows:
                html += "<tr>"
                i = 0
                while i < len(r.cells):
                    span = 1
                    c = r.cells[i]
                    for j in range(i + 1, len(r.cells)):
                        if c.text == r.cells[j].text:
                            span += 1
                            i = j
                    i += 1
                    html += f"<td>{c.text}</td>" if span == 1 else f"<td colspan='{span}'>{c.text}</td>"
                html += "</tr>"
            html += "</table>"
            tbls.append(((None, html), ""))
        return new_line, tbls
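
# A minimal standalone sketch of the Docx parser (not part of the original
# module; "sample.docx" is a hypothetical path). It returns (text, image)
# pairs plus each table rendered as HTML:
#
#     sections, tables = Docx()("sample.docx")
#     for text, image in sections:
#         print(text[:40], "(has image)" if image else "")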


class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        start = timer()
        first_start = start
        callback(msg="OCR started")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished ({:.2f}s)".format(timer() - start))
        logging.info("OCR({}~{}): {:.2f}s".format(from_page, to_page, timer() - start))

        start = timer()
        self._layouts_rec(zoomin)
        callback(0.63, "Layout analysis ({:.2f}s)".format(timer() - start))

        start = timer()
        self._table_transformer_job(zoomin)
        callback(0.65, "Table analysis ({:.2f}s)".format(timer() - start))

        start = timer()
        self._text_merge()
        callback(0.67, "Text merged ({:.2f}s)".format(timer() - start))
        tbls = self._extract_table_figure(True, zoomin, True, True)
        # self._naive_vertical_merge()
        self._concat_downward()
        # self._filter_forpages()
        logging.info("layouts cost: {}s".format(timer() - first_start))
        return [(b["text"], self._line_tag(b, zoomin))
                for b in self.boxes], tbls
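
# A sketch of driving Pdf directly ("sample.pdf" is a hypothetical path; the
# no-op callback mirrors how Pdf.__call__ invokes it, positionally for
# progress and with the msg keyword):
#
#     sections, tables = Pdf()("sample.pdf", callback=lambda prog=None, msg="": None)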


class Markdown(MarkdownParser):
    def __call__(self, filename, binary=None):
        if binary:
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            with open(filename, "r") as f:
                txt = f.read()
        remainder, tables = self.extract_tables_and_remainder(f'{txt}\n')
        sections = []
        tbls = []
        for sec in remainder.split("\n"):
            if num_tokens_from_string(sec) > 3 * self.chunk_token_num:
                # Oversized line: split it roughly in half by characters.
                sections.append((sec[:int(len(sec) / 2)], ""))
                sections.append((sec[int(len(sec) / 2):], ""))
            else:
                if sec.strip().find("#") == 0:
                    sections.append((sec, ""))
                elif sections and sections[-1][0].strip().find("#") == 0:
                    # Keep a heading together with the line that follows it.
                    sec_, _ = sections.pop(-1)
                    sections.append((sec_ + "\n" + sec, ""))
                else:
                    sections.append((sec, ""))
        for table in tables:
            tbls.append(((None, markdown(table, extensions=['markdown.extensions.tables'])), ""))
        return sections, tbls
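
# A sketch of the Markdown parser on its own ("notes.md" is a hypothetical
# path; 128 is the chunk_token_num handed to MarkdownParser):
#
#     sections, tables = Markdown(128)("notes.md")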


def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, excel, txt, markdown, html, json and doc.
    This method applies the naive way to chunk files:
    successive text is sliced into pieces using the 'delimiter',
    and those pieces are then merged into chunks whose token count does not exceed 'Max token number'.
    """
    is_english = lang.lower() == "english"  # is_english(cks)
    parser_config = kwargs.get(
        "parser_config", {
            "chunk_token_num": 128, "delimiter": "\n!?。;!?", "layout_recognize": True})
    doc = {
        "docnm_kwd": filename,
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    res = []
    pdf_parser = None
    # Dispatch on the file extension; each branch produces `sections`
    # (and, for docx/pdf/markdown, table chunks in `res`).
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tables = Docx()(filename, binary)
        res = tokenize_table(tables, doc, is_english)  # just for table
        callback(0.8, "Finish parsing.")

        st = timer()
        chunks, images = naive_merge_docx(
            sections, int(parser_config.get(
                "chunk_token_num", 128)), parser_config.get(
                "delimiter", "\n!?。;!?"))
        if kwargs.get("section_only", False):
            return chunks

        res.extend(tokenize_chunks_docx(chunks, doc, is_english, images))
        logging.info("naive_merge({}): {}".format(filename, timer() - st))
        return res
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf() if parser_config.get("layout_recognize", True) else PlainParser()
        sections, tables = pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page, callback=callback)
        res = tokenize_table(tables, doc, is_english)
    elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = ExcelParser()
        if parser_config.get("html4excel"):
            sections = [(_, "") for _ in excel_parser.html(binary, 12) if _]
        else:
            sections = [(_, "") for _ in excel_parser(binary) if _]
    elif re.search(r"\.(txt|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|sql)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = TxtParser()(filename, binary,
                               parser_config.get("chunk_token_num", 128),
                               parser_config.get("delimiter", "\n!?;。;!?"))
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.(md|markdown)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tables = Markdown(int(parser_config.get("chunk_token_num", 128)))(filename, binary)
        res = tokenize_table(tables, doc, is_english)
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = HtmlParser()(filename, binary)
        sections = [(_, "") for _ in sections if _]
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.json$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        chunk_token_num = int(parser_config.get("chunk_token_num", 128))
        sections = JsonParser(chunk_token_num)(binary)
        sections = [(_, "") for _ in sections if _]
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.doc$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        binary = BytesIO(binary)
        doc_parsed = parser.from_buffer(binary)
        if doc_parsed.get('content', None) is not None:
            sections = doc_parsed['content'].split('\n')
            sections = [(_, "") for _ in sections if _]
            callback(0.8, "Finish parsing.")
        else:
            callback(0.8, f"tika.parser got empty content from {filename}.")
            logging.warning(f"tika.parser got empty content from {filename}.")
            return []
    else:
        raise NotImplementedError(
            "file type not supported yet (pdf, xlsx, doc, docx, txt, md, htm/html, json supported)")

    st = timer()
    chunks = naive_merge(
        sections, int(parser_config.get(
            "chunk_token_num", 128)), parser_config.get(
            "delimiter", "\n!?。;!?"))
    if kwargs.get("section_only", False):
        return chunks

    res.extend(tokenize_chunks(chunks, doc, is_english, pdf_parser))
    logging.info("naive_merge({}): {}".format(filename, timer() - st))
    return res


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], from_page=0, to_page=10, callback=dummy)
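
    # A hedged variant (commented out): passing section_only=True returns the
    # raw merged chunks and skips tokenization, per the fast path in chunk().
    # chunk(sys.argv[1], from_page=0, to_page=10, callback=dummy, section_only=True)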