
naive.py 12KB

#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
import re
from functools import reduce
from io import BytesIO
from timeit import default_timer as timer

from docx import Document
from docx.image.exceptions import UnrecognizedImageError, UnexpectedEndOfFileError, InvalidImageStreamError
from markdown import markdown
from PIL import Image
from tika import parser

from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser, JsonParser, MarkdownParser, TxtParser
from deepdoc.parser.pdf_parser import PlainParser
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec, concat_img, \
    naive_merge_docx, tokenize_chunks_docx
from rag.utils import num_tokens_from_string


class Docx(DocxParser):
    def __init__(self):
        pass

    def get_picture(self, document, paragraph):
        # Resolve the first embedded picture in the paragraph through its
        # r:embed relationship; return a PIL RGB image, or None when the
        # paragraph has no image or the stream cannot be decoded.
        img = paragraph._element.xpath('.//pic:pic')
        if not img:
            return None
        img = img[0]
        embed = img.xpath('.//a:blip/@r:embed')[0]
        related_part = document.part.related_parts[embed]
        try:
            image_blob = related_part.image.blob
        except UnrecognizedImageError:
            logging.info("Unrecognized image format. Skipping image.")
            return None
        except UnexpectedEndOfFileError:
            logging.info("EOF was unexpectedly encountered while reading an image stream. Skipping image.")
            return None
        except InvalidImageStreamError:
            logging.info("The recognized image stream appears to be corrupted. Skipping image.")
            return None
        try:
            image = Image.open(BytesIO(image_blob)).convert('RGB')
            return image
        except Exception:
            return None

    def __clean(self, line):
        # Replace ideographic spaces and trim surrounding whitespace.
        line = re.sub(r"\u3000", " ", line).strip()
        return line

    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0  # current page, advanced by the page-break scan below
        lines = []
        last_image = None
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page:
                if p.text.strip():
                    if p.style and p.style.name == 'Caption':
                        # Attach the most recent image to its caption line.
                        former_image = None
                        if lines and lines[-1][1] and lines[-1][2] != 'Caption':
                            former_image = lines[-1][1].pop()
                        elif last_image:
                            former_image = last_image
                            last_image = None
                        lines.append((self.__clean(p.text), [former_image], p.style.name))
                    else:
                        current_image = self.get_picture(self.doc, p)
                        image_list = [current_image]
                        if last_image:
                            image_list.insert(0, last_image)
                            last_image = None
                        lines.append((self.__clean(p.text), image_list, p.style.name if p.style else ""))
                else:
                    # Image-only paragraph: append the picture to the
                    # previous line, or hold it for the next one.
                    if current_image := self.get_picture(self.doc, p):
                        if lines:
                            lines[-1][1].append(current_image)
                        else:
                            last_image = current_image
            # Advance the page counter on rendered and explicit page breaks.
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        new_line = [(line[0], reduce(concat_img, line[1]) if line[1] else None) for line in lines]

        # Render each table to HTML; python-docx repeats a merged cell once
        # per grid slot it covers, so a run of consecutive cells with the
        # same text is collapsed into a single cell with a colspan.
        tbls = []
        for tb in self.doc.tables:
            html = "<table>"
            for r in tb.rows:
                html += "<tr>"
                i = 0
                while i < len(r.cells):
                    span = 1
                    c = r.cells[i]
                    for j in range(i + 1, len(r.cells)):
                        if c.text == r.cells[j].text:
                            span += 1
                            i = j
                        else:
                            # Only a contiguous run counts as a merge; without
                            # this break, an identical but unrelated cell
                            # further right would be swallowed as well.
                            break
                    i += 1
                    html += f"<td>{c.text}</td>" if span == 1 else f"<td colspan='{span}'>{c.text}</td>"
                html += "</tr>"
            html += "</table>"
            tbls.append(((None, html), ""))
        return new_line, tbls
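
# A minimal usage sketch of the parser above (the file name is a hypothetical
# placeholder). Per the return statement, `sections` is a list of
# (text, merged_image_or_None) tuples and each entry of `tables` is
# ((None, html), ""):
#
#   sections, tables = Docx()("manual.docx")
#   for text, image in sections:
#       print(text[:40], "[figure attached]" if image else "")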


class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        start = timer()
        first_start = start
        callback(msg="OCR started")
        # Rasterize the requested page range and run OCR on it.
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished ({:.2f}s)".format(timer() - start))
        logging.info("OCR({}~{}): {:.2f}s".format(from_page, to_page, timer() - start))

        # Layout analysis, table structure recognition, then merging of
        # adjacent text boxes.
        start = timer()
        self._layouts_rec(zoomin)
        callback(0.63, "Layout analysis ({:.2f}s)".format(timer() - start))

        start = timer()
        self._table_transformer_job(zoomin)
        callback(0.65, "Table analysis ({:.2f}s)".format(timer() - start))

        start = timer()
        self._text_merge()
        callback(0.67, "Text merged ({:.2f}s)".format(timer() - start))
        tbls = self._extract_table_figure(True, zoomin, True, True)
        # self._naive_vertical_merge()
        self._concat_downward()
        # self._filter_forpages()

        logging.info("layouts cost: {}s".format(timer() - first_start))
        # Each text box carries a positional tag so chunks can be traced
        # back to page coordinates.
        return [(b["text"], self._line_tag(b, zoomin))
                for b in self.boxes], tbls


class Markdown(MarkdownParser):
    def __call__(self, filename, binary=None):
        if binary:
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            with open(filename, "r") as f:
                txt = f.read()
        # Separate pipe tables from the running text.
        remainder, tables = self.extract_tables_and_remainder(f'{txt}\n')
        sections = []
        tbls = []
        for sec in remainder.split("\n"):
            if num_tokens_from_string(sec) > 10 * self.chunk_token_num:
                # Split an oversized line in half rather than dropping it.
                sections.append((sec[:int(len(sec) / 2)], ""))
                sections.append((sec[int(len(sec) / 2):], ""))
            else:
                if sections and sections[-1][0].strip().find("#") == 0:
                    # Glue a line onto the heading that precedes it.
                    sec_, _ = sections.pop(-1)
                    sections.append((sec_ + "\n" + sec, ""))
                else:
                    sections.append((sec, ""))
        for table in tables:
            tbls.append(((None, markdown(table, extensions=['markdown.extensions.tables'])), ""))
        return sections, tbls
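
# Sketch of what the table loop above produces: python-markdown's "tables"
# extension renders a pipe table as an HTML <table> (the input string here
# is illustrative only):
#
#   from markdown import markdown
#   html = markdown("| a | b |\n| --- | --- |\n| 1 | 2 |",
#                   extensions=['markdown.extensions.tables'])
#   # html is roughly "<table>\n<thead>\n<tr>\n<th>a</th>..."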


def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, excel, txt, markdown, html, json
    and legacy doc; plain-text source files (.py, .java, ...) are handled
    like txt.
    This method applies a naive chunking strategy: the text is first sliced
    into pieces at 'delimiter', and successive pieces are then merged into
    chunks whose token count does not exceed 'chunk_token_num'.
    """
    eng = lang.lower() == "english"  # is_english(cks)
    parser_config = kwargs.get(
        "parser_config", {
            "chunk_token_num": 128, "delimiter": "\n!?。;!?", "layout_recognize": True})
    doc = {
        "docnm_kwd": filename,
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    res = []
    pdf_parser = None
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tbls = Docx()(filename, binary)
        res = tokenize_table(tbls, doc, eng)  # just for table
        callback(0.8, "Finish parsing.")
        st = timer()

        chunks, images = naive_merge_docx(
            sections, int(parser_config.get(
                "chunk_token_num", 128)), parser_config.get(
                "delimiter", "\n!?。;!?"))

        if kwargs.get("section_only", False):
            return chunks

        res.extend(tokenize_chunks_docx(chunks, doc, eng, images))
        logging.info("naive_merge({}): {}".format(filename, timer() - st))
        return res

    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf(
        ) if parser_config.get("layout_recognize", True) else PlainParser()
        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)
        res = tokenize_table(tbls, doc, eng)

    elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = ExcelParser()
        if parser_config.get("html4excel"):
            sections = [(_, "") for _ in excel_parser.html(binary, 12) if _]
        else:
            sections = [(_, "") for _ in excel_parser(binary) if _]

    elif re.search(r"\.(txt|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|sql)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = TxtParser()(filename, binary,
                               parser_config.get("chunk_token_num", 128),
                               parser_config.get("delimiter", "\n!?;。;!?"))
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.(md|markdown)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tbls = Markdown(int(parser_config.get("chunk_token_num", 128)))(filename, binary)
        res = tokenize_table(tbls, doc, eng)
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = HtmlParser()(filename, binary)
        sections = [(_, "") for _ in sections if _]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.json$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = JsonParser(int(parser_config.get("chunk_token_num", 128)))(binary)
        sections = [(_, "") for _ in sections if _]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.doc$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        binary = BytesIO(binary)
        doc_parsed = parser.from_buffer(binary)
        if doc_parsed.get('content', None) is not None:
            sections = doc_parsed['content'].split('\n')
            sections = [(_, "") for _ in sections if _]
            callback(0.8, "Finish parsing.")
        else:
            callback(0.8, f"tika.parser got empty content from {filename}.")
            logging.warning(f"tika.parser got empty content from {filename}.")
            return []
    else:
        raise NotImplementedError(
            "file type not supported yet (pdf, xlsx, doc, docx, txt, md, html and json are supported)")
    st = timer()
    chunks = naive_merge(
        sections, int(parser_config.get(
            "chunk_token_num", 128)), parser_config.get(
            "delimiter", "\n!?。;!?"))
    if kwargs.get("section_only", False):
        return chunks

    res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
    logging.info("naive_merge({}): {}".format(filename, timer() - st))
    return res


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass
    chunk(sys.argv[1], from_page=0, to_page=10, callback=dummy)
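
A minimal sketch of driving chunk() directly, assuming this module is importable; the file name, the parser_config values, and the progress printing below are illustrative assumptions, not project defaults beyond those visible in the code above:

    def progress(prog=None, msg=""):
        # chunk() reports progress as a float in [0, 1] and/or a message.
        print(prog if prog is not None else "", msg)

    results = chunk(
        "manual.pdf",  # hypothetical input file
        parser_config={"chunk_token_num": 256,
                       "delimiter": "\n!?。;!?",
                       "layout_recognize": True},
        callback=progress,
    )
    print(len(results), "chunks produced")

Setting "layout_recognize" to False swaps in PlainParser, which skips OCR and layout analysis, and passing section_only=True returns the raw merged chunks instead of tokenized documents.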