
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from tika import parser
from io import BytesIO
from docx import Document
from timeit import default_timer as timer
import re

from deepdoc.parser.pdf_parser import PlainParser
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec, concat_img, \
    naive_merge_docx, tokenize_chunks_docx
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser, JsonParser, MarkdownParser, TxtParser
from rag.utils import num_tokens_from_string
from PIL import Image
from functools import reduce
from markdown import markdown
from docx.image.exceptions import UnrecognizedImageError, UnexpectedEndOfFileError, InvalidImageStreamError
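

# Extracts text, inline images, and tables from .docx files; images stay
# attached to the paragraph (or caption) they belong to.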
class Docx(DocxParser):
    def __init__(self):
        pass
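
    # Return the first embedded picture in a paragraph as a PIL RGB image,
    # or None when the paragraph has no image or its stream is unreadable.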
    def get_picture(self, document, paragraph):
        img = paragraph._element.xpath('.//pic:pic')
        if not img:
            return None
        img = img[0]
        embed = img.xpath('.//a:blip/@r:embed')[0]
        related_part = document.part.related_parts[embed]
        try:
            image_blob = related_part.image.blob
        except UnrecognizedImageError:
            logging.info("Unrecognized image format. Skipping image.")
            return None
        except UnexpectedEndOfFileError:
            logging.info("EOF was unexpectedly encountered while reading an image stream. Skipping image.")
            return None
        except InvalidImageStreamError:
            logging.info("The recognized image stream appears to be corrupted. Skipping image.")
            return None
        try:
            image = Image.open(BytesIO(image_blob)).convert('RGB')
            return image
        except Exception:
            return None

    def __clean(self, line):
        line = re.sub(r"\u3000", " ", line).strip()
        return line
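
    # Walk paragraphs in document order, counting pages via rendered page
    # breaks, and collect (text, images, style) triples within the page range.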
    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        lines = []
        last_image = None
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page:
                if p.text.strip():
                    if p.style and p.style.name == 'Caption':
                        former_image = None
                        if lines and lines[-1][1] and lines[-1][2] != 'Caption':
                            former_image = lines[-1][1].pop()
                        elif last_image:
                            former_image = last_image
                            last_image = None
                        lines.append((self.__clean(p.text), [former_image], p.style.name))
                    else:
                        current_image = self.get_picture(self.doc, p)
                        image_list = [current_image]
                        if last_image:
                            image_list.insert(0, last_image)
                            last_image = None
                        lines.append((self.__clean(p.text), image_list, p.style.name if p.style else ""))
                else:
                    if current_image := self.get_picture(self.doc, p):
                        if lines:
                            lines[-1][1].append(current_image)
                        else:
                            last_image = current_image
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        new_line = [(line[0], reduce(concat_img, line[1]) if line[1] else None) for line in lines]
        tbls = []
        for tb in self.doc.tables:
            html = "<table>"
            for r in tb.rows:
                html += "<tr>"
                i = 0
                while i < len(r.cells):
                    span = 1
                    c = r.cells[i]
                    # Cells merged across columns show up in python-docx as
                    # repeated identical text; collapse such a run into one
                    # <td> with the appropriate colspan.
                    for j in range(i + 1, len(r.cells)):
                        if c.text == r.cells[j].text:
                            span += 1
                            i = j
                    i += 1
                    html += f"<td>{c.text}</td>" if span == 1 else f"<td colspan='{span}'>{c.text}</td>"
                html += "</tr>"
            html += "</table>"
            tbls.append(((None, html), ""))
        return new_line, tbls
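

# PDF parsing pipeline: OCR the pages, recognize the layout, extract tables
# and figures, then merge the remaining text boxes downward into lines.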
class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        start = timer()
        callback(msg="OCR is running...")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished")
        logging.info("OCR({}~{}): {}".format(from_page, to_page, timer() - start))

        start = timer()
        self._layouts_rec(zoomin)
        callback(0.63, "Layout analysis finished.")
        self._table_transformer_job(zoomin)
        callback(0.65, "Table analysis finished.")
        self._text_merge()
        callback(0.67, "Text merging finished")
        tbls = self._extract_table_figure(True, zoomin, True, True)
        # self._naive_vertical_merge()
        self._concat_downward()
        # self._filter_forpages()
        logging.info("layouts cost: {}s".format(timer() - start))
        return [(b["text"], self._line_tag(b, zoomin))
                for b in self.boxes], tbls
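

# Splits markdown into plain-text sections and tables; tables are rendered to
# HTML so they can be tokenized separately from the running text.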
class Markdown(MarkdownParser):
    def __call__(self, filename, binary=None):
        if binary:
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            with open(filename, "r") as f:
                txt = f.read()
        remainder, tables = self.extract_tables_and_remainder(f'{txt}\n')
        sections = []
        tbls = []
        for sec in remainder.split("\n"):
            if num_tokens_from_string(sec) > 10 * self.chunk_token_num:
                # Oversized line: split it in half instead of emitting one huge section.
                sections.append((sec[:int(len(sec) / 2)], ""))
                sections.append((sec[int(len(sec) / 2):], ""))
            else:
                # Keep a heading attached to the text that follows it.
                if sections and sections[-1][0].strip().find("#") == 0:
                    sec_, _ = sections.pop(-1)
                    sections.append((sec_ + "\n" + sec, ""))
                else:
                    sections.append((sec, ""))
        for table in tables:
            tbls.append(((None, markdown(table, extensions=['markdown.extensions.tables'])), ""))
        return sections, tbls


def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, excel, txt, markdown, html, json and legacy doc.
    This method applies a naive way to chunk files.
    Successive text is sliced into pieces using the 'delimiter'.
    Next, these pieces are merged into chunks whose token number does not exceed 'Max token number'.
    """
    eng = lang.lower() == "english"  # is_english(cks)
    parser_config = kwargs.get(
        "parser_config", {
            "chunk_token_num": 128, "delimiter": "\n!?。;!?", "layout_recognize": True})
    doc = {
        "docnm_kwd": filename,
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    res = []
    pdf_parser = None

    # Dispatch on file extension; each branch fills `sections` (and sometimes
    # `tbls`), and the common merge/tokenize step at the bottom builds chunks.
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tbls = Docx()(filename, binary)
        res = tokenize_table(tbls, doc, eng)  # just for table
        callback(0.8, "Finish parsing.")
        st = timer()

        chunks, images = naive_merge_docx(
            sections, int(parser_config.get(
                "chunk_token_num", 128)), parser_config.get(
                "delimiter", "\n!?。;!?"))

        if kwargs.get("section_only", False):
            return chunks

        res.extend(tokenize_chunks_docx(chunks, doc, eng, images))
        logging.info("naive_merge({}): {}".format(filename, timer() - st))
        return res

    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf(
        ) if parser_config.get("layout_recognize", True) else PlainParser()
        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)
        res = tokenize_table(tbls, doc, eng)

    elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = ExcelParser()
        if parser_config.get("html4excel"):
            sections = [(_, "") for _ in excel_parser.html(binary, 12) if _]
        else:
            sections = [(_, "") for _ in excel_parser(binary) if _]

    elif re.search(r"\.(txt|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|sql)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = TxtParser()(filename, binary,
                               parser_config.get("chunk_token_num", 128),
                               parser_config.get("delimiter", "\n!?;。;!?"))
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.(md|markdown)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tbls = Markdown(int(parser_config.get("chunk_token_num", 128)))(filename, binary)
        res = tokenize_table(tbls, doc, eng)
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = HtmlParser()(filename, binary)
        sections = [(_, "") for _ in sections if _]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.json$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = JsonParser(int(parser_config.get("chunk_token_num", 128)))(binary)
        sections = [(_, "") for _ in sections if _]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.doc$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        binary = BytesIO(binary)
        doc_parsed = parser.from_buffer(binary)
        sections = doc_parsed['content'].split('\n')
        sections = [(_, "") for _ in sections if _]
        callback(0.8, "Finish parsing.")

    else:
        raise NotImplementedError(
            "file type not supported yet (pdf, xlsx, doc, docx, txt, md, html, json supported)")

    st = timer()
    chunks = naive_merge(
        sections, int(parser_config.get(
            "chunk_token_num", 128)), parser_config.get(
            "delimiter", "\n!?。;!?"))
    if kwargs.get("section_only", False):
        return chunks

    res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
    logging.info("naive_merge({}): {}".format(filename, timer() - st))
    return res
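

# Quick manual test: chunk the first ten pages of the file given on the command line.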
if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], from_page=0, to_page=10, callback=dummy)