
naive.py 9.9KB

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tika import parser
from io import BytesIO
from docx import Document
from timeit import default_timer as timer
import re
from deepdoc.parser.pdf_parser import PlainParser
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec, concat_img, naive_merge_docx, tokenize_chunks_docx
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser, JsonParser
from rag.settings import cron_logger
from rag.utils import num_tokens_from_string
from PIL import Image
from functools import reduce

class Docx(DocxParser):
    def __init__(self):
        pass

    def get_picture(self, document, paragraph):
        # Pull the first embedded picture out of a paragraph, if any.
        img = paragraph._element.xpath('.//pic:pic')
        if not img:
            return None
        img = img[0]
        embed = img.xpath('.//a:blip/@r:embed')[0]
        related_part = document.part.related_parts[embed]
        image = related_part.image
        image = Image.open(BytesIO(image.blob)).convert('RGB')
        return image

    def __clean(self, line):
        # Replace ideographic spaces (U+3000) and trim.
        line = re.sub(r"\u3000", " ", line).strip()
        return line

    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        lines = []
        last_image = None
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page:
                current_image = None
                if p.text.strip():
                    if p.style.name == 'Caption':
                        # Attach the caption to the nearest preceding image.
                        former_image = None
                        if lines and lines[-1][1] and lines[-1][2] != 'Caption':
                            former_image = lines[-1][1].pop()
                        elif last_image:
                            former_image = last_image
                            last_image = None
                        lines.append((self.__clean(p.text), [former_image], p.style.name))
                    else:
                        current_image = self.get_picture(self.doc, p)
                        image_list = [current_image]
                        if last_image:
                            image_list.insert(0, last_image)
                            last_image = None
                        lines.append((self.__clean(p.text), image_list, p.style.name))
                else:
                    # Empty paragraph: keep its image for the nearest text line.
                    if current_image := self.get_picture(self.doc, p):
                        if lines:
                            lines[-1][1].append(current_image)
                        else:
                            last_image = current_image
            # Count pages by the page breaks rendered into the runs.
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        new_line = [(line[0], reduce(concat_img, line[1])) for line in lines]

        # Render tables as HTML; python-docx repeats a merged cell in
        # row.cells, so a run of identical cells becomes one colspan.
        tbls = []
        for tb in self.doc.tables:
            html = "<table>"
            for r in tb.rows:
                html += "<tr>"
                i = 0
                while i < len(r.cells):
                    span = 1
                    c = r.cells[i]
                    for j in range(i + 1, len(r.cells)):
                        if c.text != r.cells[j].text:
                            # Stop at the first differing cell so separate
                            # cells that merely share text are not swallowed.
                            break
                        span += 1
                        i = j
                    i += 1
                    html += f"<td>{c.text}</td>" if span == 1 else f"<td colspan='{span}'>{c.text}</td>"
                html += "</tr>"
            html += "</table>"
            tbls.append(((None, html), ""))
        return new_line, tbls
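
# A minimal, self-contained sketch of the colspan detection used in the table
# loop above, operating on plain strings instead of python-docx cells. The
# helper name `_row_to_html_sketch` is ours, for illustration only.
def _row_to_html_sketch(cell_texts):
    html, i = "<tr>", 0
    while i < len(cell_texts):
        span = 1
        # Count how many consecutive cells carry the same text.
        while i + span < len(cell_texts) and cell_texts[i + span] == cell_texts[i]:
            span += 1
        html += f"<td>{cell_texts[i]}</td>" if span == 1 else \
            f"<td colspan='{span}'>{cell_texts[i]}</td>"
        i += span
    return html + "</tr>"
# _row_to_html_sketch(["a", "a", "b"]) -> "<tr><td colspan='2'>a</td><td>b</td></tr>"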

class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        start = timer()
        callback(msg="OCR is running...")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished")
        cron_logger.info("OCR({}~{}): {}".format(from_page, to_page, timer() - start))

        # Layout analysis, table structure recognition, then text merging.
        start = timer()
        self._layouts_rec(zoomin)
        callback(0.63, "Layout analysis finished.")
        self._table_transformer_job(zoomin)
        callback(0.65, "Table analysis finished.")
        self._text_merge()
        callback(0.67, "Text merging finished.")
        tbls = self._extract_table_figure(True, zoomin, True, True)
        # self._naive_vertical_merge()
        self._concat_downward()
        # self._filter_forpages()
        cron_logger.info("layouts: {}".format(timer() - start))

        return [(b["text"], self._line_tag(b, zoomin))
                for b in self.boxes], tbls
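
# The `callback` threaded through Pdf and chunk() is a progress hook taking
# an optional progress fraction and a message, as the calls above show
# (callback(0.63, "..."), callback(msg="...")). A minimal sketch of such a
# hook; the name `log_progress` is ours, not part of the project:
def log_progress(prog=None, msg=""):
    if prog is not None:
        print("[{:.0f}%] {}".format(prog * 100, msg))
    else:
        print(msg)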

def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, excel, txt, html, json and doc.
    This method applies a naive chunking strategy: successive text is sliced
    into pieces by 'delimiter', and those pieces are then merged into chunks
    whose token count does not exceed 'chunk_token_num'.
    """
    eng = lang.lower() == "english"  # is_english(cks)
    parser_config = kwargs.get(
        "parser_config", {
            "chunk_token_num": 128, "delimiter": "\n!?。;!?", "layout_recognize": True})
    doc = {
        "docnm_kwd": filename,
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    res = []
    pdf_parser = None
    sections = []
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tbls = Docx()(filename, binary)
        res = tokenize_table(tbls, doc, eng)  # just for table
        callback(0.8, "Finish parsing.")
        st = timer()
        chunks, images = naive_merge_docx(
            sections, int(parser_config.get(
                "chunk_token_num", 128)), parser_config.get(
                "delimiter", "\n!?。;!?"))
        res.extend(tokenize_chunks_docx(chunks, doc, eng, images))
        cron_logger.info("naive_merge({}): {}".format(filename, timer() - st))
        return res
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf(
        ) if parser_config.get("layout_recognize", True) else PlainParser()
        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)
        res = tokenize_table(tbls, doc, eng)
    elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = ExcelParser()
        sections = [(l, "") for l in excel_parser.html(binary) if l]
    elif re.search(r"\.(txt|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = ""
        if binary:
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            with open(filename, "r") as f:
                while True:
                    l = f.readline()
                    if not l:
                        break
                    txt += l
        sections = []
        for sec in txt.split("\n"):
            # Split oversized lines in half so no single section dwarfs the
            # chunk token budget.
            if num_tokens_from_string(sec) > 10 * int(parser_config.get("chunk_token_num", 128)):
                sections.append((sec[:int(len(sec) / 2)], ""))
                sections.append((sec[int(len(sec) / 2):], ""))
            else:
                sections.append((sec, ""))
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = HtmlParser()(filename, binary)
        sections = [(l, "") for l in sections if l]
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.json$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = JsonParser(int(parser_config.get("chunk_token_num", 128)))(binary)
        sections = [(l, "") for l in sections if l]
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.doc$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        binary = BytesIO(binary)
        doc_parsed = parser.from_buffer(binary)
        sections = doc_parsed['content'].split('\n')
        sections = [(l, "") for l in sections if l]
        callback(0.8, "Finish parsing.")
    else:
        raise NotImplementedError(
            "file type not supported yet (pdf, xlsx, doc, docx, txt supported)")

    st = timer()
    chunks = naive_merge(
        sections, int(parser_config.get(
            "chunk_token_num", 128)), parser_config.get(
            "delimiter", "\n!?。;!?"))
    res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
    cron_logger.info("naive_merge({}): {}".format(filename, timer() - st))
    return res
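
# A rough sketch of the "naive merge" idea described in chunk()'s docstring:
# split on the delimiter characters, then greedily pack successive pieces
# into chunks of at most `max_tokens` tokens. Token counting here is a crude
# whitespace split; the real naive_merge uses num_tokens_from_string and
# carries extra metadata. `naive_merge_sketch` is ours, for illustration only.
def naive_merge_sketch(text, max_tokens=128, delimiter="\n!?。;!?"):
    pieces = re.split("[%s]" % re.escape(delimiter), text)
    chunks, buf, count = [], "", 0
    for piece in pieces:
        n = len(piece.split())  # crude token estimate
        if buf and count + n > max_tokens:
            chunks.append(buf.strip())
            buf, count = "", 0
        buf += piece + " "
        count += n
    if buf.strip():
        chunks.append(buf.strip())
    return chunks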

if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], from_page=0, to_page=10, callback=dummy)
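# Invocation example, assuming the project's packages and dependencies are
# importable (the path is a placeholder):
#   python naive.py /path/to/sample.pdf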