#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from io import BytesIO
from docx import Document
import re
from deepdoc.parser.pdf_parser import PlainParser
from rag.nlp import huqie, naive_merge, tokenize_table, tokenize_chunks
from deepdoc.parser import PdfParser, ExcelParser, DocxParser
from rag.settings import cron_logger


class Docx(DocxParser):
    def __init__(self):
        pass

    def __clean(self, line):
        # Replace full-width (ideographic) spaces and trim.
        line = re.sub(r"\u3000", " ", line).strip()
        return line

    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        lines = []
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page and p.text.strip():
                lines.append(self.__clean(p.text))
            # Track page breaks so from_page/to_page can be honored:
            # both rendered page breaks and explicit page-break runs count.
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1

        tbls = []
        for tb in self.doc.tables:
            html = "<table>"
            for r in tb.rows:
                html += "<tr>"
                i = 0
                while i < len(r.cells):
                    span = 1
                    c = r.cells[i]
                    # Fold adjacent cells with identical text into a single
                    # colspan; python-docx surfaces a horizontally merged
                    # cell once per grid column it spans. The `break` stops
                    # the fold at the first non-matching cell so unrelated
                    # duplicates later in the row are not swallowed.
                    for j in range(i + 1, len(r.cells)):
                        if c.text != r.cells[j].text:
                            break
                        span += 1
                        i = j
                    i += 1
                    html += f"<td>{c.text}</td>" if span == 1 else f"<td colspan='{span}'>{c.text}</td>"
                html += "</tr>"
            html += "</table>"
            tbls.append(((None, html), ""))
        return [(l, "") for l in lines if l], tbls
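

# A hedged, standalone sketch of the cell-folding step above, operating on
# plain strings instead of python-docx cells. The function name and sample
# row are illustrative only; the real logic lives in Docx.__call__.
def _fold_merged_cells_sketch(cell_texts):
    html, i = "", 0
    while i < len(cell_texts):
        span, text = 1, cell_texts[i]
        for j in range(i + 1, len(cell_texts)):
            if cell_texts[j] != text:
                break
            span += 1
            i = j
        i += 1
        html += f"<td>{text}</td>" if span == 1 else \
            f"<td colspan='{span}'>{text}</td>"
    return html
# e.g. _fold_merged_cells_sketch(["a", "a", "b"])
#   -> "<td colspan='2'>a</td><td>b</td>"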


class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        callback(msg="OCR is running...")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished")

        from timeit import default_timer as timer
        start = timer()
        self._layouts_rec(zoomin)
        callback(0.63, "Layout analysis finished.")
        print("paddle layouts:", timer() - start)
        self._table_transformer_job(zoomin)
        callback(0.65, "Table analysis finished.")
        self._text_merge()
        callback(0.67, "Text merging finished")
        tbls = self._extract_table_figure(True, zoomin, True, True)
        # self._naive_vertical_merge()
        self._concat_downward()
        # self._filter_forpages()

        cron_logger.info("paddle layouts: {}".format(
            (timer() - start) / (self.total_page + 0.1)))
        return [(b["text"], self._line_tag(b, zoomin))
                for b in self.boxes], tbls
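

# The pipeline above reports progress through `callback`, invoked either as
# callback(msg="...") or callback(fraction, "message"). A minimal conforming
# implementation, offered as a hedged sketch (the real callback is supplied
# by the surrounding application, not by this file):
def _print_progress(prog=None, msg=""):
    if prog is not None:
        print("[{:.0%}] {}".format(prog, msg))
    else:
        print(msg)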


def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, excel, txt.
    This method applies a naive way to chunk files.
    Successive text is sliced into pieces on the 'delimiter' characters,
    and the pieces are then merged into chunks whose token count does not
    exceed 'Max token number'. (A standalone sketch of this merge strategy
    appears after this function.)
    """
    eng = lang.lower() == "english"  # is_english(cks)
    parser_config = kwargs.get(
        "parser_config", {
            "chunk_token_num": 128, "delimiter": "\n!?。;!?", "layout_recognize": True})
    doc = {
        "docnm_kwd": filename,
        "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
    res = []
    pdf_parser = None
    sections = []
    if re.search(r"\.docx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tbls = Docx()(filename, binary)
        res = tokenize_table(tbls, doc, eng)
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf(
        ) if parser_config.get("layout_recognize", True) else PlainParser()
        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)
        res = tokenize_table(tbls, doc, eng)
    elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = ExcelParser()
        sections = [(excel_parser.html(binary), "")]
    elif re.search(r"\.txt$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = ""
        if binary:
            try:
                txt = binary.decode("utf-8")
            except Exception:
                # Fall back to a common Simplified Chinese encoding.
                txt = binary.decode("gb2312")
        else:
            with open(filename, "r") as f:
                while True:
                    l = f.readline()
                    if not l:
                        break
                    txt += l
        sections = txt.split("\n")
        sections = [(l, "") for l in sections if l]
        callback(0.8, "Finish parsing.")
    else:
        raise NotImplementedError(
            "file type not supported yet(docx, pdf, xlsx, txt supported)")

    chunks = naive_merge(
        sections, parser_config.get(
            "chunk_token_num", 128), parser_config.get(
            "delimiter", "\n!?。;!?"))

    res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
    return res
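

# Hedged sketch of the two-stage strategy the chunk() docstring describes:
# split on the configured delimiter characters, then greedily merge pieces
# up to the token budget. This is an illustration only, not the actual
# naive_merge from rag.nlp; token counting here is a crude word count.
def _naive_merge_sketch(texts, chunk_token_num=128, delimiter="\n!?。;!?"):
    pieces = []
    for t in texts:
        pieces.extend(p for p in re.split(
            "[%s]" % re.escape(delimiter), t) if p.strip())
    chunks, buf = [], ""
    for p in pieces:
        # Flush the buffer once adding the next piece would exceed the budget.
        if buf and len((buf + " " + p).split()) > chunk_token_num:
            chunks.append(buf)
            buf = ""
        buf = (buf + " " + p).strip()
    if buf:
        chunks.append(buf)
    return chunks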


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], from_page=0, to_page=10, callback=dummy)