#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from tika import parser
import re
from io import BytesIO
from docx import Document

from api.db import ParserType
from deepdoc.parser.utils import get_text
from rag.nlp import bullets_category, remove_contents_table, hierarchical_merge, \
    make_colon_as_title, tokenize_chunks, docx_question_level
from rag.nlp import rag_tokenizer
from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
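

# Docx extracts paragraphs from .docx files while tracking rendered and
# explicit page breaks, so from_page/to_page can be honored without actually
# rendering the document.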
class Docx(DocxParser):
    def __init__(self):
        pass

    def __clean(self, line):
        line = re.sub(r"\u3000", " ", line).strip()
        return line
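
    # Legacy flat extraction: collect cleaned, non-empty paragraph texts that
    # fall inside [from_page, to_page); the page counter advances whenever a
    # run carries a rendered or explicit page-break marker.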
    def old_call(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        lines = []
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page and p.text.strip():
                lines.append(self.__clean(p.text))
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        return [line for line in lines if line]
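
    # Outline-aware extraction: classify each paragraph's heading level with
    # docx_question_level(), then attach every heading to its nearest deeper
    # sub-paragraphs so each emitted section carries its own context.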
    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        lines = []
        bull = bullets_category([p.text for p in self.doc.paragraphs])
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            question_level, p_text = docx_question_level(p, bull)
            if not p_text.strip("\n"):
                continue
            lines.append((question_level, p_text))
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
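
        # Merge step: for each line, find the span of following lines at a
        # strictly deeper level, then pull in the shallowest deeper level that
        # yields any content; children already consumed by a parent are not
        # re-emitted as standalone sections.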
        visit = [False for _ in range(len(lines))]
        sections = []
        for s in range(len(lines)):
            e = s + 1
            while e < len(lines):
                if lines[e][0] <= lines[s][0]:
                    break
                e += 1
            if e - s == 1 and visit[s]:
                continue
            sec = []
            next_level = lines[s][0] + 1
            while not sec and next_level < 22:
                for i in range(s + 1, e):
                    if lines[i][0] != next_level:
                        continue
                    sec.append(lines[i][1])
                    visit[i] = True
                next_level += 1
            sec.insert(0, lines[s][1])
            sections.append("\n".join(sec))

        return [s for s in sections if s]

    def __str__(self) -> str:
        return f'''
            question:{self.question},
            answer:{self.answer},
            level:{self.level},
            childs:{self.childs}
        '''


class Pdf(PdfParser):
    def __init__(self):
        self.model_speciess = ParserType.LAWS.value
        super().__init__()
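
    # Pipeline: OCR the page images, run layout recognition, then merge
    # vertically adjacent text boxes; returns (text, position-tag) pairs plus
    # a None placeholder where table results would otherwise go.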
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        from timeit import default_timer as timer
        start = timer()
        callback(msg="OCR started")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished ({:.2f}s)".format(timer() - start))

        start = timer()
        self._layouts_rec(zoomin)
        callback(0.67, "Layout analysis ({:.2f}s)".format(timer() - start))
        logging.debug("layouts: {}".format(timer() - start))
        self._naive_vertical_merge()

        callback(0.8, "Text extraction ({:.2f}s)".format(timer() - start))

        return [(b["text"], self._line_tag(b, zoomin))
                for b in self.boxes], None
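

# Entry point used by the ingestion pipeline: build the document metadata,
# dispatch on file extension to fill `sections`, then merge those sections
# hierarchically into retrieval chunks.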
def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, txt, htm/html and doc.
    """
    parser_config = kwargs.get(
        "parser_config", {
            "chunk_token_num": 512, "delimiter": "\n!?。;!?", "layout_recognize": "DeepDOC"})
    doc = {
        "docnm_kwd": filename,
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    pdf_parser = None
    sections = []
    # is it English
    eng = lang.lower() == "english"  # is_english(sections)
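
    # Dispatch on extension. The docx branch chunks and returns early; every
    # other branch only collects raw text lines into `sections`.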
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        chunks = Docx()(filename, binary)
        callback(0.7, "Finish parsing.")
        return tokenize_chunks(chunks, doc, eng, None)

    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf()
        if parser_config.get("layout_recognize", "DeepDOC") == "Plain Text":
            pdf_parser = PlainParser()
        for txt, poss in pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)[0]:
            sections.append(txt + poss)

    elif re.search(r"\.txt$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = get_text(filename, binary)
        sections = txt.split("\n")
        sections = [s for s in sections if s]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = HtmlParser()(filename, binary)
        sections = [s for s in sections if s]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.doc$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        binary = BytesIO(binary)
        doc_parsed = parser.from_buffer(binary)
        sections = doc_parsed['content'].split('\n')
        sections = [s for s in sections if s]
        callback(0.8, "Finish parsing.")

    else:
        raise NotImplementedError(
            "file type not supported yet(doc, docx, pdf, txt, html supported)")
    # Remove 'Contents' part
    remove_contents_table(sections, eng)

    make_colon_as_title(sections)
    bull = bullets_category(sections)
    chunks = hierarchical_merge(bull, sections, 5)
    if not chunks:
        callback(0.99, "No chunk parsed out.")

    return tokenize_chunks(["\n".join(ck)
                            for ck in chunks], doc, eng, pdf_parser)
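

# Minimal manual test: run this module with a document path as the first
# argument; progress callbacks are swallowed by the no-op dummy().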
if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], callback=dummy)