#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from tika import parser
from io import BytesIO
from docx import Document
from timeit import default_timer as timer
import re

from deepdoc.parser.pdf_parser import PlainParser
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser, JsonParser
from rag.settings import cron_logger
from rag.utils import num_tokens_from_string


class Docx(DocxParser):
    def __init__(self):
        pass

    def __clean(self, line):
        # Replace full-width (ideographic) spaces with ASCII spaces.
        line = re.sub(r"\u3000", " ", line).strip()
        return line

    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        lines = []
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page and p.text.strip():
                lines.append(self.__clean(p.text))
            # docx files carry no explicit page metadata, so track the page
            # number by counting both rendered page breaks and explicit
            # <w:br type="page"/> breaks in the runs.
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        tbls = []
        for tb in self.doc.tables:
            html = "<table>"
            for r in tb.rows:
                html += "<tr>"
                i = 0
                while i < len(r.cells):
                    span = 1
                    c = r.cells[i]
                    # python-docx exposes a horizontally merged cell once per
                    # column it spans, with identical text, so collapse a run
                    # of equal adjacent cells into a single <td colspan>.
                    for j in range(i + 1, len(r.cells)):
                        if c.text == r.cells[j].text:
                            span += 1
                            i = j
                        else:
                            # Stop at the first differing cell so non-adjacent
                            # duplicates are not merged across it.
                            break
                    i += 1
                    html += f"<td>{c.text}</td>" if span == 1 else f"<td colspan='{span}'>{c.text}</td>"
                html += "</tr>"
            html += "</table>"
            tbls.append(((None, html), ""))
        return [(l, "") for l in lines if l], tbls
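
# A minimal usage sketch (assuming a local "sample.docx"; the filename is
# illustrative only). Docx returns the cleaned paragraph texts paired with an
# empty position tag, plus each table rendered as an HTML string:
#
#   lines, tbls = Docx()("sample.docx")
#   # lines -> [("First paragraph ...", ""), ...]
#   # tbls  -> [((None, "<table><tr><td>...</td></tr></table>"), ""), ...]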


class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        start = timer()
        callback(msg="OCR is running...")
        # Render the requested page range to images and run OCR on them.
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished")
        cron_logger.info("OCR({}~{}): {}".format(from_page, to_page, timer() - start))

        start = timer()
        self._layouts_rec(zoomin)
        callback(0.63, "Layout analysis finished.")
        self._table_transformer_job(zoomin)
        callback(0.65, "Table analysis finished.")
        self._text_merge()
        callback(0.67, "Text merging finished.")
        tbls = self._extract_table_figure(True, zoomin, True, True)
        # self._naive_vertical_merge()
        self._concat_downward()
        # self._filter_forpages()

        cron_logger.info("layouts: {}".format(timer() - start))
        return [(b["text"], self._line_tag(b, zoomin))
                for b in self.boxes], tbls
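
# A minimal usage sketch (assuming "sample.pdf" exists; the name is purely
# illustrative). Each returned section pairs the recognized text with a
# position tag from _line_tag(), which encodes the page and box coordinates
# so chunks can later be traced back to their source location:
#
#   sections, tbls = Pdf()("sample.pdf", from_page=0, to_page=10,
#                          callback=lambda prog=None, msg="": None)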


def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
        Supported file formats are docx, doc, pdf, excel, html, json and
        common plain-text/code files.
        This method applies a naive way to chunk files:
        successive text is sliced into pieces using 'delimiter', and these
        pieces are then merged into chunks whose token number is no more
        than 'Max token number'.
    """

    eng = lang.lower() == "english"  # is_english(cks)
    # The default delimiter mixes ASCII and full-width (CJK) punctuation so it
    # suits both English and Chinese text.
    parser_config = kwargs.get(
        "parser_config", {
            "chunk_token_num": 128, "delimiter": "\n!?。;!?", "layout_recognize": True})
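    # A hedged example of overriding these defaults through kwargs (the keys
    # are the ones read above; the values and filename are illustrative):
    #
    #   chunk("notes.txt", binary=data, callback=progress,
    #         parser_config={"chunk_token_num": 256, "delimiter": "\n。;!?",
    #                        "layout_recognize": False})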
    doc = {
        "docnm_kwd": filename,
        # Tokenize the filename with its extension stripped as the title.
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    res = []
    pdf_parser = None
    sections = []
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections, tbls = Docx()(filename, binary)
        res = tokenize_table(tbls, doc, eng)
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        # Fall back to a plain text extractor when layout recognition is off.
        pdf_parser = Pdf(
        ) if parser_config.get("layout_recognize", True) else PlainParser()
        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)
        res = tokenize_table(tbls, doc, eng)

    elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = ExcelParser()
        sections = [(l, "") for l in excel_parser.html(binary) if l]

    elif re.search(r"\.(txt|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = ""
        if binary:
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            with open(filename, "r") as f:
                while True:
                    l = f.readline()
                    if not l:
                        break
                    txt += l

        sections = []
        for sec in txt.split("\n"):
            # Halve pathologically long lines so a single line cannot blow
            # past the merge budget (10x the configured chunk size).
            if num_tokens_from_string(sec) > 10 * int(parser_config.get("chunk_token_num", 128)):
                sections.append((sec[:int(len(sec) / 2)], ""))
                sections.append((sec[int(len(sec) / 2):], ""))
            else:
                sections.append((sec, ""))

        callback(0.8, "Finish parsing.")

    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = HtmlParser()(filename, binary)
        sections = [(l, "") for l in sections if l]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.json$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = JsonParser(int(parser_config.get("chunk_token_num", 128)))(binary)
        sections = [(l, "") for l in sections if l]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.doc$", filename, re.IGNORECASE):
        # Legacy .doc files go through Apache Tika.
        callback(0.1, "Start to parse.")
        binary = BytesIO(binary)
        doc_parsed = parser.from_buffer(binary)
        sections = doc_parsed['content'].split('\n')
        sections = [(l, "") for l in sections if l]
        callback(0.8, "Finish parsing.")

    else:
        raise NotImplementedError(
            "file type not supported yet (pdf, xlsx, doc, docx, txt supported)")

    st = timer()
    # Merge the (text, position-tag) sections into token-bounded chunks.
    chunks = naive_merge(
        sections, int(parser_config.get(
            "chunk_token_num", 128)), parser_config.get(
            "delimiter", "\n!?。;!?"))

    res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
    cron_logger.info("naive_merge({}): {}".format(filename, timer() - st))
    return res
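
# A minimal usage sketch (the filename and callback are illustrative): the
# callback receives a progress fraction in [0, 1] and/or a status message,
# and chunk() returns a list of tokenized chunk dicts ready for indexing.
#
#   def progress(prog=None, msg=""):
#       print(prog, msg)
#
#   chunks = chunk("sample.pdf", from_page=0, to_page=10, callback=progress)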


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], from_page=0, to_page=10, callback=dummy)