
book.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from tika import parser
import re
from io import BytesIO

from deepdoc.parser.utils import get_text
from rag.nlp import bullets_category, is_english, remove_contents_table, \
    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, \
    tokenize_table, tokenize_chunks
from rag.nlp import rag_tokenizer
from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser


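# Book-oriented PDF parser: runs OCR, layout recognition and table
# extraction over a page range, then returns the merged text boxes (each
# tagged with its page position) together with the extracted tables/figures.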
class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        from timeit import default_timer as timer
        start = timer()
        callback(msg="OCR started")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback)
        callback(msg="OCR finished ({:.2f}s)".format(timer() - start))

        start = timer()
        self._layouts_rec(zoomin)
        callback(0.67, "Layout analysis ({:.2f}s)".format(timer() - start))
        logging.debug("layouts: {}".format(timer() - start))

        start = timer()
        self._table_transformer_job(zoomin)
        callback(0.68, "Table analysis ({:.2f}s)".format(timer() - start))

        start = timer()
        self._text_merge()
        tbls = self._extract_table_figure(True, zoomin, True, True)
        self._naive_vertical_merge()
        self._filter_forpages()
        self._merge_with_same_bullet()
        callback(0.8, "Text extraction ({:.2f}s)".format(timer() - start))

        return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno", ""))
                for b in self.boxes], tbls


def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, txt, html and doc.
    Since a book is long and not all of its parts are useful, if it's a PDF,
    please set up the page ranges for every book in order to eliminate
    negative effects and save computing time.
    """
    doc = {
        "docnm_kwd": filename,
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    pdf_parser = None
    sections, tbls = [], []
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        doc_parser = DocxParser()
        # TODO: table of contents needs to be removed
        sections, tbls = doc_parser(
            binary if binary else filename, from_page=from_page, to_page=to_page)
        remove_contents_table(sections, eng=is_english(
            random_choices([t for t, _ in sections], k=200)))
        tbls = [((None, lns), None) for lns in tbls]
        callback(0.8, "Finish parsing.")
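    # PDF: use the full OCR/layout pipeline above unless layout recognition
    # is turned off in parser_config, in which case fall back to PlainParser.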
    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf() if kwargs.get(
            "parser_config", {}).get(
            "layout_recognize", True) else PlainParser()
        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page,
                                    callback=callback)
    elif re.search(r"\.txt$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = get_text(filename, binary)
        sections = txt.split("\n")
        sections = [(l, "") for l in sections if l]
        remove_contents_table(sections, eng=is_english(
            random_choices([t for t, _ in sections], k=200)))
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = HtmlParser()(filename, binary)
        sections = [(l, "") for l in sections if l]
        remove_contents_table(sections, eng=is_english(
            random_choices([t for t, _ in sections], k=200)))
        callback(0.8, "Finish parsing.")
    elif re.search(r"\.doc$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        binary = BytesIO(binary)
        doc_parsed = parser.from_buffer(binary)
        sections = doc_parsed['content'].split('\n')
        sections = [(l, "") for l in sections if l]
        remove_contents_table(sections, eng=is_english(
            random_choices([t for t, _ in sections], k=200)))
        callback(0.8, "Finish parsing.")
    else:
        raise NotImplementedError(
            "file type not supported yet (doc, docx, pdf, txt, html supported)")
    make_colon_as_title(sections)
    bull = bullets_category(
        [t for t in random_choices([t for t, _ in sections], k=100)])
    if bull >= 0:
        chunks = ["\n".join(ck)
                  for ck in hierarchical_merge(bull, sections, 5)]
    else:
        sections = [s.split("@") for s, _ in sections]
        sections = [(pr[0], "@" + pr[1]) if len(pr) == 2 else (pr[0], '')
                    for pr in sections]
        chunks = naive_merge(
            sections, kwargs.get(
                "chunk_token_num", 256), kwargs.get(
                "delimer", "\n。;!?"))

    # is it English
    # is_english(random_choices([t for t, _ in sections], k=218))
    eng = lang.lower() == "english"

    res = tokenize_table(tbls, doc, eng)
    res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))
    return res


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], from_page=1, to_page=10, callback=dummy)
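For reference, a minimal sketch of calling chunk() directly with an explicit
page range and a progress callback. The file path and the print-based
callback below are illustrative assumptions, not part of the module; only
the chunk() signature and the parser_config/chunk_token_num kwargs come from
the code above.

    from book import chunk

    def progress(prog=None, msg=""):
        # chunk() reports a progress float in [0, 1], a status message, or both.
        print(prog, msg)

    # Hypothetical input file; any docx/pdf/txt/html/doc file should work.
    results = chunk("sample_book.pdf", from_page=0, to_page=20,
                    lang="English", callback=progress,
                    parser_config={"layout_recognize": True},
                    chunk_token_num=256)
    print("chunks:", len(results))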