
laws.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import re
from io import BytesIO

from docx import Document

from api.db import ParserType
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
    make_colon_as_title, add_positions, tokenize_chunks
from rag.nlp import huqie
from deepdoc.parser import PdfParser, DocxParser, PlainParser
from rag.settings import cron_logger
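

# Docx: pulls non-empty paragraph text out of a .docx file, counting page
# breaks (lastRenderedPageBreak / w:br page-break runs) so that only the
# paragraphs inside the requested [from_page, to_page) window are kept.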
class Docx(DocxParser):
    def __init__(self):
        self.model_speciess = ParserType.LAWS.value
        super().__init__()

    def __clean(self, line):
        line = re.sub(r"\u3000", " ", line).strip()
        return line

    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        pn = 0
        lines = []
        for p in self.doc.paragraphs:
            if pn > to_page:
                break
            if from_page <= pn < to_page and p.text.strip():
                lines.append(self.__clean(p.text))
            for run in p.runs:
                if 'lastRenderedPageBreak' in run._element.xml:
                    pn += 1
                    continue
                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
                    pn += 1
        return [l for l in lines if l]
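

# Pdf: renders pages to images, runs OCR, applies layout analysis, then merges
# vertically adjacent text boxes; yields (text, position-tag) pairs.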
class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        callback(msg="OCR is running...")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback
        )
        callback(msg="OCR finished")

        from timeit import default_timer as timer
        start = timer()
        self._layouts_rec(zoomin)
        callback(0.67, "Layout analysis finished")
        cron_logger.info("paddle layouts: {}".format(
            (timer() - start) / (self.total_page + 0.1)))
        self._naive_vertical_merge()

        callback(0.8, "Text extraction finished")
        return [(b["text"], self._line_tag(b, zoomin))
                for b in self.boxes], None
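

# Entry point: tokenize the file title, extract text sections with the parser
# matching the file extension, then merge the sections into indexable chunks.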
def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, txt.
    """
    doc = {
        "docnm_kwd": filename,
        "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
    pdf_parser = None
    sections = []
    if re.search(r"\.docx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        for txt in Docx()(filename, binary):
            sections.append(txt)
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf() if kwargs.get(
            "parser_config", {}).get(
            "layout_recognize", True) else PlainParser()
        for txt, poss in pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page,
                                    callback=callback)[0]:
            sections.append(txt + poss)

    elif re.search(r"\.txt$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = ""
        if binary:
            txt = binary.decode("utf-8")
        else:
            with open(filename, "r") as f:
                while True:
                    l = f.readline()
                    if not l:
                        break
                    txt += l
        sections = txt.split("\n")
        sections = [l for l in sections if l]
        callback(0.8, "Finish parsing.")

    else:
        raise NotImplementedError(
            "file type not supported yet (docx, pdf, txt supported)")
    # is it English
    eng = lang.lower() == "english"  # is_english(sections)
    # Remove 'Contents' part
    remove_contents_table(sections, eng)

    make_colon_as_title(sections)
    bull = bullets_category(sections)
    chunks = hierarchical_merge(bull, sections, 3)
    if not chunks:
        callback(0.99, "No chunk parsed out.")

    return tokenize_chunks(["\n".join(ck)
                            for ck in chunks], doc, eng, pdf_parser)


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], callback=dummy)