
laws.py 7.0KB

import copy
import re
from io import BytesIO

import numpy as np
from docx import Document

from rag.app import callback__, bullets_category, BULLET_PATTERN
from rag.nlp import huqie
from rag.parser.pdf_parser import HuParser
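
# Docx: thin wrapper around python-docx that returns the non-empty paragraph
# texts of a .docx file, with ideographic spaces (\u3000) normalized to
# ASCII spaces.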
class Docx(object):
    def __init__(self):
        pass

    def __clean(self, line):
        line = re.sub(r"\u3000", " ", line).strip()
        return line

    def __call__(self, filename, binary=None):
        self.doc = Document(
            filename) if not binary else Document(BytesIO(binary))
        lines = [self.__clean(p.text) for p in self.doc.paragraphs]
        return [l for l in lines if l]
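
# Pdf: OCR + layout-analysis pipeline built on HuParser. It renders the page
# range to images, runs OCR and Paddle layout detection, sorts the resulting
# text boxes top-to-bottom, merges boxes that visually continue one another,
# and returns the merged lines with position tags appended.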
class Pdf(HuParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page)
        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
                   "Page {}~{}: OCR finished".format(from_page, min(to_page, self.total_page)), callback)

        from timeit import default_timer as timer
        start = timer()
        self._layouts_paddle(zoomin)
        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
                   "Page {}~{}: Layout analysis finished".format(from_page, min(to_page, self.total_page)), callback)
        print("paddle layouts:", timer() - start)
        bxs = self.sort_Y_firstly(self.boxes, np.median(self.mean_height) / 3)
        # is it English? (treat as English if >80% of boxes start with a Latin letter)
        eng = 0
        for b in bxs:
            if re.match(r"[a-zA-Z]", b["text"].strip()):
                eng += 1
        eng = eng / max(1, len(bxs)) > 0.8
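        # The OCR step often splits one logical line across several boxes,
        # and paragraphs across page breaks; the loop below repairs that.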
        # Merge vertically
        i = 0
        while i + 1 < len(bxs):
            b = bxs[i]
            b_ = bxs[i + 1]
            # drop page-decoration boxes (page numbers, bullets, dashes) at page breaks
            if b["page_number"] < b_["page_number"] and re.match(r"[0-9 •一—-]+$", b["text"]):
                bxs.pop(i)
                continue
            # features that argue for concatenating b and b_
            concatting_feats = [
                b["text"].strip()[-1] in ",;:'\",、‘“;:",
                len(b["text"].strip()) > 1 and b["text"].strip()[-2] in ",;:'\",‘“、;:",
                b_["text"].strip()[0] in "。;?!?”)),,、:",
            ]
            # features that argue against concatenating
            feats = [
                # the two boxes sit in different detected layout regions
                b.get("layoutno", 0) != b_.get("layoutno", 0),
                b["text"].strip()[-1] in "。?!?",
                eng and b["text"].strip()[-1] in ".!?",
                # same page, but the vertical gap exceeds 1.5x the mean line height
                b["page_number"] == b_["page_number"]
                and b_["top"] - b["bottom"] > self.mean_height[b["page_number"] - 1] * 1.5,
                # page break with a large horizontal offset
                b["page_number"] < b_["page_number"] and abs(
                    b["x0"] - b_["x0"]) > self.mean_width[b["page_number"] - 1] * 4,
            ]
            if any(feats) and not any(concatting_feats):
                i += 1
                continue
            # merge up and down
            b["bottom"] = b_["bottom"]
            b["text"] += b_["text"]
            b["x0"] = min(b["x0"], b_["x0"])
            b["x1"] = max(b["x1"], b_["x1"])
            bxs.pop(i + 1)
        callback__((min(to_page, self.total_page) - from_page) / self.total_page / 2,
                   "Page {}~{}: Text extraction finished".format(from_page, min(to_page, self.total_page)), callback)
        return [b["text"] + self._line_tag(b, zoomin) for b in bxs]
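
# chunk: module entry point. Dispatches on the file extension (.doc/.docx,
# .pdf, .txt), splits the text into non-empty sections, strips any table of
# contents, groups sections along the detected bullet/heading hierarchy, and
# wraps each group into an Elasticsearch-ready document dict.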
def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
    doc = {
        "docnm_kwd": filename,
        "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
    pdf_parser = None
    sections = []
    if re.search(r"\.docx?$", filename, re.IGNORECASE):
        for txt in Docx()(filename, binary):
            sections.append(txt)
    if re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf()
        for txt in pdf_parser(filename if not binary else binary,
                              from_page=from_page, to_page=to_page, callback=callback):
            sections.append(txt)
    if re.search(r"\.txt$", filename, re.IGNORECASE):
        if binary:
            txt = binary.decode("utf-8")
        else:
            with open(filename, "r") as f:
                txt = f.read()
        sections = [l for l in txt.split("\n") if l]
    # is it English? (same >80% Latin-initial heuristic as in Pdf)
    eng = 0
    for sec in sections:
        if re.match(r"[a-zA-Z]", sec.strip()):
            eng += 1
    eng = eng / max(1, len(sections)) > 0.8
    # Remove the 'Contents' (table of contents) part
    i = 0
    while i < len(sections):
        if not re.match(r"(Contents|目录|目次)$",
                        re.sub(r"( | |\u3000)+", "", sections[i].split("@@")[0])):
            i += 1
            continue
        sections.pop(i)
        if i >= len(sections):
            break
        # remember how the first TOC entry starts (3 chars, or 2 words for English)
        prefix = sections[i].strip()[:3] if not eng else " ".join(
            sections[i].strip().split(" ")[:2])
        while not prefix:
            sections.pop(i)
            if i >= len(sections):
                break
            prefix = sections[i].strip()[:3] if not eng else " ".join(
                sections[i].strip().split(" ")[:2])
        sections.pop(i)
        if i >= len(sections) or not prefix:
            break
        # drop the TOC body: everything up to the first line that repeats the prefix
        for j in range(i, min(i + 128, len(sections))):
            if not re.match(prefix, sections[j]):
                continue
            for k in range(i, j):
                sections.pop(i)
            break
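    # Project every section onto the bullet/heading hierarchy: projs[i] is the
    # index of the first BULLET_PATTERN level that matches section i (smaller
    # index = higher level); unmatched sections keep the sentinel value
    # len(BULLET_PATTERN[bull]).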
    bull = bullets_category(sections)
    projs = [len(BULLET_PATTERN[bull])] * len(sections)
    for i, sec in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, sec.strip()):
                projs[i] = j
                break
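    # Assemble chunks from the deepest heading level upward: every unread
    # section at level >= pr starts a chunk, then we walk backwards and prepend
    # each strictly higher-level ancestor, so a chunk always carries its full
    # heading path down to the leaf text.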
    readed = [0] * len(sections)
    cks = []
    for pr in range(len(BULLET_PATTERN[bull]) - 1, 1, -1):
        for i in range(len(sections)):
            if readed[i] or projs[i] < pr:
                continue
            # walk backwards to collect the ancestor chain (father, grandfather, ...)
            p = projs[i]
            readed[i] = 1
            ck = [sections[i]]
            for j in range(i - 1, -1, -1):
                if projs[j] >= p:
                    continue
                ck.append(sections[j])
                readed[j] = 1
                p = projs[j]
                if p == 0:
                    break
            cks.append(ck[::-1])
    res = []
    # wrap each chunk up into an Elasticsearch-style document
    for ck in cks:
        print("\n-".join(ck))
        ck = "\n".join(ck)
        d = copy.deepcopy(doc)
        if pdf_parser:
            d["image"] = pdf_parser.crop(ck)
            ck = pdf_parser.remove_tag(ck)
        d["content_ltks"] = huqie.qie(ck)
        d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
        res.append(d)
    return res
if __name__ == "__main__":
    import sys

    chunk(sys.argv[1])
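
# Usage sketch (illustrative assumptions, not part of this file): assuming the
# module is importable as rag.app.laws and that progress callbacks receive a
# float in [0, 1] plus a message string, a caller might do:
#
#     from rag.app.laws import chunk
#
#     def progress(prog, msg):
#         print("{:.0%} {}".format(prog, msg))
#
#     docs = chunk("statute.pdf", callback=progress)
#     for d in docs:
#         print(d["docnm_kwd"], d["content_ltks"][:80])
#
# "statute.pdf" and the callback signature are assumptions for the example;
# see callback__ in rag.app for the exact contract.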