You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356
  1. import random
  2. from collections import Counter
  3. from rag.utils import num_tokens_from_string
  4. from . import huqie
  5. import re
  6. import copy
# Heading/bullet numbering "families", one inner list per document style.
# bullets_category() below returns the index of the family that matches a
# document's section headings best; within a family, an earlier pattern
# corresponds to a higher (shallower) heading level (see title_frequency()).
BULLET_PATTERN = [[
    # Formal Chinese legal/official numbering: 编/部分, 章, 节, 条, (一)
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\((][零一二三四五六七八九十百]+[\))]",
], [
    # Arabic-numeral style: 第1章, 1., 1.1, 1.1.1, 1.1.1.1
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,2}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    # Chinese-numeral style: 第一章, 一、, (一), (1)
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\((][零一二三四五六七八九十百]+[\))]",
    r"[\((][0-9]{,2}[\))]",
], [
    # English style: PART ONE, Chapter IV, Section 1, Article 1
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]
]
  33. def random_choices(arr, k):
  34. k = min(len(arr), k)
  35. return random.choices(arr, k=k)
  36. def not_bullet(line):
  37. patt = [
  38. r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"
  39. ]
  40. return any([re.match(r, line) for r in patt])
  41. def bullets_category(sections):
  42. global BULLET_PATTERN
  43. hits = [0] * len(BULLET_PATTERN)
  44. for i, pro in enumerate(BULLET_PATTERN):
  45. for sec in sections:
  46. for p in pro:
  47. if re.match(p, sec) and not not_bullet(sec):
  48. hits[i] += 1
  49. break
  50. maxium = 0
  51. res = -1
  52. for i, h in enumerate(hits):
  53. if h <= maxium:
  54. continue
  55. res = i
  56. maxium = h
  57. return res
  58. def is_english(texts):
  59. eng = 0
  60. if not texts: return False
  61. for t in texts:
  62. if re.match(r"[a-zA-Z]{2,}", t.strip()):
  63. eng += 1
  64. if eng / len(texts) > 0.8:
  65. return True
  66. return False
  67. def tokenize(d, t, eng):
  68. d["content_with_weight"] = t
  69. t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
  70. d["content_ltks"] = huqie.qie(t)
  71. d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
  72. def tokenize_chunks(chunks, doc, eng, pdf_parser):
  73. res = []
  74. # wrap up as es documents
  75. for ck in chunks:
  76. if len(ck.strip()) == 0:continue
  77. print("--", ck)
  78. d = copy.deepcopy(doc)
  79. if pdf_parser:
  80. try:
  81. d["image"], poss = pdf_parser.crop(ck, need_position=True)
  82. add_positions(d, poss)
  83. ck = pdf_parser.remove_tag(ck)
  84. except NotImplementedError as e:
  85. pass
  86. tokenize(d, ck, eng)
  87. res.append(d)
  88. return res
  89. def tokenize_table(tbls, doc, eng, batch_size=10):
  90. res = []
  91. # add tables
  92. for (img, rows), poss in tbls:
  93. if not rows:
  94. continue
  95. if isinstance(rows, str):
  96. d = copy.deepcopy(doc)
  97. tokenize(d, rows, eng)
  98. d["content_with_weight"] = rows
  99. if img: d["image"] = img
  100. if poss: add_positions(d, poss)
  101. res.append(d)
  102. continue
  103. de = "; " if eng else "; "
  104. for i in range(0, len(rows), batch_size):
  105. d = copy.deepcopy(doc)
  106. r = de.join(rows[i:i + batch_size])
  107. tokenize(d, r, eng)
  108. d["image"] = img
  109. add_positions(d, poss)
  110. res.append(d)
  111. return res
  112. def add_positions(d, poss):
  113. if not poss:
  114. return
  115. d["page_num_int"] = []
  116. d["position_int"] = []
  117. d["top_int"] = []
  118. for pn, left, right, top, bottom in poss:
  119. d["page_num_int"].append(int(pn + 1))
  120. d["top_int"].append(int(top))
  121. d["position_int"].append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
  122. def remove_contents_table(sections, eng=False):
  123. i = 0
  124. while i < len(sections):
  125. def get(i):
  126. nonlocal sections
  127. return (sections[i] if isinstance(sections[i],
  128. type("")) else sections[i][0]).strip()
  129. if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
  130. re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], re.IGNORECASE)):
  131. i += 1
  132. continue
  133. sections.pop(i)
  134. if i >= len(sections):
  135. break
  136. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  137. while not prefix:
  138. sections.pop(i)
  139. if i >= len(sections):
  140. break
  141. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  142. sections.pop(i)
  143. if i >= len(sections) or not prefix:
  144. break
  145. for j in range(i, min(i + 128, len(sections))):
  146. if not re.match(prefix, get(j)):
  147. continue
  148. for _ in range(i, j):
  149. sections.pop(i)
  150. break
def make_colon_as_title(sections):
    # For (text, layout) sections ending with a colon (ASCII or full-width),
    # try to insert a synthetic ("...", "title") entry before them, taking
    # the text after the last sentence delimiter as the title.
    # Plain-string sections are returned unchanged; tuple input is mutated
    # in place (implicit None return).
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        # Strip positional "@..." suffix before inspecting the text.
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        if txt[-1] not in "::":
            continue
        # Reverse so the text AFTER the last delimiter becomes arr[0].
        txt = txt[::-1]
        arr = re.split(r"([。?!!?;;]| .)", txt)
        # NOTE(review): arr[1] is the captured delimiter (1-2 chars), so
        # len(arr[1]) < 32 is always true and the insert below appears
        # unreachable; len(arr[0]) may have been intended — confirm upstream.
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1
  171. def title_frequency(bull, sections):
  172. bullets_size = len(BULLET_PATTERN[bull])
  173. levels = [bullets_size+1 for _ in range(len(sections))]
  174. if not sections or bull < 0:
  175. return bullets_size+1, levels
  176. for i, (txt, layout) in enumerate(sections):
  177. for j, p in enumerate(BULLET_PATTERN[bull]):
  178. if re.match(p, txt.strip()) and not not_bullet(txt):
  179. levels[i] = j
  180. break
  181. else:
  182. if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
  183. levels[i] = bullets_size
  184. most_level = bullets_size+1
  185. for l, c in sorted(Counter(levels).items(), key=lambda x:x[1]*-1):
  186. if l <= bullets_size:
  187. most_level = l
  188. break
  189. return most_level, levels
  190. def not_title(txt):
  191. if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
  192. return False
  193. if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
  194. return True
  195. return re.search(r"[,;,。;!!]", txt)
def hierarchical_merge(bull, sections, depth):
    # Group sections into chunks following the heading hierarchy of bullet
    # family `bull` (index into BULLET_PATTERN).  Each chunk collects a
    # section plus its chain of ancestor headings; small single-line chunks
    # are then merged under a ~218-token budget.  `depth` limits how many of
    # the deepest levels seed chunks.  Returns [] when sections is empty or
    # bull < 0.
    if not sections or bull < 0:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    # Drop empty/trivial entries and bare page-number artifacts.
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    # levels[j]: sorted indices of sections at heading level j; slot
    # bullets_size = layout-detected titles, bullets_size + 1 = body text.
    levels = [[] for _ in range(bullets_size + 2)]
    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]
    # for s in sections: print("--", s)

    def binary_search(arr, target):
        # Index of the largest element of sorted `arr` that is < target;
        # -1 when arr is empty or target precedes arr[0].
        # NOTE(review): asserts target itself never occurs in arr — holds
        # only while the per-level index lists are disjoint.
        if not arr:
            return -1
        if target > arr[-1]:
            return len(arr) - 1
        if target < arr[0]:
            return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s

    cks = []
    readed = [False] * len(sections)
    levels = levels[::-1]  # deepest content first
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]:
                continue
            readed[j] = True
            cks.append([j])
            # Nothing shallower than the title level left to climb.
            if i + 1 == len(levels) - 1:
                continue
            # Walk shallower levels, collecting the nearest preceding
            # section index at each (the ancestor heading).
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0:
                    continue
                # NOTE(review): jj (a position inside levels[ii]) is compared
                # with cks[-1][-1] (a section index) — looks suspicious;
                # confirm intended semantics upstream.
                if jj > cks[-1][-1]:
                    cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]:
                readed[ii] = True
    if not cks:
        return cks

    for i in range(len(cks)):
        # Restore document order inside each chunk.
        cks[i] = [sections[j] for j in cks[i][::-1]]
        # NOTE(review): leftover debug output to stdout.
        print("--------------\n", "\n* ".join(cks[i]))

    # Merge small single-line chunks into the running group while the
    # running token count stays under 218; multi-line chunks always start a
    # new group (pessimistically counted as 218 tokens).
    res = [[]]
    num = [0]
    for ck in cks:
        if len(ck) == 1:
            n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
            if n + num[-1] < 218:
                res[-1].append(ck[0])
                num[-1] += n
                continue
            res.append(ck)
            num.append(n)
            continue
        res.append(ck)
        num.append(218)
    return res
def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
    # Greedily merge (text, pos) sections into chunks of roughly
    # chunk_token_num tokens.  Returns the list of chunk strings (the first
    # may be empty when the very first section starts a fresh chunk).
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    cks = [""]
    tk_nums = [0]

    def add_chunk(t, pos):
        # Append text t (tagged with position marker `pos`) to the current
        # chunk, or start a new chunk once the current one exceeds the
        # token budget.  Tiny fragments (<8 tokens) drop their pos tag.
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        if tnum < 8:
            pos = ""
        if tk_nums[-1] > chunk_token_num:
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            tk_nums.append(tnum)
        else:
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            tk_nums[-1] += tnum

    for sec, pos in sections:
        add_chunk(sec, pos)
        continue
        # NOTE(review): unreachable — the `continue` above disables this
        # delimiter-based sub-splitting; presumably kept for reference.
        s, e = 0, 1
        while e < len(sec):
            if sec[e] in delimiter:
                add_chunk(sec[s: e + 1], pos)
                s = e + 1
                e = s + 1
            else:
                e += 1
        if s < e:
            add_chunk(sec[s: e], pos)
    return cks