
import random
import re
import copy
from collections import Counter

from rag.utils import num_tokens_from_string
from . import huqie

# Regex groups describing the numbering styles of section headings.
# Each inner list is one style family: Chinese formal numbering (编/章/节/条),
# Arabic-numbered outlines, mixed Chinese outlines, and English headings.
BULLET_PATTERN = [[
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\(（][零一二三四五六七八九十百]+[\)）]",
], [
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,2}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\(（][零一二三四五六七八九十百]+[\)）]",
    r"[\(（][0-9]{,2}[\)）]",
], [
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]]


def random_choices(arr, k):
    # Sample k items (with replacement), capping k at the array length.
    k = min(len(arr), k)
    return random.choices(arr, k=k)


def not_bullet(line):
    # Lines that merely start with a number (a bare "0", a count or range
    # like "3 个", or dotted TOC leaders like "1....") are not real bullets.
    patt = [
        r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"
    ]
    return any([re.match(r, line) for r in patt])


def bullets_category(sections):
    # Return the index of the BULLET_PATTERN group that matches the most
    # sections, or -1 if no group matches at all.
    global BULLET_PATTERN
    hits = [0] * len(BULLET_PATTERN)
    for i, pro in enumerate(BULLET_PATTERN):
        for sec in sections:
            for p in pro:
                if re.match(p, sec) and not not_bullet(sec):
                    hits[i] += 1
                    break
    maximum = 0
    res = -1
    for i, h in enumerate(hits):
        if h <= maximum:
            continue
        res = i
        maximum = h
    return res
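
# Illustrative usage of bullets_category (a minimal sketch; the heading
# strings below are hypothetical):
#   secs = ["第一章 总则", "第二章 定义", "第三章 附则"]
#   bullets_category(secs)   # -> 0, the group of Chinese-numbered headings
#   # A return value of -1 means no pattern group matched any section.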


def is_english(texts):
    # Treat the batch as English if more than 80% of the lines start with
    # at least two Latin letters.
    eng = 0
    for t in texts:
        if re.match(r"[a-zA-Z]{2,}", t.strip()):
            eng += 1
    if eng / len(texts) > 0.8:
        return True
    return False


def tokenize(d, t, eng):
    # Fill the ES document `d` with the raw text plus coarse- and
    # fine-grained token fields produced by the huqie tokenizer.
    d["content_with_weight"] = t
    t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
    d["content_ltks"] = huqie.qie(t)
    d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])


def tokenize_chunks(chunks, doc, eng, pdf_parser):
    res = []
    # wrap up as ES documents
    for ck in chunks:
        if len(ck.strip()) == 0:
            continue
        print("--", ck)
        d = copy.deepcopy(doc)
        if pdf_parser:
            try:
                # Crop the page image for this chunk, record its positions,
                # then strip the parser's position tags from the text.
                d["image"], poss = pdf_parser.crop(ck, need_position=True)
                add_positions(d, poss)
                ck = pdf_parser.remove_tag(ck)
            except NotImplementedError:
                pass
        tokenize(d, ck, eng)
        res.append(d)
    return res


def tokenize_table(tbls, doc, eng, batch_size=10):
    res = []
    # add tables
    for (img, rows), poss in tbls:
        if not rows:
            continue
        if isinstance(rows, str):
            # The whole table is already one string: index it as a single document.
            d = copy.deepcopy(doc)
            tokenize(d, rows, eng)
            d["content_with_weight"] = rows
            d["image"] = img
            add_positions(d, poss)
            res.append(d)
            continue
        de = "; " if eng else "； "
        for i in range(0, len(rows), batch_size):
            # Join rows in batches so each indexed document stays reasonably small.
            d = copy.deepcopy(doc)
            r = de.join(rows[i:i + batch_size])
            tokenize(d, r, eng)
            d["image"] = img
            add_positions(d, poss)
            res.append(d)
    return res
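
# Illustrative input shape for tokenize_table (a minimal sketch; the values
# below are hypothetical):
#   tbls = [((None, ["row 1 | cell", "row 2 | cell"]), [(0, 10, 200, 30, 60)])]
#   docs = tokenize_table(tbls, {"docnm_kwd": "demo.pdf"}, eng=True)
#   # A string table becomes one document; a list of rows is joined in
#   # batches of `batch_size` rows per document.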


def add_positions(d, poss):
    # Attach layout positions to the document: 1-based page numbers, the top
    # coordinate of each box, and the full (page, left, right, top, bottom) tuples.
    if not poss:
        return
    d["page_num_int"] = []
    d["position_int"] = []
    d["top_int"] = []
    for pn, left, right, top, bottom in poss:
        d["page_num_int"].append(pn + 1)
        d["top_int"].append(top)
        d["position_int"].append((pn + 1, left, right, top, bottom))


def remove_contents_table(sections, eng=False):
    # Drop a table-of-contents (or acknowledgement) block from `sections` in
    # place, scanning forward until the first real heading reappears.
    i = 0
    while i < len(sections):
        def get(i):
            nonlocal sections
            return (sections[i] if isinstance(
                sections[i], type("")) else sections[i][0]).strip()

        if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
                        re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0],
                               flags=re.IGNORECASE)):
            i += 1
            continue
        sections.pop(i)
        if i >= len(sections):
            break
        prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
        while not prefix:
            sections.pop(i)
            if i >= len(sections):
                break
            prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
        sections.pop(i)
        if i >= len(sections) or not prefix:
            break
        # Skip ahead to where the remembered prefix reappears (the real
        # section start) and drop everything in between.
        for j in range(i, min(i + 128, len(sections))):
            if not re.match(prefix, get(j)):
                continue
            for _ in range(i, j):
                sections.pop(i)
            break
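
# Illustrative effect of remove_contents_table (a minimal sketch; the section
# strings are hypothetical):
#   secs = ["目录", "第一章 总则 ....... 1", "第一章 总则", "正文开始"]
#   remove_contents_table(secs)
#   # secs is trimmed in place to ["第一章 总则", "正文开始"]: the TOC marker
#   # and the entries after it are dropped once the real heading reappears.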


def make_colon_as_title(sections):
    # When a section ends with a colon, try to promote its final clause to a
    # synthetic "title" entry (sections are (text, layout) pairs).
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        if txt[-1] not in ":：":
            continue
        txt = txt[::-1]
        arr = re.split(r"([。?！!？;；]| .)", txt)
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1


def title_frequency(bull, sections):
    # Score every section against the chosen bullet group and return
    # (most_level, levels): the most frequent title level plus the per-section levels.
    bullets_size = len(BULLET_PATTERN[bull])
    levels = [bullets_size + 1 for _ in range(len(sections))]
    if not sections or bull < 0:
        return bullets_size + 1, levels

    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()) and not not_bullet(txt):
                levels[i] = j
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
                levels[i] = bullets_size

    most_level = bullets_size + 1
    for l, c in sorted(Counter(levels).items(), key=lambda x: x[1] * -1):
        if l <= bullets_size:
            most_level = l
            break
    return most_level, levels
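
# Illustrative usage of title_frequency (a minimal sketch; the sections are
# hypothetical (text, layout) pairs):
#   secs = [("第一章 总则", ""), ("这是正文内容", "text")]
#   most_level, levels = title_frequency(bullets_category([t for t, _ in secs]), secs)
#   # levels[i] is the bullet level matched by section i (bullets_size for a
#   # layout-detected title, bullets_size + 1 for plain body text), and
#   # most_level is the most common title level.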


def not_title(txt):
    if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
        return False
    # Long sentences, or anything carrying sentence punctuation, are body text.
    if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
        return True
    return re.search(r"[,;，。；！!]", txt)


def hierarchical_merge(bull, sections, depth):
    # Group sections by outline level, then merge each low-level section with
    # the chain of headings above it, up to `depth` levels deep.
    if not sections or bull < 0:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    levels = [[] for _ in range(bullets_size + 2)]

    # Bucket section indices by the bullet level they match; layout-detected
    # titles go to bucket `bullets_size`, everything else to the last bucket.
    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]
    # for s in sections: print("--", s)

    def binary_search(arr, target):
        # Index of the largest element of `arr` smaller than `target`, or -1
        # if there is none (assumes `target` itself is not in `arr`).
        if not arr:
            return -1
        if target > arr[-1]:
            return len(arr) - 1
        if target < arr[0]:
            return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s

    cks = []
    readed = [False] * len(sections)
    levels = levels[::-1]
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]:
                continue
            readed[j] = True
            cks.append([j])
            if i + 1 == len(levels) - 1:
                continue
            # Walk up the coarser levels, attaching the nearest preceding
            # heading at each level to this chunk.
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0:
                    continue
                if jj > cks[-1][-1]:
                    cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]:
                readed[ii] = True
    if not cks:
        return cks

    for i in range(len(cks)):
        cks[i] = [sections[j] for j in cks[i][::-1]]
        print("--------------\n", "\n* ".join(cks[i]))

    # Pack single-section chunks together until they reach ~218 tokens;
    # multi-section chunks are kept as they are.
    res = [[]]
    num = [0]
    for ck in cks:
        if len(ck) == 1:
            n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
            if n + num[-1] < 218:
                res[-1].append(ck[0])
                num[-1] += n
                continue
            res.append(ck)
            num.append(n)
            continue
        res.append(ck)
        num.append(218)

    return res
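
# Illustrative usage of hierarchical_merge (a minimal sketch; the outline
# strings are hypothetical):
#   secs = [("第一章 总则", "title"), ("第一条 目的", "text"), ("第二条 适用范围", "text")]
#   bull = bullets_category([t for t, _ in secs])
#   chunks = hierarchical_merge(bull, secs, depth=5)
#   # Each article ends up grouped with the chapter heading above it, and
#   # short single-section groups are packed together up to ~218 tokens.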


def naive_merge(sections, chunk_token_num=128, delimiter="\n。；！？"):
    # Greedily concatenate sections into chunks of roughly `chunk_token_num`
    # tokens, carrying the position tag `pos` along with each chunk.
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    cks = [""]
    tk_nums = [0]

    def add_chunk(t, pos):
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        if tnum < 8:
            pos = ""
        if tk_nums[-1] > chunk_token_num:
            # Current chunk is full: start a new one.
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            tk_nums.append(tnum)
        else:
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            tk_nums[-1] += tnum

    for sec, pos in sections:
        add_chunk(sec, pos)
        continue
        # NOTE: the delimiter-based splitting below is currently unreachable
        # because of the `continue` above.
        s, e = 0, 1
        while e < len(sec):
            if sec[e] in delimiter:
                add_chunk(sec[s: e + 1], pos)
                s = e + 1
                e = s + 1
            else:
                e += 1
        if s < e:
            add_chunk(sec[s: e], pos)

    return cks
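
# Illustrative usage of naive_merge (a minimal sketch; the texts are
# hypothetical):
#   cks = naive_merge(["第一段。", "第二段。", "Third paragraph."], chunk_token_num=128)
#   # Sections are appended to the current chunk until its token count
#   # exceeds chunk_token_num, after which a new chunk is started.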