You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

__init__.py 10.0KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340
  1. import random
  2. from collections import Counter
  3. from rag.utils import num_tokens_from_string
  4. from . import huqie
  5. from nltk import word_tokenize
  6. import re
  7. import copy
  8. from nltk.stem import PorterStemmer
# Shared Porter stemmer instance; used by tokenize() when handling English text.
stemmer = PorterStemmer()

# Groups of regexes describing heading/bullet numbering schemes, one group per
# document style; bullets_category() picks the group matching the most sections:
#   0: Chinese legal/book style (第X编/部分/章/节/条, bracketed Chinese numerals)
#   1: Arabic-numeral style (第1章/第1节, "1.", "1.2", "1.2.3", "1.2.3.4")
#   2: Chinese-numeral outline style (第X章/节, 一、, (一), (1))
#   3: English style (PART ONE, Chapter <Roman numeral>, Section 1, Article 1)
BULLET_PATTERN = [[
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\((][零一二三四五六七八九十百]+[\))]",
], [
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,3}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\((][零一二三四五六七八九十百]+[\))]",
    r"[\((][0-9]{,2}[\))]",
], [
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]
]
  36. def random_choices(arr, k):
  37. k = min(len(arr), k)
  38. return random.choices(arr, k=k)
  39. def bullets_category(sections):
  40. global BULLET_PATTERN
  41. hits = [0] * len(BULLET_PATTERN)
  42. for i, pro in enumerate(BULLET_PATTERN):
  43. for sec in sections:
  44. for p in pro:
  45. if re.match(p, sec):
  46. hits[i] += 1
  47. break
  48. maxium = 0
  49. res = -1
  50. for i, h in enumerate(hits):
  51. if h <= maxium:
  52. continue
  53. res = i
  54. maxium = h
  55. return res
  56. def is_english(texts):
  57. eng = 0
  58. for t in texts:
  59. if re.match(r"[a-zA-Z]{2,}", t.strip()):
  60. eng += 1
  61. if eng / len(texts) > 0.8:
  62. return True
  63. return False
  64. def tokenize(d, t, eng):
  65. d["content_with_weight"] = t
  66. t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
  67. if eng:
  68. t = re.sub(r"([a-z])-([a-z])", r"\1\2", t)
  69. d["content_ltks"] = " ".join([stemmer.stem(w)
  70. for w in word_tokenize(t)])
  71. else:
  72. d["content_ltks"] = huqie.qie(t)
  73. d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
  74. def tokenize_table(tbls, doc, eng, batch_size=10):
  75. res = []
  76. # add tables
  77. for (img, rows), poss in tbls:
  78. if not rows:
  79. continue
  80. if isinstance(rows, str):
  81. d = copy.deepcopy(doc)
  82. r = re.sub(r"<[^<>]{,12}>", "", rows)
  83. tokenize(d, r, eng)
  84. d["content_with_weight"] = rows
  85. d["image"] = img
  86. add_positions(d, poss)
  87. res.append(d)
  88. continue
  89. de = "; " if eng else "; "
  90. for i in range(0, len(rows), batch_size):
  91. d = copy.deepcopy(doc)
  92. r = de.join(rows[i:i + batch_size])
  93. tokenize(d, r, eng)
  94. d["image"] = img
  95. add_positions(d, poss)
  96. res.append(d)
  97. return res
  98. def add_positions(d, poss):
  99. if not poss:
  100. return
  101. d["page_num_int"] = []
  102. d["position_int"] = []
  103. d["top_int"] = []
  104. for pn, left, right, top, bottom in poss:
  105. d["page_num_int"].append(pn + 1)
  106. d["top_int"].append(top)
  107. d["position_int"].append((pn + 1, left, right, top, bottom))
  108. def remove_contents_table(sections, eng=False):
  109. i = 0
  110. while i < len(sections):
  111. def get(i):
  112. nonlocal sections
  113. return (sections[i] if isinstance(sections[i],
  114. type("")) else sections[i][0]).strip()
  115. if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
  116. re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], re.IGNORECASE)):
  117. i += 1
  118. continue
  119. sections.pop(i)
  120. if i >= len(sections):
  121. break
  122. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  123. while not prefix:
  124. sections.pop(i)
  125. if i >= len(sections):
  126. break
  127. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  128. sections.pop(i)
  129. if i >= len(sections) or not prefix:
  130. break
  131. for j in range(i, min(i + 128, len(sections))):
  132. if not re.match(prefix, get(j)):
  133. continue
  134. for _ in range(i, j):
  135. sections.pop(i)
  136. break
def make_colon_as_title(sections):
    """Insert synthetic "title" entries for lines whose text ends with a colon.

    sections is a list of (text, layout) pairs; a list of plain strings is
    returned unchanged. Mutates sections in place by inserting
    (clause, "title") entries before the colon-terminated line; only the
    early-exit paths return a value.
    """
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        # Text before any "@..." positional suffix.
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        # Only colon-terminated lines are title candidates.
        if txt[-1] not in "::":
            continue
        # Reverse so the clause adjoining the colon comes first, then split on
        # sentence punctuation (captured separators land at arr[1::2]).
        txt = txt[::-1]
        arr = re.split(r"([。?!!?;;]| .)", txt)
        # NOTE(review): arr[1] is a captured 1-2 char separator, so
        # len(arr[1]) < 32 is always true and the insert below appears
        # unreachable; len(arr[0]) may have been intended — confirm upstream
        # before changing.
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1
  157. def title_frequency(bull, sections):
  158. bullets_size = len(BULLET_PATTERN[bull])
  159. levels = [bullets_size+1 for _ in range(len(sections))]
  160. if not sections or bull < 0:
  161. return bullets_size+1, levels
  162. for i, (txt, layout) in enumerate(sections):
  163. for j, p in enumerate(BULLET_PATTERN[bull]):
  164. if re.match(p, txt.strip()):
  165. levels[i] = j
  166. break
  167. else:
  168. if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
  169. levels[i] = bullets_size
  170. most_level = bullets_size+1
  171. for l, c in sorted(Counter(levels).items(), key=lambda x:x[1]*-1):
  172. if l <= bullets_size:
  173. most_level = l
  174. break
  175. return most_level, levels
  176. def not_title(txt):
  177. if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
  178. return False
  179. if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
  180. return True
  181. return re.search(r"[,;,。;!!]", txt)
def hierarchical_merge(bull, sections, depth):
    """Group sections into hierarchical chunks using bullet style ``bull``.

    :param bull: index into BULLET_PATTERN selecting the numbering style.
    :param sections: list of (text, layout) pairs, or plain strings.
    :param depth: how many of the finest levels are used as chunk seeds.
    :return: list of chunk groups; each chunk is a list of section texts
             ordered from outermost heading down to the seed section.
    """
    if not sections or bull < 0:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    # Drop empty/one-char sections and purely numeric ones (e.g. page numbers),
    # judging by the text before any "@" positional suffix.
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    # levels[j] collects indices of sections at bullet level j; slot
    # bullets_size holds layout-detected titles, bullets_size+1 body text.
    levels = [[] for _ in range(bullets_size + 2)]
    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]
    # for s in sections: print("--", s)

    def binary_search(arr, target):
        # Index of the largest element of (sorted) arr strictly below target,
        # or -1 when none exists.
        # NOTE(review): the assert assumes target itself never occurs in arr.
        if not arr:
            return -1
        if target > arr[-1]:
            return len(arr) - 1
        if target < arr[0]:
            return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s

    cks = []
    readed = [False] * len(sections)
    # Reverse so levels[0] is body text and the last entry is the top level.
    levels = levels[::-1]
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]:
                continue
            readed[j] = True
            cks.append([j])
            # Already at the top level: no ancestors to collect.
            if i + 1 == len(levels) - 1:
                continue
            # Walk up the coarser levels collecting each nearest ancestor.
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0:
                    continue
                # NOTE(review): jj is an index into levels[ii] but is compared
                # with a section index stored in cks[-1] — looks suspicious
                # (levels[ii][jj] may have been intended); confirm upstream
                # before changing.
                if jj > cks[-1][-1]:
                    cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]:
                readed[ii] = True
    if not cks:
        return cks
    # Replace index lists with section texts, outermost heading first.
    for i in range(len(cks)):
        cks[i] = [sections[j] for j in cks[i][::-1]]
        # NOTE(review): debug print left in place intentionally (doc-only pass).
        print("--------------\n", "\n* ".join(cks[i]))
    # Pack consecutive single-section chunks together while the combined
    # token count (minus any "@@..." suffix) stays under 218.
    res = [[]]
    num = [0]
    for ck in cks:
        if len(ck) == 1:
            n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
            if n + num[-1] < 218:
                res[-1].append(ck[0])
                num[-1] += n
                continue
            res.append(ck)
            num.append(n)
            continue
        res.append(ck)
        num.append(218)
    return res
  262. def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
  263. if not sections:
  264. return []
  265. if isinstance(sections[0], type("")):
  266. sections = [(s, "") for s in sections]
  267. cks = [""]
  268. tk_nums = [0]
  269. def add_chunk(t, pos):
  270. nonlocal cks, tk_nums, delimiter
  271. tnum = num_tokens_from_string(t)
  272. if tnum < 8:
  273. pos = ""
  274. if tk_nums[-1] > chunk_token_num:
  275. if t.find(pos) < 0:
  276. t += pos
  277. cks.append(t)
  278. tk_nums.append(tnum)
  279. else:
  280. if cks[-1].find(pos) < 0:
  281. t += pos
  282. cks[-1] += t
  283. tk_nums[-1] += tnum
  284. for sec, pos in sections:
  285. add_chunk(sec, pos)
  286. continue
  287. s, e = 0, 1
  288. while e < len(sec):
  289. if sec[e] in delimiter:
  290. add_chunk(sec[s: e + 1], pos)
  291. s = e + 1
  292. e = s + 1
  293. else:
  294. e += 1
  295. if s < e:
  296. add_chunk(sec[s: e], pos)
  297. return cks