Вы не можете выбрать более 25 тем Темы должны начинаться с буквы или цифры, могут содержать дефисы(-) и должны содержать не более 35 символов.

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391
  1. import random
  2. from collections import Counter
  3. from rag.utils import num_tokens_from_string
  4. from . import rag_tokenizer
  5. import re
  6. import copy
  7. all_codecs = [
  8. 'utf-8', 'gb2312', 'gbk', 'utf_16', 'ascii', 'big5', 'big5hkscs',
  9. 'cp037', 'cp273', 'cp424', 'cp437',
  10. 'cp500', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857',
  11. 'cp858', 'cp860', 'cp861', 'cp862', 'cp863', 'cp864', 'cp865', 'cp866', 'cp869',
  12. 'cp874', 'cp875', 'cp932', 'cp949', 'cp950', 'cp1006', 'cp1026', 'cp1125',
  13. 'cp1140', 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
  14. 'cp1257', 'cp1258', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213', 'euc_kr',
  15. 'gb2312', 'gb18030', 'hz', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2',
  16. 'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr', 'latin_1',
  17. 'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6', 'iso8859_7',
  18. 'iso8859_8', 'iso8859_9', 'iso8859_10', 'iso8859_11', 'iso8859_13',
  19. 'iso8859_14', 'iso8859_15', 'iso8859_16', 'johab', 'koi8_r', 'koi8_t', 'koi8_u',
  20. 'kz1048', 'mac_cyrillic', 'mac_greek', 'mac_iceland', 'mac_latin2', 'mac_roman',
  21. 'mac_turkish', 'ptcp154', 'shift_jis', 'shift_jis_2004', 'shift_jisx0213',
  22. 'utf_32', 'utf_32_be', 'utf_32_le''utf_16_be', 'utf_16_le', 'utf_7'
  23. ]
  24. def find_codec(blob):
  25. global all_codecs
  26. for c in all_codecs:
  27. try:
  28. blob[:1024].decode(c)
  29. return c
  30. except Exception as e:
  31. pass
  32. try:
  33. blob.decode(c)
  34. return c
  35. except Exception as e:
  36. pass
  37. return "utf-8"
# Regex groups used to recognise "bullet"/heading prefixes, ordered from the
# highest heading level to the lowest within each group:
#   [0] Chinese legal/book style: 第X编 / 第X章 / 第X节 / 第X条 / (一)
#   [1] Arabic-numbered style:    第1章 / 第1节 / 1. / 1.1 / 1.1.1 / 1.1.1.1
#   [2] Chinese-numbered style:   第一章 / 第一节 / 一、 / (一) / (1)
#   [3] English style:            PART ONE / Chapter I / Section 1 / Article 1
# bullets_category() below picks the group that matches a document best, and
# the pattern's index within its group is used as the heading level.
BULLET_PATTERN = [[
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\((][零一二三四五六七八九十百]+[\))]",
], [
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,2}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\((][零一二三四五六七八九十百]+[\))]",
    r"[\((][0-9]{,2}[\))]",
], [
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]
]
  64. def random_choices(arr, k):
  65. k = min(len(arr), k)
  66. return random.choices(arr, k=k)
  67. def not_bullet(line):
  68. patt = [
  69. r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"
  70. ]
  71. return any([re.match(r, line) for r in patt])
  72. def bullets_category(sections):
  73. global BULLET_PATTERN
  74. hits = [0] * len(BULLET_PATTERN)
  75. for i, pro in enumerate(BULLET_PATTERN):
  76. for sec in sections:
  77. for p in pro:
  78. if re.match(p, sec) and not not_bullet(sec):
  79. hits[i] += 1
  80. break
  81. maxium = 0
  82. res = -1
  83. for i, h in enumerate(hits):
  84. if h <= maxium:
  85. continue
  86. res = i
  87. maxium = h
  88. return res
  89. def is_english(texts):
  90. eng = 0
  91. if not texts: return False
  92. for t in texts:
  93. if re.match(r"[a-zA-Z]{2,}", t.strip()):
  94. eng += 1
  95. if eng / len(texts) > 0.8:
  96. return True
  97. return False
  98. def tokenize(d, t, eng):
  99. d["content_with_weight"] = t
  100. t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
  101. d["content_ltks"] = rag_tokenizer.tokenize(t)
  102. d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
  103. def tokenize_chunks(chunks, doc, eng, pdf_parser):
  104. res = []
  105. # wrap up as es documents
  106. for ck in chunks:
  107. if len(ck.strip()) == 0:continue
  108. print("--", ck)
  109. d = copy.deepcopy(doc)
  110. if pdf_parser:
  111. try:
  112. d["image"], poss = pdf_parser.crop(ck, need_position=True)
  113. add_positions(d, poss)
  114. ck = pdf_parser.remove_tag(ck)
  115. except NotImplementedError as e:
  116. pass
  117. tokenize(d, ck, eng)
  118. res.append(d)
  119. return res
  120. def tokenize_table(tbls, doc, eng, batch_size=10):
  121. res = []
  122. # add tables
  123. for (img, rows), poss in tbls:
  124. if not rows:
  125. continue
  126. if isinstance(rows, str):
  127. d = copy.deepcopy(doc)
  128. tokenize(d, rows, eng)
  129. d["content_with_weight"] = rows
  130. if img: d["image"] = img
  131. if poss: add_positions(d, poss)
  132. res.append(d)
  133. continue
  134. de = "; " if eng else "; "
  135. for i in range(0, len(rows), batch_size):
  136. d = copy.deepcopy(doc)
  137. r = de.join(rows[i:i + batch_size])
  138. tokenize(d, r, eng)
  139. d["image"] = img
  140. add_positions(d, poss)
  141. res.append(d)
  142. return res
  143. def add_positions(d, poss):
  144. if not poss:
  145. return
  146. d["page_num_int"] = []
  147. d["position_int"] = []
  148. d["top_int"] = []
  149. for pn, left, right, top, bottom in poss:
  150. d["page_num_int"].append(int(pn + 1))
  151. d["top_int"].append(int(top))
  152. d["position_int"].append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
  153. def remove_contents_table(sections, eng=False):
  154. i = 0
  155. while i < len(sections):
  156. def get(i):
  157. nonlocal sections
  158. return (sections[i] if isinstance(sections[i],
  159. type("")) else sections[i][0]).strip()
  160. if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
  161. re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], re.IGNORECASE)):
  162. i += 1
  163. continue
  164. sections.pop(i)
  165. if i >= len(sections):
  166. break
  167. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  168. while not prefix:
  169. sections.pop(i)
  170. if i >= len(sections):
  171. break
  172. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  173. sections.pop(i)
  174. if i >= len(sections) or not prefix:
  175. break
  176. for j in range(i, min(i + 128, len(sections))):
  177. if not re.match(prefix, get(j)):
  178. continue
  179. for _ in range(i, j):
  180. sections.pop(i)
  181. break
def make_colon_as_title(sections):
    """Promote the clause before a trailing colon into a synthetic title.

    *sections* is either a list of plain strings (returned untouched) or a
    list of (text, layout) tuples, modified in place: when a section's text
    ends with a half- or full-width colon, the clause after the last
    sentence delimiter is inserted before it as a ("...", "title") tuple.
    """
    if not sections:
        return []
    # Plain-string sections carry no layout info; nothing to do.
    if isinstance(sections[0], type("")):
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        # Drop any "@..." position suffix before inspecting the text.
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        # Only lines ending in an ASCII or full-width colon are candidates.
        if txt[-1] not in "::":
            continue
        # Reverse the text so the clause *after* the last delimiter becomes
        # the part *before* the first delimiter in the reversed string.
        txt = txt[::-1]
        arr = re.split(r"([。?!!?;;]| .)", txt)
        # NOTE(review): arr[1] is the captured delimiter (1-2 chars), so
        # "len(arr[1]) < 32" is always true and the insert below appears
        # unreachable; arr[0] (the clause itself) may have been intended —
        # confirm before changing.
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1
  202. def title_frequency(bull, sections):
  203. bullets_size = len(BULLET_PATTERN[bull])
  204. levels = [bullets_size+1 for _ in range(len(sections))]
  205. if not sections or bull < 0:
  206. return bullets_size+1, levels
  207. for i, (txt, layout) in enumerate(sections):
  208. for j, p in enumerate(BULLET_PATTERN[bull]):
  209. if re.match(p, txt.strip()) and not not_bullet(txt):
  210. levels[i] = j
  211. break
  212. else:
  213. if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
  214. levels[i] = bullets_size
  215. most_level = bullets_size+1
  216. for l, c in sorted(Counter(levels).items(), key=lambda x:x[1]*-1):
  217. if l <= bullets_size:
  218. most_level = l
  219. break
  220. return most_level, levels
  221. def not_title(txt):
  222. if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
  223. return False
  224. if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
  225. return True
  226. return re.search(r"[,;,。;!!]", txt)
def hierarchical_merge(bull, sections, depth):
    """Group sections into hierarchical chunks using bullet/heading levels.

    *bull* selects a BULLET_PATTERN group, *sections* is a list of strings
    or (text, layout) tuples, and *depth* limits how many of the deepest
    levels may seed chunks. Returns a list of chunks (each a list of
    section texts, heading first); consecutive short single-section chunks
    are coalesced up to roughly 218 tokens.
    """
    if not sections or bull < 0:
        return []
    # Normalise plain strings to (text, layout) tuples.
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    # Drop empties, one-character fragments and bare page numbers ("12").
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    # levels[j] collects indices of sections whose bullet matches pattern j;
    # slot bullets_size holds layout-detected titles and slot
    # bullets_size + 1 holds plain body text.
    levels = [[] for _ in range(bullets_size + 2)]
    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]

    def binary_search(arr, target):
        # Index in sorted *arr* of the largest element strictly below
        # *target*; -1 when none. *target* itself must not be present
        # (asserted below).
        if not arr:
            return -1
        if target > arr[-1]:
            return len(arr) - 1
        if target < arr[0]:
            return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s

    cks = []
    readed = [False] * len(sections)
    # Reverse so index 0 is now the deepest level; seed chunks from the
    # first *depth* (deepest) levels only.
    levels = levels[::-1]
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]:
                continue
            readed[j] = True
            cks.append([j])
            if i + 1 == len(levels) - 1:
                continue
            # Walk the shallower levels, attaching the nearest preceding
            # entry at each level to the current chunk.
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0:
                    continue
                # NOTE(review): jj is a position *inside* levels[ii] while
                # cks[-1][-1] is a section index — comparing the two looks
                # inconsistent; confirm whether levels[ii][jj] was intended.
                if jj > cks[-1][-1]:
                    cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]:
                readed[ii] = True
    if not cks:
        return cks
    # Map indices back to texts, reversed so headings come first.
    for i in range(len(cks)):
        cks[i] = [sections[j] for j in cks[i][::-1]]
        print("--------------\n", "\n* ".join(cks[i]))  # debug trace
    # Coalesce consecutive single-section chunks while the running total
    # stays under ~218 tokens ("@@..." position tags are stripped before
    # counting). Multi-section chunks always start a new result entry.
    res = [[]]
    num = [0]
    for ck in cks:
        if len(ck) == 1:
            n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
            if n + num[-1] < 218:
                res[-1].append(ck[0])
                num[-1] += n
                continue
            res.append(ck)
            num.append(n)
            continue
        res.append(ck)
        num.append(218)
    return res
def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
    """Greedily merge sections into chunks of roughly *chunk_token_num* tokens.

    *sections* is a list of strings or (text, position-tag) tuples. Returns
    a list of chunk strings; each chunk carries at most one copy of its
    position tag. *delimiter* is only referenced by the disabled splitting
    path below (see NOTE).
    """
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    cks = [""]
    tk_nums = [0]
    def add_chunk(t, pos):
        # Append text *t* to the current chunk, or start a new chunk once
        # the current one has exceeded the token budget. The position tag
        # *pos* is appended only when not already present; very short
        # snippets (< 8 tokens) get no tag at all.
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        if tnum < 8:
            pos = ""
        if tk_nums[-1] > chunk_token_num:
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            tk_nums.append(tnum)
        else:
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            tk_nums[-1] += tnum
    for sec, pos in sections:
        add_chunk(sec, pos)
        # NOTE(review): this `continue` makes the delimiter-based splitting
        # below unreachable dead code — each section is always added whole.
        # It may have been disabled on purpose; confirm before removing
        # either the `continue` or the code below.
        continue
        s, e = 0, 1
        while e < len(sec):
            if sec[e] in delimiter:
                add_chunk(sec[s: e + 1], pos)
                s = e + 1
                e = s + 1
            else:
                e += 1
        if s < e:
            add_chunk(sec[s: e], pos)
    return cks