
__init__.py

import random
import re
import copy
from collections import Counter

from rag.utils import num_tokens_from_string
from . import rag_tokenizer

# Candidate encodings probed, in order, when sniffing the charset of a raw blob.
all_codecs = [
    'utf-8', 'gb2312', 'gbk', 'utf_16', 'ascii', 'big5', 'big5hkscs',
    'cp037', 'cp273', 'cp424', 'cp437',
    'cp500', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857',
    'cp858', 'cp860', 'cp861', 'cp862', 'cp863', 'cp864', 'cp865', 'cp866', 'cp869',
    'cp874', 'cp875', 'cp932', 'cp949', 'cp950', 'cp1006', 'cp1026', 'cp1125',
    'cp1140', 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
    'cp1257', 'cp1258', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213', 'euc_kr',
    'gb2312', 'gb18030', 'hz', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2',
    'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr', 'latin_1',
    'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6', 'iso8859_7',
    'iso8859_8', 'iso8859_9', 'iso8859_10', 'iso8859_11', 'iso8859_13',
    'iso8859_14', 'iso8859_15', 'iso8859_16', 'johab', 'koi8_r', 'koi8_t', 'koi8_u',
    'kz1048', 'mac_cyrillic', 'mac_greek', 'mac_iceland', 'mac_latin2', 'mac_roman',
    'mac_turkish', 'ptcp154', 'shift_jis', 'shift_jis_2004', 'shift_jisx0213',
    'utf_32', 'utf_32_be', 'utf_32_le', 'utf_16_be', 'utf_16_le', 'utf_7'
]


def find_codec(blob):
    # Return the first codec in all_codecs that decodes the blob without error;
    # fall back to "utf-8" if none succeeds.
    global all_codecs
    for c in all_codecs:
        try:
            blob.decode(c)
            return c
        except Exception:
            pass
    return "utf-8"
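
# Illustrative usage sketch (not part of the original module): find_codec probes
# the candidate encodings above in order and returns the first one that decodes
# the bytes cleanly. The sample text below is hypothetical.
def _demo_find_codec():
    raw = "你好，世界".encode("gbk")
    codec = find_codec(raw)      # likely "gb2312", which precedes "gbk" in the probe order
    return raw.decode(codec)     # "你好，世界"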

# Bullet/heading patterns, grouped by numbering style: Chinese legal-style headings,
# Arabic-numbered headings, mixed Chinese headings, and English headings.
BULLET_PATTERN = [[
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\(（][零一二三四五六七八九十百]+[\)）]",
], [
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,2}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\(（][零一二三四五六七八九十百]+[\)）]",
    r"[\(（][0-9]{,2}[\)）]",
], [
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]
]

def random_choices(arr, k):
    k = min(len(arr), k)
    return random.choices(arr, k=k)


def not_bullet(line):
    # Lines that look numeric but are not real bullets (bare zeros, ranges, "1..." leaders).
    patt = [
        r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"
    ]
    return any([re.match(r, line) for r in patt])

def bullets_category(sections):
    # Score every bullet-pattern group against the section texts and return the
    # index of the group with the most matches (-1 if nothing matches).
    global BULLET_PATTERN
    hits = [0] * len(BULLET_PATTERN)
    for i, pro in enumerate(BULLET_PATTERN):
        for sec in sections:
            for p in pro:
                if re.match(p, sec) and not not_bullet(sec):
                    hits[i] += 1
                    break
    maximum = 0
    res = -1
    for i, h in enumerate(hits):
        if h <= maximum:
            continue
        res = i
        maximum = h
    return res
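
# Illustrative usage sketch (not part of the original module): bullets_category
# scores each pattern group against a few headings and returns the index of the
# best-matching group. The sample headings are hypothetical.
def _demo_bullets_category():
    secs = ["第一章 总则", "第二章 适用范围", "第三章 附则"]
    return bullets_category(secs)   # 0: the Chinese 章/节/条 group collects the most hits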

def is_english(texts):
    # Treat the list as English if more than 80% of its items start with a latin word.
    eng = 0
    if not texts:
        return False
    for t in texts:
        if re.match(r"[a-zA-Z]{2,}", t.strip()):
            eng += 1
    if eng / len(texts) > 0.8:
        return True
    return False

def tokenize(d, t, eng):
    # Store the raw text plus coarse and fine-grained token fields on the document dict.
    d["content_with_weight"] = t
    t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
    d["content_ltks"] = rag_tokenizer.tokenize(t)
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])

def tokenize_chunks(chunks, doc, eng, pdf_parser):
    res = []
    # wrap up as es documents
    for ck in chunks:
        if len(ck.strip()) == 0:
            continue
        print("--", ck)
        d = copy.deepcopy(doc)
        if pdf_parser:
            try:
                d["image"], poss = pdf_parser.crop(ck, need_position=True)
                add_positions(d, poss)
                ck = pdf_parser.remove_tag(ck)
            except NotImplementedError:
                pass
        tokenize(d, ck, eng)
        res.append(d)
    return res

def tokenize_table(tbls, doc, eng, batch_size=10):
    res = []
    # add tables
    for (img, rows), poss in tbls:
        if not rows:
            continue
        if isinstance(rows, str):
            d = copy.deepcopy(doc)
            tokenize(d, rows, eng)
            d["content_with_weight"] = rows
            if img:
                d["image"] = img
            if poss:
                add_positions(d, poss)
            res.append(d)
            continue
        de = "; " if eng else "； "
        for i in range(0, len(rows), batch_size):
            d = copy.deepcopy(doc)
            r = de.join(rows[i:i + batch_size])
            tokenize(d, r, eng)
            d["image"] = img
            add_positions(d, poss)
            res.append(d)
    return res

def add_positions(d, poss):
    # Attach 1-based page numbers and bounding boxes to the document dict.
    if not poss:
        return
    d["page_num_int"] = []
    d["position_int"] = []
    d["top_int"] = []
    for pn, left, right, top, bottom in poss:
        d["page_num_int"].append(int(pn + 1))
        d["top_int"].append(int(top))
        d["position_int"].append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
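
# Illustrative sketch (not part of the original module): add_positions expects
# (page, left, right, top, bottom) tuples and stores 1-based page numbers on the
# document dict. The coordinates below are made up.
def _demo_add_positions():
    d = {}
    add_positions(d, [(0, 10, 200, 30, 60)])
    # d == {"page_num_int": [1], "position_int": [(1, 10, 200, 30, 60)], "top_int": [30]}
    return d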

def remove_contents_table(sections, eng=False):
    # Drop a table-of-contents/acknowledgements heading and the entries that follow it,
    # up to the point where the body text resumes.
    i = 0
    while i < len(sections):
        def get(i):
            nonlocal sections
            return (sections[i] if isinstance(sections[i],
                    type("")) else sections[i][0]).strip()
        if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
                        re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0]),
                        re.IGNORECASE):
            i += 1
            continue
        sections.pop(i)
        if i >= len(sections):
            break
        prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
        while not prefix:
            sections.pop(i)
            if i >= len(sections):
                break
            prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
        sections.pop(i)
        if i >= len(sections) or not prefix:
            break
        for j in range(i, min(i + 128, len(sections))):
            if not re.match(prefix, get(j)):
                continue
            for _ in range(i, j):
                sections.pop(i)
            break

def make_colon_as_title(sections):
    # When a line ends with a colon, split off the trailing clause and insert it
    # as a synthetic "title" section in front of the current one.
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        if txt[-1] not in ":：":
            continue
        txt = txt[::-1]
        arr = re.split(r"([。？！!?；;]| .)", txt)
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1

def title_frequency(bull, sections):
    # Assign each section a heading level based on the detected bullet family and
    # return the most common heading level together with the per-section levels.
    bullets_size = len(BULLET_PATTERN[bull])
    levels = [bullets_size + 1 for _ in range(len(sections))]
    if not sections or bull < 0:
        return bullets_size + 1, levels
    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()) and not not_bullet(txt):
                levels[i] = j
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
                levels[i] = bullets_size
    most_level = bullets_size + 1
    for level, _ in sorted(Counter(levels).items(), key=lambda x: x[1] * -1):
        if level <= bullets_size:
            most_level = level
            break
    return most_level, levels
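
# Illustrative sketch (not part of the original module): pairing bullets_category
# with title_frequency to get per-section heading levels. The sample (text, layout)
# pairs are hypothetical.
def _demo_title_frequency():
    secs = [("第一章 总则", "title"), ("本文规定了适用范围。", "text")]
    bull = bullets_category([t for t, _ in secs])
    most_level, levels = title_frequency(bull, secs)
    return most_level, levels   # heading sections get a lower level than body text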

def not_title(txt):
    if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
        return False
    if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
        return True
    return re.search(r"[,;，。；!！]", txt)

def hierarchical_merge(bull, sections, depth):
    # Group sections under their headings: bucket each section by heading level,
    # then walk the top `depth` levels and pull each entry together with the
    # nearest lower-level entries that precede it.
    if not sections or bull < 0:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    levels = [[] for _ in range(bullets_size + 2)]

    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]
    # for s in sections: print("--", s)

    def binary_search(arr, target):
        # Return the index of the largest element in (sorted) arr that is smaller
        # than target, or -1 if target precedes every element.
        if not arr:
            return -1
        if target > arr[-1]:
            return len(arr) - 1
        if target < arr[0]:
            return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s

    cks = []
    readed = [False] * len(sections)
    levels = levels[::-1]
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]:
                continue
            readed[j] = True
            cks.append([j])
            if i + 1 == len(levels) - 1:
                continue
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0:
                    continue
                if jj > cks[-1][-1]:
                    cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]:
                readed[ii] = True
    if not cks:
        return cks

    for i in range(len(cks)):
        cks[i] = [sections[j] for j in cks[i][::-1]]
        print("--------------\n", "\n* ".join(cks[i]))

    # Merge single-section chunks into the previous group while the running token
    # count stays under 218; multi-section chunks always start a new group.
    res = [[]]
    num = [0]
    for ck in cks:
        if len(ck) == 1:
            n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
            if n + num[-1] < 218:
                res[-1].append(ck[0])
                num[-1] += n
                continue
            res.append(ck)
            num.append(n)
            continue
        res.append(ck)
        num.append(218)
    return res
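
# Illustrative sketch (not part of the original module): hierarchical_merge pulls
# each body section together with its nearest headings, using the bullet family
# detected by bullets_category. The sample sections are hypothetical.
def _demo_hierarchical_merge():
    secs = ["第一章 总则", "本章说明文档的适用范围。", "第二章 附则", "其他未尽事宜另行规定。"]
    bull = bullets_category(secs)
    # each body line ends up grouped with the chapter heading above it
    return hierarchical_merge(bull, secs, depth=2)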

def naive_merge(sections, chunk_token_num=128, delimiter="\n。；！？"):
    # Greedily concatenate sections into chunks of roughly chunk_token_num tokens,
    # carrying the position tag of each piece along with its text.
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    cks = [""]
    tk_nums = [0]

    def add_chunk(t, pos):
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        if tnum < 8:
            pos = ""
        if tk_nums[-1] > chunk_token_num:
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            tk_nums.append(tnum)
        else:
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            tk_nums[-1] += tnum

    for sec, pos in sections:
        add_chunk(sec, pos)
        continue  # NOTE: the delimiter-based splitting below is unreachable because of this continue
        s, e = 0, 1
        while e < len(sec):
            if sec[e] in delimiter:
                add_chunk(sec[s: e + 1], pos)
                s = e + 1
                e = s + 1
            else:
                e += 1
        if s < e:
            add_chunk(sec[s: e], pos)
    return cks
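
# Illustrative sketch (not part of the original module): naive_merge packs
# (text, position-tag) pairs into chunks of roughly chunk_token_num tokens; it
# relies on the module's own num_tokens_from_string import. The sample sections
# below use empty position tags and are hypothetical.
def _demo_naive_merge():
    secs = [("This is the first paragraph.", ""), ("This is the second paragraph.", "")]
    return naive_merge(secs, chunk_token_num=32)   # both sections fit in a single chunk here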