__init__.py
#
#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import copy
import logging
import random
import re
from collections import Counter

import chardet
import roman_numbers as r
from cn2an import cn2an
from PIL import Image
from word2number import w2n

from rag.utils import num_tokens_from_string

from . import rag_tokenizer

all_codecs = [
    'utf-8', 'gb2312', 'gbk', 'utf_16', 'ascii', 'big5', 'big5hkscs',
    'cp037', 'cp273', 'cp424', 'cp437',
    'cp500', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857',
    'cp858', 'cp860', 'cp861', 'cp862', 'cp863', 'cp864', 'cp865', 'cp866', 'cp869',
    'cp874', 'cp875', 'cp932', 'cp949', 'cp950', 'cp1006', 'cp1026', 'cp1125',
    'cp1140', 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
    'cp1257', 'cp1258', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213', 'euc_kr',
    'gb18030', 'hz', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2',
    'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr', 'latin_1',
    'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6', 'iso8859_7',
    'iso8859_8', 'iso8859_9', 'iso8859_10', 'iso8859_11', 'iso8859_13',
    'iso8859_14', 'iso8859_15', 'iso8859_16', 'johab', 'koi8_r', 'koi8_t', 'koi8_u',
    'kz1048', 'mac_cyrillic', 'mac_greek', 'mac_iceland', 'mac_latin2', 'mac_roman',
    'mac_turkish', 'ptcp154', 'shift_jis', 'shift_jis_2004', 'shift_jisx0213',
    'utf_32', 'utf_32_be', 'utf_32_le', 'utf_16_be', 'utf_16_le', 'utf_7', 'windows-1250', 'windows-1251',
    'windows-1252', 'windows-1253', 'windows-1254', 'windows-1255', 'windows-1256',
    'windows-1257', 'windows-1258', 'latin-2'
]

def find_codec(blob):
    # Trust chardet only when it is reasonably confident; plain ASCII is
    # widened to utf-8 since every ASCII byte stream is valid UTF-8.
    detected = chardet.detect(blob[:1024])
    if detected['confidence'] > 0.5:
        if detected['encoding'] == "ascii":
            return "utf-8"

    # Otherwise probe the candidate codecs: first against a 1 KB sample,
    # then against the whole blob.
    for c in all_codecs:
        try:
            blob[:1024].decode(c)
            return c
        except Exception:
            pass
        try:
            blob.decode(c)
            return c
        except Exception:
            pass

    return "utf-8"

QUESTION_PATTERN = [
    r"第([零一二三四五六七八九十百0-9]+)问",
    r"第([零一二三四五六七八九十百0-9]+)条",
    r"[\(（]([零一二三四五六七八九十百]+)[\)）]",
    r"第([0-9]+)问",
    r"第([0-9]+)条",
    r"([0-9]{1,2})[\. 、]",
    r"([零一二三四五六七八九十百]+)[ 、]",
    r"[\(（]([0-9]{1,2})[\)）]",
    r"QUESTION (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"QUESTION (I+V?|VI*|XI|IX|X)",
    r"QUESTION ([0-9]+)",
]

def has_qbullet(reg, box, last_box, last_index, last_bull, bull_x0_list):
    section, last_section = box['text'], last_box['text']
    q_reg = r'(\w|\W)*?(?:？|\?|\n|$)+'
    full_reg = reg + q_reg
    has_bull = re.match(full_reg, section)
    index_str = None
    if has_bull:
        if 'x0' not in last_box:
            last_box['x0'] = box['x0']
        if 'top' not in last_box:
            last_box['top'] = box['top']
        if last_bull and box['x0'] - last_box['x0'] > 10:
            return None, last_index
        if not last_bull and box['x0'] >= last_box['x0'] and box['top'] - last_box['top'] < 20:
            return None, last_index
        if bull_x0_list:
            avg_bull_x0 = sum(bull_x0_list) / len(bull_x0_list)
        else:
            avg_bull_x0 = box['x0']
        if box['x0'] - avg_bull_x0 > 10:
            return None, last_index
        index_str = has_bull.group(1)
        index = index_int(index_str)
        if last_section[-1] == ':' or last_section[-1] == '：':
            return None, last_index
        if not last_index or index >= last_index:
            bull_x0_list.append(box['x0'])
            return has_bull, index
        if section[-1] == '?' or section[-1] == '？':
            bull_x0_list.append(box['x0'])
            return has_bull, index
        if box['layout_type'] == 'title':
            bull_x0_list.append(box['x0'])
            return has_bull, index
        # Drop the matched bullet prefix, then check whether the remainder
        # opens like a question.
        pure_section = section[re.match(reg, section).end():].lower()
        ask_reg = r'(what|when|where|how|why|which|who|whose|为什么|为啥|哪)'
        if re.match(ask_reg, pure_section):
            bull_x0_list.append(box['x0'])
            return has_bull, index
    return None, last_index

def index_int(index_str):
    # Try each numeral system in turn: Arabic digits, English number words,
    # Chinese numerals, then Roman numerals; -1 means unparseable.
    res = -1
    try:
        res = int(index_str)
    except ValueError:
        try:
            res = w2n.word_to_num(index_str)
        except ValueError:
            try:
                res = cn2an(index_str)
            except ValueError:
                try:
                    res = r.number(index_str)
                except ValueError:
                    return -1
    return res

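# Illustrative results (assuming word2number, cn2an and roman_numbers parse
# these inputs as expected):
#
#   index_int("3")        # -> 3
#   index_int("twelve")   # -> 12  (via word2number)
#   index_int("十二")     # -> 12  (via cn2an)
#   index_int("IV")       # -> 4   (via roman_numbers)
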
def qbullets_category(sections):
    global QUESTION_PATTERN
    hits = [0] * len(QUESTION_PATTERN)
    for i, pro in enumerate(QUESTION_PATTERN):
        for sec in sections:
            if re.match(pro, sec) and not not_bullet(sec):
                hits[i] += 1
                break
    # Each pattern scores at most one hit (the inner loop breaks on the first
    # match), so the earliest pattern in QUESTION_PATTERN that matches any
    # section wins. If nothing matches, res stays -1 and the negative index
    # falls back to the last pattern.
    maximum = 0
    res = -1
    for i, h in enumerate(hits):
        if h <= maximum:
            continue
        res = i
        maximum = h
    return res, QUESTION_PATTERN[res]

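# Example: sections numbered "第N问" match QUESTION_PATTERN[0], so that index
# and pattern come back.
#
#   idx, patt = qbullets_category(["第1问 ...", "第2问 ..."])   # idx == 0
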
BULLET_PATTERN = [[
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\(（][零一二三四五六七八九十百]+[\)）]",
], [
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,2}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\(（][零一二三四五六七八九十百]+[\)）]",
    r"[\(（][0-9]{,2}[\)）]",
], [
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]
]

def random_choices(arr, k):
    k = min(len(arr), k)
    return random.choices(arr, k=k)


def not_bullet(line):
    patt = [
        r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"
    ]
    return any([re.match(p, line) for p in patt])

def bullets_category(sections):
    global BULLET_PATTERN
    hits = [0] * len(BULLET_PATTERN)
    for i, pro in enumerate(BULLET_PATTERN):
        for sec in sections:
            for p in pro:
                if re.match(p, sec) and not not_bullet(sec):
                    hits[i] += 1
                    break
    maximum = 0
    res = -1
    for i, h in enumerate(hits):
        if h <= maximum:
            continue
        res = i
        maximum = h
    return res

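# Example: statute-style headings hit the first pattern group, so its index
# is returned.
#
#   bullets_category(["第一章 总则", "第一条 ...", "第二条 ..."])   # -> 0
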
def is_english(texts):
    eng = 0
    if not texts:
        return False
    for t in texts:
        if re.match(r"[ `a-zA-Z.,':;/\"?<>!\(\)-]", t.strip()):
            eng += 1
    if eng / len(texts) > 0.8:
        return True
    return False


def is_chinese(text):
    if not text:
        return False
    chinese = 0
    for ch in text:
        if '\u4e00' <= ch <= '\u9fff':
            chinese += 1
    if chinese / len(text) > 0.2:
        return True
    return False

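# Quick checks:
#
#   is_english(["Hello world.", "How are you?"])   # True  (>80% of lines look Latin)
#   is_chinese("质量就是生命。")                     # True  (>20% CJK characters)
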
def tokenize(d, t, eng):
    d["content_with_weight"] = t
    t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
    d["content_ltks"] = rag_tokenizer.tokenize(t)
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])


def tokenize_chunks(chunks, doc, eng, pdf_parser=None):
    res = []
    # wrap up as es documents
    for ii, ck in enumerate(chunks):
        if len(ck.strip()) == 0:
            continue
        logging.debug("-- {}".format(ck))
        d = copy.deepcopy(doc)
        if pdf_parser:
            try:
                d["image"], poss = pdf_parser.crop(ck, need_position=True)
                add_positions(d, poss)
                ck = pdf_parser.remove_tag(ck)
            except NotImplementedError:
                pass
        else:
            add_positions(d, [[ii] * 5])
        tokenize(d, ck, eng)
        res.append(d)
    return res

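# A minimal sketch of how chunks become ES documents; the base `doc` field
# below is illustrative, only the fields set by tokenize()/add_positions()
# are guaranteed.
#
#   doc = {"docnm_kwd": "demo.txt"}   # hypothetical shared metadata
#   docs = tokenize_chunks(["chunk one", "chunk two"], doc, eng=True)
#   # each item carries content_with_weight, content_ltks, content_sm_ltks
#   # plus page/position fields from add_positions()
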
def tokenize_chunks_docx(chunks, doc, eng, images):
    res = []
    # wrap up as es documents
    for ck, image in zip(chunks, images):
        if len(ck.strip()) == 0:
            continue
        logging.debug("-- {}".format(ck))
        d = copy.deepcopy(doc)
        d["image"] = image
        tokenize(d, ck, eng)
        res.append(d)
    return res

def tokenize_table(tbls, doc, eng, batch_size=10):
    res = []
    # add tables
    for (img, rows), poss in tbls:
        if not rows:
            continue
        if isinstance(rows, str):
            d = copy.deepcopy(doc)
            tokenize(d, rows, eng)
            d["content_with_weight"] = rows
            if img:
                d["image"] = img
            if poss:
                add_positions(d, poss)
            res.append(d)
            continue
        de = "; " if eng else "； "
        for i in range(0, len(rows), batch_size):
            d = copy.deepcopy(doc)
            rows_txt = de.join(rows[i:i + batch_size])
            tokenize(d, rows_txt, eng)
            d["image"] = img
            add_positions(d, poss)
            res.append(d)
    return res

def add_positions(d, poss):
    if not poss:
        return
    page_num_int = []
    position_int = []
    top_int = []
    for pn, left, right, top, bottom in poss:
        page_num_int.append(int(pn + 1))
        top_int.append(int(top))
        position_int.append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
    d["page_num_int"] = page_num_int
    d["position_int"] = position_int
    d["top_int"] = top_int

def remove_contents_table(sections, eng=False):
    def get(i):
        return (sections[i] if isinstance(sections[i], type(""))
                else sections[i][0]).strip()

    i = 0
    while i < len(sections):
        # Case-insensitive match against common table-of-contents headings,
        # with ordinary, non-breaking and full-width spaces squeezed out.
        if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
                        re.sub(r"( |\u00a0|\u3000)+", "", get(i).split("@@")[0]),
                        re.IGNORECASE):
            i += 1
            continue
        sections.pop(i)
        if i >= len(sections):
            break
        prefix = get(i)[:3] if not eng else " ".join(get(i).split()[:2])
        while not prefix:
            sections.pop(i)
            if i >= len(sections):
                break
            prefix = get(i)[:3] if not eng else " ".join(get(i).split()[:2])
        sections.pop(i)
        if i >= len(sections) or not prefix:
            break
        for j in range(i, min(i + 128, len(sections))):
            if not re.match(prefix, get(j)):
                continue
            for _ in range(i, j):
                sections.pop(i)
            break

def make_colon_as_title(sections):
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        if txt[-1] not in ":：":
            continue
        txt = txt[::-1]
        arr = re.split(r"([。？！!?;；]| \.)", txt)
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1

def title_frequency(bull, sections):
    bullets_size = len(BULLET_PATTERN[bull])
    levels = [bullets_size + 1 for _ in range(len(sections))]
    if not sections or bull < 0:
        return bullets_size + 1, levels

    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()) and not not_bullet(txt):
                levels[i] = j
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
                levels[i] = bullets_size

    most_level = bullets_size + 1
    for level, c in sorted(Counter(levels).items(), key=lambda x: x[1] * -1):
        if level <= bullets_size:
            most_level = level
            break
    return most_level, levels

def not_title(txt):
    if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
        return False
    if len(txt.split()) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
        return True
    return re.search(r"[,;，。；！!]", txt)

def hierarchical_merge(bull, sections, depth):
    if not sections or bull < 0:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    levels = [[] for _ in range(bullets_size + 2)]

    # Bucket each section index by heading level: matched bullet patterns
    # first, then layout-detected titles, then everything else.
    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]

    # for s in sections: print("--", s)

    def binary_search(arr, target):
        # Index of the largest element strictly below target; -1 if none.
        if not arr:
            return -1
        if target > arr[-1]:
            return len(arr) - 1
        if target < arr[0]:
            return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s

    cks = []
    readed = [False] * len(sections)
    levels = levels[::-1]
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]:
                continue
            readed[j] = True
            cks.append([j])
            if i + 1 == len(levels) - 1:
                continue
            # Walk the coarser levels, collecting the nearest enclosing
            # heading at each one.
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0:
                    continue
                if levels[ii][jj] > cks[-1][-1]:
                    cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]:
                readed[ii] = True

    if not cks:
        return cks

    for i in range(len(cks)):
        cks[i] = [sections[j] for j in cks[i][::-1]]
        logging.debug("\n* ".join(cks[i]))

    # Merge single-section chunks into the previous group while the token
    # budget (218) allows; multi-section chunks always start a fresh group.
    res = [[]]
    num = [0]
    for ck in cks:
        if len(ck) == 1:
            n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
            if n + num[-1] < 218:
                res[-1].append(ck[0])
                num[-1] += n
                continue
            res.append(ck)
            num.append(n)
            continue
        res.append(ck)
        num.append(218)
    return res

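# Illustrative call (bullet group and depth depend on the document): each
# returned group pairs a section with the chain of higher-level headings
# above it.
#
#   chunks = hierarchical_merge(bullets_category(texts), texts, depth=3)
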
def naive_merge(sections, chunk_token_num=128, delimiter="\n。；！？"):
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    cks = [""]
    tk_nums = [0]

    def add_chunk(t, pos):
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        if not pos:
            pos = ""
        if tnum < 8:
            pos = ""
        # Ensure that the length of the merged chunk does not exceed chunk_token_num
        if tk_nums[-1] > chunk_token_num:
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            tk_nums.append(tnum)
        else:
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            tk_nums[-1] += tnum

    for sec, pos in sections:
        add_chunk(sec, pos)

    return cks

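# Example: short sections accumulate into one chunk until its token count
# passes chunk_token_num, then the next section opens a new chunk.
#
#   naive_merge(["First sentence.", "Second sentence.", "Third sentence."],
#               chunk_token_num=8)
#   # -> a list of merged text chunks, each roughly 8+ tokens long
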
def docx_question_level(p, bull=-1):
    txt = re.sub(r"\u3000", " ", p.text).strip()
    if p.style.name.startswith('Heading'):
        return int(p.style.name.split(' ')[-1]), txt
    else:
        if bull < 0:
            return 0, txt
        for j, title in enumerate(BULLET_PATTERN[bull]):
            if re.match(title, txt):
                return j + 1, txt
        return len(BULLET_PATTERN[bull]), txt

def concat_img(img1, img2):
    if img1 and not img2:
        return img1
    if not img1 and img2:
        return img2
    if not img1 and not img2:
        return None
    width1, height1 = img1.size
    width2, height2 = img2.size
    new_width = max(width1, width2)
    new_height = height1 + height2
    new_image = Image.new('RGB', (new_width, new_height))
    new_image.paste(img1, (0, 0))
    new_image.paste(img2, (0, height1))
    return new_image

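# Example: the two images are stacked vertically on a canvas as wide as the
# wider of the two.
#
#   a = Image.new('RGB', (100, 40), 'white')
#   b = Image.new('RGB', (80, 60), 'gray')
#   concat_img(a, b).size   # -> (100, 100)
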
def naive_merge_docx(sections, chunk_token_num=128, delimiter="\n。；！？"):
    if not sections:
        return [], []
    cks = [""]
    images = [None]
    tk_nums = [0]

    def add_chunk(t, image, pos=""):
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        if tnum < 8:
            pos = ""
        if tk_nums[-1] > chunk_token_num:
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            images.append(image)
            tk_nums.append(tnum)
        else:
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            images[-1] = concat_img(images[-1], image)
            tk_nums[-1] += tnum

    for sec, image in sections:
        add_chunk(sec, image, '')

    return cks, images

def extract_between(text: str, start_tag: str, end_tag: str) -> list[str]:
    pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
    return re.findall(pattern, text, flags=re.DOTALL)

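# Example: non-greedy capture between literal tags, matching across newlines.
#
#   extract_between("a<b>X</b> c <b>Y</b>", "<b>", "</b>")   # -> ['X', 'Y']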