You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

__init__.py 18KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import random
  17. from collections import Counter
  18. from rag.utils import num_tokens_from_string
  19. from . import rag_tokenizer
  20. import re
  21. import copy
  22. import roman_numbers as r
  23. from word2number import w2n
  24. from cn2an import cn2an
  25. from PIL import Image
  26. all_codecs = [
  27. 'utf-8', 'gb2312', 'gbk', 'utf_16', 'ascii', 'big5', 'big5hkscs',
  28. 'cp037', 'cp273', 'cp424', 'cp437',
  29. 'cp500', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857',
  30. 'cp858', 'cp860', 'cp861', 'cp862', 'cp863', 'cp864', 'cp865', 'cp866', 'cp869',
  31. 'cp874', 'cp875', 'cp932', 'cp949', 'cp950', 'cp1006', 'cp1026', 'cp1125',
  32. 'cp1140', 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
  33. 'cp1257', 'cp1258', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213', 'euc_kr',
  34. 'gb2312', 'gb18030', 'hz', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2',
  35. 'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr', 'latin_1',
  36. 'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6', 'iso8859_7',
  37. 'iso8859_8', 'iso8859_9', 'iso8859_10', 'iso8859_11', 'iso8859_13',
  38. 'iso8859_14', 'iso8859_15', 'iso8859_16', 'johab', 'koi8_r', 'koi8_t', 'koi8_u',
  39. 'kz1048', 'mac_cyrillic', 'mac_greek', 'mac_iceland', 'mac_latin2', 'mac_roman',
  40. 'mac_turkish', 'ptcp154', 'shift_jis', 'shift_jis_2004', 'shift_jisx0213',
  41. 'utf_32', 'utf_32_be', 'utf_32_le''utf_16_be', 'utf_16_le', 'utf_7'
  42. ]
  43. def find_codec(blob):
  44. global all_codecs
  45. for c in all_codecs:
  46. try:
  47. blob[:1024].decode(c)
  48. return c
  49. except Exception as e:
  50. pass
  51. try:
  52. blob.decode(c)
  53. return c
  54. except Exception as e:
  55. pass
  56. return "utf-8"
# Regexes recognizing a question bullet; group 1 captures the ordinal token
# (digits, Chinese numerals, English words or Roman numerals).  Ordered from
# most to least specific — qbullets_category() returns the first pattern that
# matches any section.
QUESTION_PATTERN = [
    r"第([零一二三四五六七八九十百0-9]+)问",
    r"第([零一二三四五六七八九十百0-9]+)条",
    r"[\(（]([零一二三四五六七八九十百]+)[\)）]",
    r"第([0-9]+)问",
    r"第([0-9]+)条",
    r"([0-9]{1,2})[\. 、]",
    r"([零一二三四五六七八九十百]+)[ 、]",
    r"[\(（]([0-9]{1,2})[\)）]",
    r"QUESTION (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"QUESTION (I+V?|VI*|XI|IX|X)",
    r"QUESTION ([0-9]+)",
]
def has_qbullet(reg, box, last_box, last_index, last_bull, bull_x0_list):
    """Decide whether `box` starts a new question bullet.

    Args:
        reg: regex for the bullet prefix (one of QUESTION_PATTERN).
        box, last_box: layout dicts; keys used here: 'text', 'x0', 'top',
            'layout_type' (assumed produced by the PDF parser — confirm).
        last_index: ordinal of the previously accepted bullet (falsy if none).
        last_bull: match object of the previous bullet, or None.
        bull_x0_list: running x0 history of accepted bullets; APPENDED TO on
            acceptance (side effect).

    Returns:
        (match_object, bullet_index) when the box is accepted as a bullet,
        otherwise (None, last_index).
    """
    section, last_section = box['text'], last_box['text']
    # Bullet must be followed by question-like text ending in ？/?, a newline
    # or end-of-string.
    q_reg = r'(\w|\W)*?(?:？|\?|\n|$)+'
    full_reg = reg + q_reg
    has_bull = re.match(full_reg, section)
    index_str = None
    if has_bull:
        # Backfill geometry for the very first box.
        if 'x0' not in last_box:
            last_box['x0'] = box['x0']
        if 'top' not in last_box:
            last_box['top'] = box['top']
        # Indented relative to the previous bullet → continuation, not a bullet.
        if last_bull and box['x0'] - last_box['x0'] > 10:
            return None, last_index
        # No bullet seen yet and this box sits right under the previous text.
        if not last_bull and box['x0'] >= last_box['x0'] and box['top'] - last_box['top'] < 20:
            return None, last_index
        # Reject boxes indented past the average bullet left margin.
        avg_bull_x0 = 0
        if bull_x0_list:
            avg_bull_x0 = sum(bull_x0_list) / len(bull_x0_list)
        else:
            avg_bull_x0 = box['x0']
        if box['x0'] - avg_bull_x0 > 10:
            return None, last_index
        index_str = has_bull.group(1)
        index = index_int(index_str)
        # Previous section ends with a colon → this is its continuation.
        if last_section[-1] == '：' or last_section[-1] == ':':
            return None, last_index
        # Monotonically increasing ordinal → accept.
        if not last_index or index >= last_index:
            bull_x0_list.append(box['x0'])
            return has_bull, index
        # Out-of-order ordinal, but the text clearly reads as a question.
        if section[-1] == '？' or section[-1] == '?':
            bull_x0_list.append(box['x0'])
            return has_bull, index
        if box['layout_type'] == 'title':
            bull_x0_list.append(box['x0'])
            return has_bull, index
        # NOTE(review): str.lstrip() strips a *character set*, not a prefix —
        # this probably meant str.removeprefix(); left unchanged.
        pure_section = section.lstrip(re.match(reg, section).group()).lower()
        ask_reg = r'(what|when|where|how|why|which|who|whose|为什么|为啥|哪)'
        if re.match(ask_reg, pure_section):
            bull_x0_list.append(box['x0'])
            return has_bull, index
    return None, last_index
  111. def index_int(index_str):
  112. res = -1
  113. try:
  114. res=int(index_str)
  115. except ValueError:
  116. try:
  117. res=w2n.word_to_num(index_str)
  118. except ValueError:
  119. try:
  120. res = cn2an(index_str)
  121. except ValueError:
  122. try:
  123. res = r.number(index_str)
  124. except ValueError:
  125. return -1
  126. return res
  127. def qbullets_category(sections):
  128. global QUESTION_PATTERN
  129. hits = [0] * len(QUESTION_PATTERN)
  130. for i, pro in enumerate(QUESTION_PATTERN):
  131. for sec in sections:
  132. if re.match(pro, sec) and not not_bullet(sec):
  133. hits[i] += 1
  134. break
  135. maxium = 0
  136. res = -1
  137. for i, h in enumerate(hits):
  138. if h <= maxium:
  139. continue
  140. res = i
  141. maxium = h
  142. return res, QUESTION_PATTERN[res]
# Heading hierarchies, one list per document style:
#   [0] Chinese statute style (编/章/节/条/（一）)
#   [1] Arabic-numbered (第N章 / 1. / 1.1 / 1.1.1 / 1.1.1.1)
#   [2] Chinese-numeral outline (章/节/一、/（一）/（1）)
#   [3] English (PART ONE / Chapter I / Section 1 / Article 1)
# bullets_category() selects the group; the position of a pattern inside its
# group is the outline depth used by title_frequency()/hierarchical_merge().
BULLET_PATTERN = [[
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\(（][零一二三四五六七八九十百]+[\)）]",
], [
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,2}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\(（][零一二三四五六七八九十百]+[\)）]",
    r"[\(（][0-9]{,2}[\)）]",
], [
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]
]
  169. def random_choices(arr, k):
  170. k = min(len(arr), k)
  171. return random.choices(arr, k=k)
  172. def not_bullet(line):
  173. patt = [
  174. r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"
  175. ]
  176. return any([re.match(r, line) for r in patt])
  177. def bullets_category(sections):
  178. global BULLET_PATTERN
  179. hits = [0] * len(BULLET_PATTERN)
  180. for i, pro in enumerate(BULLET_PATTERN):
  181. for sec in sections:
  182. for p in pro:
  183. if re.match(p, sec) and not not_bullet(sec):
  184. hits[i] += 1
  185. break
  186. maxium = 0
  187. res = -1
  188. for i, h in enumerate(hits):
  189. if h <= maxium:
  190. continue
  191. res = i
  192. maxium = h
  193. return res
  194. def is_english(texts):
  195. eng = 0
  196. if not texts: return False
  197. for t in texts:
  198. if re.match(r"[a-zA-Z]{2,}", t.strip()):
  199. eng += 1
  200. if eng / len(texts) > 0.8:
  201. return True
  202. return False
def tokenize(d, t, eng):
    """Fill chunk document `d` with the raw text and its token fields.

    Args:
        d: chunk document dict (mutated in place).
        t: chunk text, possibly containing table HTML tags.
        eng: English-content flag — not used here; kept for call-site symmetry.
    """
    d["content_with_weight"] = t
    # Strip table markup so tags don't pollute the token fields.
    t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
    d["content_ltks"] = rag_tokenizer.tokenize(t)
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
  208. def tokenize_chunks(chunks, doc, eng, pdf_parser=None):
  209. res = []
  210. # wrap up as es documents
  211. for ck in chunks:
  212. if len(ck.strip()) == 0:continue
  213. print("--", ck)
  214. d = copy.deepcopy(doc)
  215. if pdf_parser:
  216. try:
  217. d["image"], poss = pdf_parser.crop(ck, need_position=True)
  218. add_positions(d, poss)
  219. ck = pdf_parser.remove_tag(ck)
  220. except NotImplementedError as e:
  221. pass
  222. tokenize(d, ck, eng)
  223. res.append(d)
  224. return res
  225. def tokenize_chunks_docx(chunks, doc, eng, images):
  226. res = []
  227. # wrap up as es documents
  228. for ck, image in zip(chunks, images):
  229. if len(ck.strip()) == 0:continue
  230. print("--", ck)
  231. d = copy.deepcopy(doc)
  232. d["image"] = image
  233. tokenize(d, ck, eng)
  234. res.append(d)
  235. return res
def tokenize_table(tbls, doc, eng, batch_size=10):
    """Wrap parsed tables as ES documents.

    Args:
        tbls: iterable of ((image, rows), positions); `rows` is either the
            whole table as one string (e.g. HTML) or a list of row strings.
        doc: template document dict, deep-copied per output document.
        eng: English flag — selects the row separator ("; " vs "； ").
        batch_size: number of rows merged into each document.

    Returns:
        list of document dicts.
    """
    res = []
    # add tables
    for (img, rows), poss in tbls:
        if not rows:
            continue
        # Whole-table string → a single document.
        if isinstance(rows, str):
            d = copy.deepcopy(doc)
            tokenize(d, rows, eng)
            # tokenize() already stores the same value; re-assigned for emphasis.
            d["content_with_weight"] = rows
            if img: d["image"] = img
            if poss: add_positions(d, poss)
            res.append(d)
            continue
        # Row list → join batch_size rows per document.
        de = "; " if eng else "； "
        for i in range(0, len(rows), batch_size):
            d = copy.deepcopy(doc)
            r = de.join(rows[i:i + batch_size])
            tokenize(d, r, eng)
            d["image"] = img
            add_positions(d, poss)
            res.append(d)
    return res
  259. def add_positions(d, poss):
  260. if not poss:
  261. return
  262. d["page_num_int"] = []
  263. d["position_int"] = []
  264. d["top_int"] = []
  265. for pn, left, right, top, bottom in poss:
  266. d["page_num_int"].append(int(pn + 1))
  267. d["top_int"].append(int(top))
  268. d["position_int"].append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
  269. def remove_contents_table(sections, eng=False):
  270. i = 0
  271. while i < len(sections):
  272. def get(i):
  273. nonlocal sections
  274. return (sections[i] if isinstance(sections[i],
  275. type("")) else sections[i][0]).strip()
  276. if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
  277. re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], re.IGNORECASE)):
  278. i += 1
  279. continue
  280. sections.pop(i)
  281. if i >= len(sections):
  282. break
  283. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  284. while not prefix:
  285. sections.pop(i)
  286. if i >= len(sections):
  287. break
  288. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  289. sections.pop(i)
  290. if i >= len(sections) or not prefix:
  291. break
  292. for j in range(i, min(i + 128, len(sections))):
  293. if not re.match(prefix, get(j)):
  294. continue
  295. for _ in range(i, j):
  296. sections.pop(i)
  297. break
def make_colon_as_title(sections):
    """Promote a long 'X：' lead-in to a title section.

    For (text, layout) sections whose text ends with a colon, insert a new
    ("…", "title") element holding the text after the last sentence break.
    Plain-string section lists are returned unchanged.

    Returns:
        [] for empty input, `sections` for plain strings; otherwise mutates
        `sections` in place and implicitly returns None (inconsistent, but
        callers rely on the in-place effect).
    """
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        if txt[-1] not in "：:":
            continue
        # Reverse the string so the *last* sentence break is found first.
        txt = txt[::-1]
        arr = re.split(r"([。？！!?；;]| \.)", txt)
        # NOTE(review): arr[1] is the separator captured by re.split (1–2
        # chars), so `len(arr[1]) < 32` is always true and the insert below
        # is unreachable; the guard probably meant arr[0].  Left unchanged.
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1
  318. def title_frequency(bull, sections):
  319. bullets_size = len(BULLET_PATTERN[bull])
  320. levels = [bullets_size+1 for _ in range(len(sections))]
  321. if not sections or bull < 0:
  322. return bullets_size+1, levels
  323. for i, (txt, layout) in enumerate(sections):
  324. for j, p in enumerate(BULLET_PATTERN[bull]):
  325. if re.match(p, txt.strip()) and not not_bullet(txt):
  326. levels[i] = j
  327. break
  328. else:
  329. if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
  330. levels[i] = bullets_size
  331. most_level = bullets_size+1
  332. for l, c in sorted(Counter(levels).items(), key=lambda x:x[1]*-1):
  333. if l <= bullets_size:
  334. most_level = l
  335. break
  336. return most_level, levels
  337. def not_title(txt):
  338. if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
  339. return False
  340. if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
  341. return True
  342. return re.search(r"[,;,。;!!]", txt)
def hierarchical_merge(bull, sections, depth):
    """Group sections into chunks that follow the heading hierarchy.

    Args:
        bull: BULLET_PATTERN group index (< 0 means no bullet style).
        sections: list of str or (text, layout) tuples.
        depth: how many of the deepest outline levels seed new chunks.

    Returns:
        A list of chunks; each chunk is a list of section texts (ancestor
        headings first).  Consecutive single-section chunks are then greedily
        merged up to ~218 tokens.
    """
    if not sections or bull < 0:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    # Drop near-empty sections and bare page numbers like "23".
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    # levels[j] holds indices of sections at outline depth j; slot
    # bullets_size = layout-detected titles, bullets_size+1 = body text.
    levels = [[] for _ in range(bullets_size + 2)]
    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]
    # for s in sections: print("--", s)

    def binary_search(arr, target):
        # Index of the largest element strictly below `target`; `arr` is a
        # sorted index list and `target` is never in it (hence the assert).
        if not arr:
            return -1
        if target > arr[-1]:
            return len(arr) - 1
        if target < arr[0]:
            return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s

    cks = []
    readed = [False] * len(sections)
    levels = levels[::-1]  # deepest-first, so levels[:depth] = body upward
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]:
                continue
            readed[j] = True
            cks.append([j])
            if i + 1 == len(levels) - 1:
                continue
            # Walk up the hierarchy collecting the nearest ancestor heading
            # at each shallower level.
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0:
                    continue
                # NOTE(review): jj is an index *into levels[ii]* but is
                # compared against a *section* index — this looks like it
                # should be `levels[ii][jj] > cks[-1][-1]`; confirm before
                # changing.  Left as-is.
                if jj > cks[-1][-1]:
                    cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]:
                readed[ii] = True
    if not cks:
        return cks

    # Resolve indices to texts (headings first thanks to the reverse).
    for i in range(len(cks)):
        cks[i] = [sections[j] for j in cks[i][::-1]]
        print("--------------\n", "\n* ".join(cks[i]))

    # Greedily merge consecutive single-section chunks below ~218 tokens.
    res = [[]]
    num = [0]
    for ck in cks:
        if len(ck) == 1:
            n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
            if n + num[-1] < 218:
                res[-1].append(ck[0])
                num[-1] += n
                continue
            res.append(ck)
            num.append(n)
            continue
        res.append(ck)
        num.append(218)
    return res
def naive_merge(sections, chunk_token_num=128, delimiter="\n。；！？"):
    """Merge sections into chunks of roughly `chunk_token_num` tokens.

    Args:
        sections: list of str or (text, position_tag) tuples.
        chunk_token_num: soft upper bound of tokens per merged chunk.
        delimiter: sentence delimiters — only referenced by the currently
            unreachable splitting code below.

    Returns:
        list of merged chunk strings (position tags appended at most once).
    """
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    cks = [""]
    tk_nums = [0]

    def add_chunk(t, pos):
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        # Tiny fragments don't get a position tag appended.
        if tnum < 8:
            pos = ""
        # Ensure that the length of the merged chunk does not exceed chunk_token_num
        if tk_nums[-1] > chunk_token_num:
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            tk_nums.append(tnum)
        else:
            # Append the tag only if the running chunk doesn't carry it yet.
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            tk_nums[-1] += tnum

    for sec, pos in sections:
        add_chunk(sec, pos)
        continue  # NOTE(review): makes the delimiter splitting below dead code
        s, e = 0, 1
        while e < len(sec):
            if sec[e] in delimiter:
                add_chunk(sec[s: e + 1], pos)
                s = e + 1
                e = s + 1
            else:
                e += 1
        if s < e:
            add_chunk(sec[s: e], pos)
    return cks
  460. def docx_question_level(p, bull = -1):
  461. txt = re.sub(r"\u3000", " ", p.text).strip()
  462. if p.style.name.startswith('Heading'):
  463. return int(p.style.name.split(' ')[-1]), txt
  464. else:
  465. if bull < 0:
  466. return 0, txt
  467. for j, title in enumerate(BULLET_PATTERN[bull]):
  468. if re.match(title, txt):
  469. return j+1, txt
  470. return len(BULLET_PATTERN[bull]), txt
  471. def concat_img(img1, img2):
  472. if img1 and not img2:
  473. return img1
  474. if not img1 and img2:
  475. return img2
  476. if not img1 and not img2:
  477. return None
  478. width1, height1 = img1.size
  479. width2, height2 = img2.size
  480. new_width = max(width1, width2)
  481. new_height = height1 + height2
  482. new_image = Image.new('RGB', (new_width, new_height))
  483. new_image.paste(img1, (0, 0))
  484. new_image.paste(img2, (0, height1))
  485. return new_image
def naive_merge_docx(sections, chunk_token_num=128, delimiter="\n。；！？"):
    """Merge (text, image) DOCX sections into ~chunk_token_num-token chunks.

    Args:
        sections: list of (text, image) pairs.
        chunk_token_num: soft upper bound of tokens per merged chunk.
        delimiter: unused here — no delimiter splitting happens (cf.
            naive_merge); kept for signature symmetry.

    Returns:
        (chunks, images) of equal length; images of merged sections are
        stacked vertically via concat_img().
    """
    if not sections:
        return [], []
    cks = [""]
    images = [None]
    tk_nums = [0]

    def add_chunk(t, image, pos=""):
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        # Tiny fragments don't get a position tag appended.
        if tnum < 8:
            pos = ""
        if tk_nums[-1] > chunk_token_num:
            # Current chunk is full — start a new one.
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            images.append(image)
            tk_nums.append(tnum)
        else:
            # Merge into the running chunk and stack the images.
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            images[-1] = concat_img(images[-1], image)
            tk_nums[-1] += tnum

    for sec, image in sections:
        add_chunk(sec, image, '')
    return cks, images
  512. def keyword_extraction(chat_mdl, content):
  513. prompt = """
  514. You're a question analyzer.
  515. 1. Please give me the most important keyword/phrase of this question.
  516. Answer format: (in language of user's question)
  517. - keyword:
  518. """
  519. kwd = chat_mdl.chat(prompt, [{"role": "user", "content": content}], {"temperature": 0.2})
  520. if isinstance(kwd, tuple): return kwd[0]
  521. return kwd