選択できるのは25トピックまでです。 トピックは、先頭が英数字で、英数字とダッシュ('-')を使用した35文字以内のものにしてください。

__init__.py 22KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720
  1. #
  2. # Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import logging
  17. import random
  18. from collections import Counter
  19. from rag.utils import num_tokens_from_string
  20. from . import rag_tokenizer
  21. import re
  22. import copy
  23. import roman_numbers as r
  24. from word2number import w2n
  25. from cn2an import cn2an
  26. from PIL import Image
  27. import chardet
# Candidate codecs probed, in order, by find_codec() when chardet cannot
# confidently identify the encoding of a byte blob.  'utf-8' is listed first
# so it wins whenever the bytes are valid UTF-8.
all_codecs = [
    'utf-8', 'gb2312', 'gbk', 'utf_16', 'ascii', 'big5', 'big5hkscs',
    'cp037', 'cp273', 'cp424', 'cp437',
    'cp500', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857',
    'cp858', 'cp860', 'cp861', 'cp862', 'cp863', 'cp864', 'cp865', 'cp866', 'cp869',
    'cp874', 'cp875', 'cp932', 'cp949', 'cp950', 'cp1006', 'cp1026', 'cp1125',
    'cp1140', 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
    'cp1257', 'cp1258', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213', 'euc_kr',
    'gb18030', 'hz', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2',
    'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr', 'latin_1',
    'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6', 'iso8859_7',
    'iso8859_8', 'iso8859_9', 'iso8859_10', 'iso8859_11', 'iso8859_13',
    'iso8859_14', 'iso8859_15', 'iso8859_16', 'johab', 'koi8_r', 'koi8_t', 'koi8_u',
    'kz1048', 'mac_cyrillic', 'mac_greek', 'mac_iceland', 'mac_latin2', 'mac_roman',
    'mac_turkish', 'ptcp154', 'shift_jis', 'shift_jis_2004', 'shift_jisx0213',
    'utf_32', 'utf_32_be', 'utf_32_le', 'utf_16_be', 'utf_16_le', 'utf_7', 'windows-1250', 'windows-1251',
    'windows-1252', 'windows-1253', 'windows-1254', 'windows-1255', 'windows-1256',
    'windows-1257', 'windows-1258', 'latin-2'
]
  47. def find_codec(blob):
  48. detected = chardet.detect(blob[:1024])
  49. if detected['confidence'] > 0.5:
  50. if detected['encoding'] == "ascii":
  51. return "utf-8"
  52. for c in all_codecs:
  53. try:
  54. blob[:1024].decode(c)
  55. return c
  56. except Exception:
  57. pass
  58. try:
  59. blob.decode(c)
  60. return c
  61. except Exception:
  62. pass
  63. return "utf-8"
# Regexes (tried in priority order) that recognise question/numbered-item
# bullets: Chinese "第N问/第N条" headings, parenthesised numerals, plain "N."
# styles, and English "QUESTION ..." headings written as words, Roman or
# Arabic numerals.  Group 1 of each pattern captures the index token.
QUESTION_PATTERN = [
    r"第([零一二三四五六七八九十百0-9]+)问",
    r"第([零一二三四五六七八九十百0-9]+)条",
    r"[\((]([零一二三四五六七八九十百]+)[\))]",
    r"第([0-9]+)问",
    r"第([0-9]+)条",
    r"([0-9]{1,2})[\. 、]",
    r"([零一二三四五六七八九十百]+)[ 、]",
    r"[\((]([0-9]{1,2})[\))]",
    r"QUESTION (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"QUESTION (I+V?|VI*|XI|IX|X)",
    r"QUESTION ([0-9]+)",
]
  77. def has_qbullet(reg, box, last_box, last_index, last_bull, bull_x0_list):
  78. section, last_section = box['text'], last_box['text']
  79. q_reg = r'(\w|\W)*?(?:?|\?|\n|$)+'
  80. full_reg = reg + q_reg
  81. has_bull = re.match(full_reg, section)
  82. index_str = None
  83. if has_bull:
  84. if 'x0' not in last_box:
  85. last_box['x0'] = box['x0']
  86. if 'top' not in last_box:
  87. last_box['top'] = box['top']
  88. if last_bull and box['x0'] - last_box['x0'] > 10:
  89. return None, last_index
  90. if not last_bull and box['x0'] >= last_box['x0'] and box['top'] - last_box['top'] < 20:
  91. return None, last_index
  92. avg_bull_x0 = 0
  93. if bull_x0_list:
  94. avg_bull_x0 = sum(bull_x0_list) / len(bull_x0_list)
  95. else:
  96. avg_bull_x0 = box['x0']
  97. if box['x0'] - avg_bull_x0 > 10:
  98. return None, last_index
  99. index_str = has_bull.group(1)
  100. index = index_int(index_str)
  101. if last_section[-1] == ':' or last_section[-1] == ':':
  102. return None, last_index
  103. if not last_index or index >= last_index:
  104. bull_x0_list.append(box['x0'])
  105. return has_bull, index
  106. if section[-1] == '?' or section[-1] == '?':
  107. bull_x0_list.append(box['x0'])
  108. return has_bull, index
  109. if box['layout_type'] == 'title':
  110. bull_x0_list.append(box['x0'])
  111. return has_bull, index
  112. pure_section = section.lstrip(re.match(reg, section).group()).lower()
  113. ask_reg = r'(what|when|where|how|why|which|who|whose|为什么|为啥|哪)'
  114. if re.match(ask_reg, pure_section):
  115. bull_x0_list.append(box['x0'])
  116. return has_bull, index
  117. return None, last_index
  118. def index_int(index_str):
  119. res = -1
  120. try:
  121. res = int(index_str)
  122. except ValueError:
  123. try:
  124. res = w2n.word_to_num(index_str)
  125. except ValueError:
  126. try:
  127. res = cn2an(index_str)
  128. except ValueError:
  129. try:
  130. res = r.number(index_str)
  131. except ValueError:
  132. return -1
  133. return res
  134. def qbullets_category(sections):
  135. global QUESTION_PATTERN
  136. hits = [0] * len(QUESTION_PATTERN)
  137. for i, pro in enumerate(QUESTION_PATTERN):
  138. for sec in sections:
  139. if re.match(pro, sec) and not not_bullet(sec):
  140. hits[i] += 1
  141. break
  142. maxium = 0
  143. res = -1
  144. for i, h in enumerate(hits):
  145. if h <= maxium:
  146. continue
  147. res = i
  148. maxium = h
  149. return res, QUESTION_PATTERN[res]
# Bullet/heading regex groups, one group per numbering style.  Within a
# group, earlier patterns are higher hierarchy levels (part > chapter >
# section > clause).  bullets_category() picks the group matching the most
# sections; the chosen group index is then used by title_frequency(),
# hierarchical_merge() and docx_question_level().
BULLET_PATTERN = [[
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\((][零一二三四五六七八九十百]+[\))]",
], [
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,2}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\((][零一二三四五六七八九十百]+[\))]",
    r"[\((][0-9]{,2}[\))]",
], [
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]
]
  176. def random_choices(arr, k):
  177. k = min(len(arr), k)
  178. return random.choices(arr, k=k)
  179. def not_bullet(line):
  180. patt = [
  181. r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"
  182. ]
  183. return any([re.match(r, line) for r in patt])
  184. def bullets_category(sections):
  185. global BULLET_PATTERN
  186. hits = [0] * len(BULLET_PATTERN)
  187. for i, pro in enumerate(BULLET_PATTERN):
  188. for sec in sections:
  189. sec = sec.strip()
  190. for p in pro:
  191. if re.match(p, sec) and not not_bullet(sec):
  192. hits[i] += 1
  193. break
  194. maxium = 0
  195. res = -1
  196. for i, h in enumerate(hits):
  197. if h <= maxium:
  198. continue
  199. res = i
  200. maxium = h
  201. return res
  202. def is_english(texts):
  203. if not texts:
  204. return False
  205. pattern = re.compile(r"[`a-zA-Z0-9\s.,':;/\"?<>!\(\)\-]")
  206. if isinstance(texts, str):
  207. texts = list(texts)
  208. elif isinstance(texts, list):
  209. texts = [t for t in texts if isinstance(t, str) and t.strip()]
  210. else:
  211. return False
  212. if not texts:
  213. return False
  214. eng = sum(1 for t in texts if pattern.fullmatch(t.strip()))
  215. return (eng / len(texts)) > 0.8
  216. def is_chinese(text):
  217. if not text:
  218. return False
  219. chinese = 0
  220. for ch in text:
  221. if '\u4e00' <= ch <= '\u9fff':
  222. chinese += 1
  223. if chinese / len(text) > 0.2:
  224. return True
  225. return False
def tokenize(d, t, eng):
    """Fill the tokenization fields of document dict `d` from text `t`.

    Stores the raw text under "content_with_weight", strips table-related
    HTML tags before tokenizing, and fills both coarse ("content_ltks") and
    fine-grained ("content_sm_ltks") token fields.  Mutates `d` in place.
    `eng` is unused in this function (kept for signature parity with callers).
    """
    d["content_with_weight"] = t
    # Drop table markup (tags with at most 12 chars of attributes) so the
    # tags themselves don't pollute the token stream.
    t = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", t)
    d["content_ltks"] = rag_tokenizer.tokenize(t)
    d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
  231. def tokenize_chunks(chunks, doc, eng, pdf_parser=None):
  232. res = []
  233. # wrap up as es documents
  234. for ii, ck in enumerate(chunks):
  235. if len(ck.strip()) == 0:
  236. continue
  237. logging.debug("-- {}".format(ck))
  238. d = copy.deepcopy(doc)
  239. if pdf_parser:
  240. try:
  241. d["image"], poss = pdf_parser.crop(ck, need_position=True)
  242. add_positions(d, poss)
  243. ck = pdf_parser.remove_tag(ck)
  244. except NotImplementedError:
  245. pass
  246. else:
  247. add_positions(d, [[ii]*5])
  248. tokenize(d, ck, eng)
  249. res.append(d)
  250. return res
  251. def tokenize_chunks_with_images(chunks, doc, eng, images):
  252. res = []
  253. # wrap up as es documents
  254. for ii, (ck, image) in enumerate(zip(chunks, images)):
  255. if len(ck.strip()) == 0:
  256. continue
  257. logging.debug("-- {}".format(ck))
  258. d = copy.deepcopy(doc)
  259. d["image"] = image
  260. add_positions(d, [[ii]*5])
  261. tokenize(d, ck, eng)
  262. res.append(d)
  263. return res
def tokenize_table(tbls, doc, eng, batch_size=10):
    """Turn parsed tables into ES documents.

    Args:
        tbls: iterable of ((img, rows), poss), where rows is either the
            whole table rendered as one string or a list of row strings.
        doc: template document dict, deep-copied per emitted document.
        eng: True when the document language is English.
        batch_size: number of rows merged into one document.

    Returns the list of document dicts.
    """
    res = []
    # add tables
    for (img, rows), poss in tbls:
        if not rows:
            continue
        # Whole-table string: emit a single document.
        if isinstance(rows, str):
            d = copy.deepcopy(doc)
            tokenize(d, rows, eng)
            d["content_with_weight"] = rows
            if img:
                d["image"] = img
                d["doc_type_kwd"] = "image"
            if poss:
                add_positions(d, poss)
            res.append(d)
            continue
        # NOTE(review): both branches of this conditional are the identical
        # ASCII "; " — the non-English branch presumably used a fullwidth
        # ";" that was lost in transit; confirm against upstream.
        de = "; " if eng else "; "
        # Row list: join batch_size rows per document.
        for i in range(0, len(rows), batch_size):
            d = copy.deepcopy(doc)
            r = de.join(rows[i:i + batch_size])
            tokenize(d, r, eng)
            if img:
                d["image"] = img
                d["doc_type_kwd"] = "image"
            add_positions(d, poss)
            res.append(d)
    return res
  292. def add_positions(d, poss):
  293. if not poss:
  294. return
  295. page_num_int = []
  296. position_int = []
  297. top_int = []
  298. for pn, left, right, top, bottom in poss:
  299. page_num_int.append(int(pn + 1))
  300. top_int.append(int(top))
  301. position_int.append((int(pn + 1), int(left), int(right), int(top), int(bottom)))
  302. d["page_num_int"] = page_num_int
  303. d["position_int"] = position_int
  304. d["top_int"] = top_int
def remove_contents_table(sections, eng=False):
    """Strip a table-of-contents / acknowledgement run out of `sections`.

    Looks for a section whose whitespace-stripped text is a ToC heading,
    remembers the prefix of the first ToC entry, and drops everything up to
    the first later section that repeats that prefix (i.e. where the real
    body resumes).  `sections` items may be plain strings or (text, layout)
    tuples.  Mutates `sections` in place; returns None.
    """
    i = 0
    while i < len(sections):
        def get(i):
            # Sections may be plain strings or (text, layout) tuples.
            nonlocal sections
            return (sections[i] if isinstance(sections[i],
                                              type("")) else sections[i][0]).strip()
        if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
                        re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], flags=re.IGNORECASE)):
            i += 1
            continue
        # Drop the ToC heading itself.
        sections.pop(i)
        if i >= len(sections):
            break
        # Prefix of the first ToC entry: 3 chars for CJK, two words for English.
        prefix = get(i)[:3] if not eng else " ".join(get(i).split()[:2])
        while not prefix:
            sections.pop(i)
            if i >= len(sections):
                break
            prefix = get(i)[:3] if not eng else " ".join(get(i).split()[:2])
        sections.pop(i)
        if i >= len(sections) or not prefix:
            break
        # Scan ahead (bounded to 128 sections) for a section repeating the
        # prefix and drop everything in between.
        # NOTE(review): prefix is used as a regex pattern here, so a prefix
        # containing regex metacharacters may misbehave — re.escape(prefix)
        # may have been intended; confirm upstream.
        for j in range(i, min(i + 128, len(sections))):
            if not re.match(prefix, get(j)):
                continue
            for _ in range(i, j):
                sections.pop(i)
            break
def make_colon_as_title(sections):
    """Promote the clause before a trailing colon into a synthetic title.

    `sections` is a list of (text, layout) tuples; a list of plain strings
    is returned unchanged.  When a section's text ends with an ASCII or
    fullwidth colon, the clause between the last sentence boundary and the
    colon is inserted before that section as a ("...", "title") tuple.
    Mutates `sections` in place; only the early-exit paths return a value.
    """
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        if txt[-1] not in "::":
            continue
        # Reverse the string so re.split finds the *last* sentence boundary
        # before the colon first.
        txt = txt[::-1]
        arr = re.split(r"([。?!!?;;]| \.)", txt)
        # NOTE(review): arr[1] is the captured separator (1–2 chars), so
        # "len(arr[1]) < 32" always holds and the insert below looks
        # unreachable; arr[0] may have been intended — confirm upstream.
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        # Un-reverse the clause and insert it as a title before this section.
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1
  354. def title_frequency(bull, sections):
  355. bullets_size = len(BULLET_PATTERN[bull])
  356. levels = [bullets_size + 1 for _ in range(len(sections))]
  357. if not sections or bull < 0:
  358. return bullets_size + 1, levels
  359. for i, (txt, layout) in enumerate(sections):
  360. for j, p in enumerate(BULLET_PATTERN[bull]):
  361. if re.match(p, txt.strip()) and not not_bullet(txt):
  362. levels[i] = j
  363. break
  364. else:
  365. if re.search(r"(title|head)", layout) and not not_title(txt.split("@")[0]):
  366. levels[i] = bullets_size
  367. most_level = bullets_size + 1
  368. for level, c in sorted(Counter(levels).items(), key=lambda x: x[1] * -1):
  369. if level <= bullets_size:
  370. most_level = level
  371. break
  372. return most_level, levels
  373. def not_title(txt):
  374. if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
  375. return False
  376. if len(txt.split()) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
  377. return True
  378. return re.search(r"[,;,。;!!]", txt)
def hierarchical_merge(bull, sections, depth):
    """Group sections into chunks by following the bullet hierarchy.

    Args:
        bull: index into BULLET_PATTERN chosen by bullets_category().
        sections: list of (text, layout) tuples (plain strings are upgraded).
        depth: how many of the lowest hierarchy levels may seed chunks.

    Returns:
        A list of chunks; each chunk is a list of section texts ordered from
        the top of the hierarchy down to the seeding section, with short
        single-section chunks merged together while under ~218 tokens.
    """
    if not sections or bull < 0:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    # Drop empty, single-character and purely numeric sections.
    sections = [(t, o) for t, o in sections if
                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    # levels[j] collects the indexes of sections at hierarchy level j; the
    # two extra slots hold layout-detected titles and plain body text.
    levels = [[] for _ in range(bullets_size + 2)]
    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout) and not not_title(txt):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]
    # for s in sections: print("--", s)

    def binary_search(arr, target):
        # Index of the largest element of sorted `arr` that is below
        # `target`; -1 when arr is empty or target precedes arr[0].
        # Assumes target itself is never in arr (each section index lives
        # in exactly one level list), hence the `assert False` on equality.
        if not arr:
            return -1
        if target > arr[-1]:
            return len(arr) - 1
        if target < arr[0]:
            return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s
    cks = []
    readed = [False] * len(sections)
    # Reverse so the deepest content comes first and the top `depth`
    # entries of the reversed list seed new chunks.
    levels = levels[::-1]
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]:
                continue
            readed[j] = True
            cks.append([j])
            if i + 1 == len(levels) - 1:
                continue
            # Walk the higher levels collecting, for each, the nearest
            # section index preceding j (its ancestor heading).
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0:
                    continue
                if levels[ii][jj] > cks[-1][-1]:
                    cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]:
                readed[ii] = True
    if not cks:
        return cks
    # Replace indexes with texts, ordered top-of-hierarchy first.
    for i in range(len(cks)):
        cks[i] = [sections[j] for j in cks[i][::-1]]
        logging.debug("\n* ".join(cks[i]))
    # Merge consecutive single-section chunks while under ~218 tokens.
    res = [[]]
    num = [0]
    for ck in cks:
        if len(ck) == 1:
            n = num_tokens_from_string(re.sub(r"@@[0-9]+.*", "", ck[0]))
            if n + num[-1] < 218:
                res[-1].append(ck[0])
                num[-1] += n
                continue
            res.append(ck)
            num.append(n)
            continue
        res.append(ck)
        num.append(218)
    return res
def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?", overlapped_percent=0):
    """Greedily merge sections into chunks of roughly chunk_token_num tokens.

    Args:
        sections: list of (text, position_tag) tuples, or plain strings.
        chunk_token_num: soft token budget per chunk.
        delimiter: delimiter spec (see get_delimiters) used to split
            sections that exceed the budget on their own.
        overlapped_percent: percentage of the previous chunk's tail to copy
            into the head of each newly started chunk.

    Returns the list of chunk strings.
    """
    # Function-level import — presumably avoids a circular import with
    # deepdoc; confirm before hoisting to module level.
    from deepdoc.parser.pdf_parser import RAGFlowPdfParser
    if not sections:
        return []
    if isinstance(sections[0], type("")):
        sections = [(s, "") for s in sections]
    cks = [""]
    tk_nums = [0]

    def add_chunk(t, pos):
        # Append text t (with position tag pos) to the current chunk, or
        # start a new chunk once the token budget is exhausted.
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        if not pos:
            pos = ""
        # Tiny fragments don't carry a position tag.
        if tnum < 8:
            pos = ""
        # Ensure that the length of the merged chunk does not exceed chunk_token_num
        if cks[-1] == "" or tk_nums[-1] > chunk_token_num * (100 - overlapped_percent)/100.:
            if cks:
                # Seed the new chunk with the tail of the previous one.
                overlapped = RAGFlowPdfParser.remove_tag(cks[-1])
                t = overlapped[int(len(overlapped)*(100-overlapped_percent)/100.):] + t
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            tk_nums.append(tnum)
        else:
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            tk_nums[-1] += tnum

    dels = get_delimiters(delimiter)
    for sec, pos in sections:
        # Small sections merge whole; big ones are split on delimiters first.
        if num_tokens_from_string(sec) < chunk_token_num:
            add_chunk(sec, pos)
            continue
        splited_sec = re.split(r"(%s)" % dels, sec, flags=re.DOTALL)
        for sub_sec in splited_sec:
            # Skip the delimiter tokens produced by the capturing split.
            if re.match(f"^{dels}$", sub_sec):
                continue
            add_chunk(sub_sec, pos)
    return cks
def naive_merge_with_images(texts, images, chunk_token_num=128, delimiter="\n。;!?"):
    """Merge parallel text/image lists into token-budgeted chunks.

    `texts` and `images` must be the same length; each text may be a plain
    string or a (text, position_tag) tuple.  Returns (chunks, chunk_images)
    of equal length, where a chunk's image is the vertical concatenation of
    the images of all texts merged into it (or None).
    """
    if not texts or len(texts) != len(images):
        return [], []
    cks = [""]
    result_images = [None]
    tk_nums = [0]

    def add_chunk(t, image, pos=""):
        # Append t to the current chunk, or start a new one once the token
        # budget is exceeded; the paired image is carried/merged alongside.
        nonlocal cks, result_images, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        if not pos:
            pos = ""
        # Tiny fragments don't carry a position tag.
        if tnum < 8:
            pos = ""
        # Ensure that the length of the merged chunk does not exceed chunk_token_num
        if cks[-1] == "" or tk_nums[-1] > chunk_token_num:
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            result_images.append(image)
            tk_nums.append(tnum)
        else:
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            if result_images[-1] is None:
                result_images[-1] = image
            else:
                result_images[-1] = concat_img(result_images[-1], image)
            tk_nums[-1] += tnum

    dels = get_delimiters(delimiter)
    for text, image in zip(texts, images):
        # if text is tuple, unpack it
        if isinstance(text, tuple):
            text_str = text[0]
            text_pos = text[1] if len(text) > 1 else ""
            splited_sec = re.split(r"(%s)" % dels, text_str)
            for sub_sec in splited_sec:
                if re.match(f"^{dels}$", sub_sec):
                    continue
                add_chunk(sub_sec, image, text_pos)
        else:
            splited_sec = re.split(r"(%s)" % dels, text)
            for sub_sec in splited_sec:
                if re.match(f"^{dels}$", sub_sec):
                    continue
                add_chunk(sub_sec, image)
    return cks, result_images
  546. def docx_question_level(p, bull=-1):
  547. txt = re.sub(r"\u3000", " ", p.text).strip()
  548. if p.style.name.startswith('Heading'):
  549. return int(p.style.name.split(' ')[-1]), txt
  550. else:
  551. if bull < 0:
  552. return 0, txt
  553. for j, title in enumerate(BULLET_PATTERN[bull]):
  554. if re.match(title, txt):
  555. return j + 1, txt
  556. return len(BULLET_PATTERN[bull]), txt
  557. def concat_img(img1, img2):
  558. if img1 and not img2:
  559. return img1
  560. if not img1 and img2:
  561. return img2
  562. if not img1 and not img2:
  563. return None
  564. if img1 is img2:
  565. return img1
  566. if isinstance(img1, Image.Image) and isinstance(img2, Image.Image):
  567. pixel_data1 = img1.tobytes()
  568. pixel_data2 = img2.tobytes()
  569. if pixel_data1 == pixel_data2:
  570. img2.close()
  571. return img1
  572. width1, height1 = img1.size
  573. width2, height2 = img2.size
  574. new_width = max(width1, width2)
  575. new_height = height1 + height2
  576. new_image = Image.new('RGB', (new_width, new_height))
  577. new_image.paste(img1, (0, 0))
  578. new_image.paste(img2, (0, height1))
  579. img1.close()
  580. img2.close()
  581. return new_image
def naive_merge_docx(sections, chunk_token_num=128, delimiter="\n。;!?"):
    """Merge docx (text, image) sections into token-budgeted chunks.

    Returns (chunks, images), where images[i] is the vertically
    concatenated image of everything merged into chunks[i], or None.
    """
    if not sections:
        return [], []
    cks = [""]
    images = [None]
    tk_nums = [0]

    def add_chunk(t, image, pos=""):
        # NOTE(review): unlike the sibling mergers, `images` is missing from
        # this nonlocal list (harmless — it is only mutated, never rebound)
        # and there is no `if not pos: pos = ""` guard, so a None pos would
        # raise at t.find(pos); confirm callers always pass a string.
        nonlocal cks, tk_nums, delimiter
        tnum = num_tokens_from_string(t)
        # Tiny fragments don't carry a position tag.
        if tnum < 8:
            pos = ""
        if cks[-1] == "" or tk_nums[-1] > chunk_token_num:
            # Start a new chunk (first chunk, or budget reached).
            if t.find(pos) < 0:
                t += pos
            cks.append(t)
            images.append(image)
            tk_nums.append(tnum)
        else:
            # Extend the current chunk and merge the image vertically.
            if cks[-1].find(pos) < 0:
                t += pos
            cks[-1] += t
            images[-1] = concat_img(images[-1], image)
            tk_nums[-1] += tnum

    dels = get_delimiters(delimiter)
    for sec, image in sections:
        splited_sec = re.split(r"(%s)" % dels, sec)
        for sub_sec in splited_sec:
            # Skip delimiter tokens produced by the capturing split.
            if re.match(f"^{dels}$", sub_sec):
                continue
            add_chunk(sub_sec, image, "")
    return cks, images
  613. def extract_between(text: str, start_tag: str, end_tag: str) -> list[str]:
  614. pattern = re.escape(start_tag) + r"(.*?)" + re.escape(end_tag)
  615. return re.findall(pattern, text, flags=re.DOTALL)
  616. def get_delimiters(delimiters: str):
  617. dels = []
  618. s = 0
  619. for m in re.finditer(r"`([^`]+)`", delimiters, re.I):
  620. f, t = m.span()
  621. dels.append(m.group(1))
  622. dels.extend(list(delimiters[s: f]))
  623. s = t
  624. if s < len(delimiters):
  625. dels.extend(list(delimiters[s:]))
  626. dels.sort(key=lambda x: -len(x))
  627. dels = [re.escape(d) for d in dels if d]
  628. dels = [d for d in dels if d]
  629. dels_pattern = "|".join(dels)
  630. return dels_pattern