您最多选择25个主题 主题必须以字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符

__init__.py 7.4KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238
  1. import copy
  2. from nltk.stem import PorterStemmer
  3. stemmer = PorterStemmer()
  4. import re
  5. from nltk import word_tokenize
  6. from . import huqie
  7. from rag.utils import num_tokens_from_string
  8. import random
# Regex groups describing document numbering styles, one group per style.
# bullets_category() picks the group that matches the most section headings,
# and hierarchical_merge() treats the chosen group's pattern order as the
# heading hierarchy (earlier pattern = higher level).
BULLET_PATTERN = [[
    # style 0: Chinese legal/book numbering — 编/部分, 章, 节, 条, (一)
    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"第[零一二三四五六七八九十百0-9]+条",
    r"[\((][零一二三四五六七八九十百]+[\))]",
], [
    # style 1: arabic chapter/section numbers and dotted outlines (1., 1.1, 1.1.1, ...)
    r"第[0-9]+章",
    r"第[0-9]+节",
    r"[0-9]{,3}[\. 、]",
    r"[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
], [
    # style 2: Chinese-numeral chapters and enumerations (一、, (一), (1))
    r"第[零一二三四五六七八九十百0-9]+章",
    r"第[零一二三四五六七八九十百0-9]+节",
    r"[零一二三四五六七八九十百]+[ 、]",
    r"[\((][零一二三四五六七八九十百]+[\))]",
    r"[\((][0-9]{,2}[\))]",
], [
    # style 3: English PART/Chapter/Section/Article headings
    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
    r"Chapter (I+V?|VI*|XI|IX|X)",
    r"Section [0-9]+",
    r"Article [0-9]+"
]
]
  35. def random_choices(arr, k):
  36. k = min(len(arr), k)
  37. return random.choices(arr, k=k)
  38. def bullets_category(sections):
  39. global BULLET_PATTERN
  40. hits = [0] * len(BULLET_PATTERN)
  41. for i, pro in enumerate(BULLET_PATTERN):
  42. for sec in sections:
  43. for p in pro:
  44. if re.match(p, sec):
  45. hits[i] += 1
  46. break
  47. maxium = 0
  48. res = -1
  49. for i, h in enumerate(hits):
  50. if h <= maxium: continue
  51. res = i
  52. maxium = h
  53. return res
  54. def is_english(texts):
  55. eng = 0
  56. for t in texts:
  57. if re.match(r"[a-zA-Z]{2,}", t.strip()):
  58. eng += 1
  59. if eng / len(texts) > 0.8:
  60. return True
  61. return False
  62. def tokenize(d, t, eng):
  63. d["content_with_weight"] = t
  64. if eng:
  65. t = re.sub(r"([a-z])-([a-z])", r"\1\2", t)
  66. d["content_ltks"] = " ".join([stemmer.stem(w) for w in word_tokenize(t)])
  67. else:
  68. d["content_ltks"] = huqie.qie(t)
  69. d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
def tokenize_table(tbls, doc, eng, batch_size=10):
    """Turn extracted tables into tokenized chunk dicts.

    Args:
        tbls: iterable of (img, rows) pairs — rows is a list of row strings
            (evidenced by the join below); img is attached to each chunk.
        doc: template dict deep-copied for every chunk.
        eng: True when the content is English (passed through to tokenize).
        batch_size: number of table rows merged into one chunk.

    Returns:
        List of chunk dicts, each covering up to batch_size rows.
    """
    res = []
    # add tables
    for img, rows in tbls:
        # NOTE(review): both branches of this conditional render identically
        # here ("; "); the non-English side was presumably a full-width
        # delimiter lost in transcription — confirm against the original
        # source before changing anything.
        de = "; " if eng else "; "
        for i in range(0, len(rows), batch_size):
            d = copy.deepcopy(doc)
            r = de.join(rows[i:i + batch_size])
            tokenize(d, r, eng)
            d["image"] = img
            res.append(d)
    return res
  82. def remove_contents_table(sections, eng=False):
  83. i = 0
  84. while i < len(sections):
  85. def get(i):
  86. nonlocal sections
  87. return (sections[i] if type(sections[i]) == type("") else sections[i][0]).strip()
  88. if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
  89. re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], re.IGNORECASE)):
  90. i += 1
  91. continue
  92. sections.pop(i)
  93. if i >= len(sections): break
  94. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  95. while not prefix:
  96. sections.pop(i)
  97. if i >= len(sections): break
  98. prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
  99. sections.pop(i)
  100. if i >= len(sections) or not prefix: break
  101. for j in range(i, min(i + 128, len(sections))):
  102. if not re.match(prefix, get(j)):
  103. continue
  104. for _ in range(i, j): sections.pop(i)
  105. break
def make_colon_as_title(sections):
    """Promote the clause before a trailing colon into its own "title" entry.

    Returns `sections` unchanged when it holds plain strings; otherwise
    mutates the (text, layout) list in place (and implicitly returns None).
    """
    if not sections: return []
    if type(sections[0]) == type(""): return sections
    i = 0
    while i < len(sections):
        txt, layout = sections[i]
        i += 1
        # strip the "@..."-suffixed metadata before inspecting the text
        txt = txt.split("@")[0].strip()
        if not txt:
            continue
        # only lines ending with a (half- or full-width) colon are candidates
        if txt[-1] not in "::":
            continue
        # reverse so the clause immediately before the colon comes first
        txt = txt[::-1]
        arr = re.split(r"([。?!!?;;]| .)", txt)
        # NOTE(review): arr[1] is the captured delimiter (1–2 chars), so
        # len(arr[1]) < 32 is always true and the insert below looks
        # unreachable — possibly len(arr[0]) was intended. Confirm intent
        # upstream before changing.
        if len(arr) < 2 or len(arr[1]) < 32:
            continue
        sections.insert(i - 1, (arr[0][::-1], "title"))
        i += 1
def hierarchical_merge(bull, sections, depth):
    """Group sections into chunks following the bullet heading hierarchy.

    Args:
        bull: index into BULLET_PATTERN selecting the numbering style;
            negative means "no style detected" and yields [].
        sections: list of strings or (text, layout) tuples.
        depth: how many of the finest levels may seed a chunk.

    Returns:
        List of chunks; each chunk is a list of section texts ordered from
        the coarsest heading down to the seeding section.
    """
    if not sections or bull < 0: return []
    if type(sections[0]) == type(""): sections = [(s, "") for s in sections]
    # drop empty/one-char entries and bare page numbers
    sections = [(t,o) for t, o in sections if t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
    bullets_size = len(BULLET_PATTERN[bull])
    # one bucket of section indices per bullet level, plus one for
    # layout-detected titles and one for plain body text
    levels = [[] for _ in range(bullets_size + 2)]

    def not_title(txt):
        # "第...条" (article) lines always count as titles
        if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt): return False
        # very long lines are never titles
        if len(txt) >= 128: return True
        # lines containing sentence punctuation are body text
        return re.search(r"[,;,。;!!]", txt)

    for i, (txt, layout) in enumerate(sections):
        for j, p in enumerate(BULLET_PATTERN[bull]):
            if re.match(p, txt.strip()) and not not_title(txt):
                levels[j].append(i)
                break
        else:
            if re.search(r"(title|head)", layout):
                levels[bullets_size].append(i)
            else:
                levels[bullets_size + 1].append(i)
    sections = [t for t, _ in sections]
    # NOTE(review): leftover debug print — consider removing or using logging
    for s in sections: print("--", s)

    def binary_search(arr, target):
        # Index of the largest element of sorted `arr` that is < target,
        # or -1 when none. Assumes target itself is absent (the level
        # buckets are disjoint), hence the assert on equality.
        if not arr: return -1
        if target > arr[-1]: return len(arr) - 1
        if target < arr[0]: return -1
        s, e = 0, len(arr)
        while e - s > 1:
            i = (e + s) // 2
            if target > arr[i]:
                s = i
                continue
            elif target < arr[i]:
                e = i
                continue
            else:
                assert False
        return s

    cks = []
    readed = [False] * len(sections)
    # reverse so levels[0] is plain body text and coarser headings come later
    levels = levels[::-1]
    for i, arr in enumerate(levels[:depth]):
        for j in arr:
            if readed[j]: continue
            readed[j] = True
            cks.append([j])
            if i + 1 == len(levels) - 1: continue
            # for every coarser level, attach the nearest heading above j
            for ii in range(i + 1, len(levels)):
                jj = binary_search(levels[ii], j)
                if jj < 0: continue
                if jj > cks[-1][-1]: cks[-1].pop(-1)
                cks[-1].append(levels[ii][jj])
            for ii in cks[-1]: readed[ii] = True

    for i in range(len(cks)):
        # reverse the collected indices so the chunk reads coarsest-first
        cks[i] = [sections[j] for j in cks[i][::-1]]
        # NOTE(review): leftover debug print — consider removing or using logging
        print("--------------\n", "\n* ".join(cks[i]))

    return cks
  181. def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
  182. if not sections: return []
  183. if type(sections[0]) == type(""): sections = [(s, "") for s in sections]
  184. cks = [""]
  185. tk_nums = [0]
  186. def add_chunk(t, pos):
  187. nonlocal cks, tk_nums, delimiter
  188. tnum = num_tokens_from_string(t)
  189. if tnum < 8: pos = ""
  190. if tk_nums[-1] > chunk_token_num:
  191. if t.find(pos) < 0: t += pos
  192. cks.append(t)
  193. tk_nums.append(tnum)
  194. else:
  195. if cks[-1].find(pos) < 0: t += pos
  196. cks[-1] += t
  197. tk_nums[-1] += tnum
  198. for sec, pos in sections:
  199. s, e = 0, 1
  200. while e < len(sec):
  201. if sec[e] in delimiter:
  202. add_chunk(sec[s: e+1], pos)
  203. s = e + 1
  204. e = s + 1
  205. else:
  206. e += 1
  207. if s < e: add_chunk(sec[s: e], pos)
  208. return cks