#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import re
from io import BytesIO
from xpinyin import Pinyin
import numpy as np
import pandas as pd
from collections import Counter
# from openpyxl import load_workbook, Workbook
from dateutil.parser import parse as datetime_parse

from api.db.services.knowledgebase_service import KnowledgebaseService
from deepdoc.parser.utils import get_text
from rag.nlp import rag_tokenizer, tokenize
from deepdoc.parser import ExcelParser


class Excel(ExcelParser):
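    """Workbook parser that turns each worksheet into a pandas DataFrame.

    Extends deepdoc's ExcelParser with header detection that handles both
    plain single-row headers and multi-row headers built from merged cells.
    """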

    def __call__(self, fnm, binary=None, from_page=0, to_page=10000000000, callback=None):
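        """Parse ``fnm`` (path) or ``binary`` (bytes) sheet by sheet.

        Returns a list of DataFrames, one per sheet that yields any data
        rows inside the [from_page, to_page) row window.
        """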
        if not binary:
            wb = Excel._load_excel_to_workbook(fnm)
        else:
            wb = Excel._load_excel_to_workbook(BytesIO(binary))
        total = 0
        for sheetname in wb.sheetnames:
            total += len(list(wb[sheetname].rows))

        res, fails, done = [], [], 0
        rn = 0
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]
            rows = list(ws.rows)
            if not rows:
                continue
            headers, header_rows = self._parse_headers(ws, rows)
            if not headers:
                continue
            data = []
            for i, r in enumerate(rows[header_rows:]):
                rn += 1
                if rn - 1 < from_page:
                    continue
                if rn - 1 >= to_page:
                    break
                row_data = self._extract_row_data(ws, r, header_rows + i, len(headers))
                if row_data is None:
                    fails.append(str(i))
                    continue
                if self._is_empty_row(row_data):
                    continue
                data.append(row_data)
                done += 1
            if len(data) == 0:
                continue
            df = pd.DataFrame(data, columns=headers)
            res.append(df)

        msg = "Extract records: {}~{}".format(from_page + 1, min(to_page, from_page + rn))
        if fails:
            msg += " {} failures, line: {}...".format(len(fails), ",".join(fails[:3]))
        callback(0.3, msg)
        return res

    def _parse_headers(self, ws, rows):
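        """Return (headers, header_row_count), picking multi-level parsing when merged cells suggest a complex header."""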
        if len(rows) == 0:
            return [], 0
        has_complex_structure = self._has_complex_header_structure(ws, rows)
        if has_complex_structure:
            return self._parse_multi_level_headers(ws, rows)
        else:
            return self._parse_simple_headers(rows)

    def _has_complex_header_structure(self, ws, rows):
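        """Heuristic: the header is considered complex when any merged region touches row 1 or row 2."""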
        if len(rows) < 1:
            return False
        merged_ranges = list(ws.merged_cells.ranges)
        # Check whether any merged region involves the first two rows.
        for rng in merged_ranges:
            if rng.min_row <= 2:  # the merged region touches row 1 or row 2
                return True
        return False

    def _row_looks_like_header(self, row):
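        """Return True when the row's non-empty cells look at least as header-like as data-like."""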
        header_like_cells = 0
        data_like_cells = 0
        non_empty_cells = 0
        for cell in row:
            if cell.value is not None:
                non_empty_cells += 1
                val = str(cell.value).strip()
                if self._looks_like_header(val):
                    header_like_cells += 1
                elif self._looks_like_data(val):
                    data_like_cells += 1
        if non_empty_cells == 0:
            return False
        return header_like_cells >= data_like_cells

    def _parse_simple_headers(self, rows):
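        """Take the first row as the header; blank or missing cells fall back to positional Column_N names."""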
        if not rows:
            return [], 0
        header_row = rows[0]
        final_headers = []
        for i, cell in enumerate(header_row):
            if cell.value is not None:
                header_value = str(cell.value).strip()
                if header_value:
                    final_headers.append(header_value)
                else:
                    final_headers.append(f"Column_{i + 1}")
            else:
                final_headers.append(f"Column_{i + 1}")
        return final_headers, 1

    def _parse_multi_level_headers(self, ws, rows):
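        """Parse headers spanning several rows, falling back to simple parsing when only one header row is detected."""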
        if len(rows) < 2:
            return [], 0
        header_rows = self._detect_header_rows(rows)
        if header_rows == 1:
            return self._parse_simple_headers(rows)
        else:
            return self._build_hierarchical_headers(ws, rows, header_rows), header_rows

    def _detect_header_rows(self, rows):
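        """Count how many leading rows (up to 5) still look like header rows."""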
        if len(rows) < 2:
            return 1
        header_rows = 1
        max_check_rows = min(5, len(rows))
        for i in range(1, max_check_rows):
            row = rows[i]
            if self._row_looks_like_header(row):
                header_rows = i + 1
            else:
                break
        return header_rows

    def _looks_like_header(self, value):
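        """Header heuristic: non-ASCII text, two or more letters, or label punctuation all count as header-like."""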
        if len(value) < 1:
            return False
        if any(ord(c) > 127 for c in value):
            return True
        if len([c for c in value if c.isalpha()]) >= 2:
            return True
        if any(c in value for c in ["(", ")", ":", "：", "（", "）", "_", "-"]):
            return True
        return False

    def _looks_like_data(self, value):
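        """Data heuristic: single flag letters (Y/N/M/X), plain numbers, and short hex literals count as data-like."""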
        if len(value) == 1 and value.upper() in ["Y", "N", "M", "X", "/", "-"]:
            return True
        if value.replace(".", "").replace("-", "").replace(",", "").isdigit():
            return True
        if value.startswith("0x") and len(value) <= 10:
            return True
        return False

    def _build_hierarchical_headers(self, ws, rows, header_rows):
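        """Join the header fragments stacked above each column (resolving merged cells) with '-' into one flat column name."""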
        headers = []
        max_col = max(len(row) for row in rows[:header_rows]) if header_rows > 0 else 0
        merged_ranges = list(ws.merged_cells.ranges)
        for col_idx in range(max_col):
            header_parts = []
            for row_idx in range(header_rows):
                if col_idx < len(rows[row_idx]):
                    cell_value = rows[row_idx][col_idx].value
                    merged_value = self._get_merged_cell_value(ws, row_idx + 1, col_idx + 1, merged_ranges)
                    if merged_value is not None:
                        cell_value = merged_value
                    if cell_value is not None:
                        cell_value = str(cell_value).strip()
                        if cell_value and cell_value not in header_parts and self._is_valid_header_part(cell_value):
                            header_parts.append(cell_value)
            if header_parts:
                header = "-".join(header_parts)
                headers.append(header)
            else:
                headers.append(f"Column_{col_idx + 1}")
        final_headers = [h for h in headers if h and h != "-"]
        return final_headers

    def _is_valid_header_part(self, value):
        if len(value) == 1 and value.upper() in ["Y", "N", "M", "X"]:
            return False
        if value.replace(".", "").replace("-", "").replace(",", "").isdigit():
            return False
        if value in ["/", "-", "+", "*", "="]:
            return False
        return True

    def _get_merged_cell_value(self, ws, row, col, merged_ranges):
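        """Return the anchor (top-left) value of the merged region covering (row, col), or None when the cell is not merged."""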
        for merged_range in merged_ranges:
            if merged_range.min_row <= row <= merged_range.max_row and merged_range.min_col <= col <= merged_range.max_col:
                return ws.cell(merged_range.min_row, merged_range.min_col).value
        return None

    def _extract_row_data(self, ws, row, absolute_row_idx, expected_cols):
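        """Collect ``expected_cols`` values for one data row, filling gaps from merged regions where possible."""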
        row_data = []
        merged_ranges = list(ws.merged_cells.ranges)
        actual_row_num = absolute_row_idx + 1
        for col_idx in range(expected_cols):
            cell_value = None
            actual_col_num = col_idx + 1
            try:
                cell_value = ws.cell(row=actual_row_num, column=actual_col_num).value
            except ValueError:
                if col_idx < len(row):
                    cell_value = row[col_idx].value
            if cell_value is None:
                merged_value = self._get_merged_cell_value(ws, actual_row_num, actual_col_num, merged_ranges)
                if merged_value is not None:
                    cell_value = merged_value
                else:
                    cell_value = self._get_inherited_value(ws, actual_row_num, actual_col_num, merged_ranges)
            row_data.append(cell_value)
        return row_data

    def _get_inherited_value(self, ws, row, col, merged_ranges):
        for merged_range in merged_ranges:
            if merged_range.min_row <= row <= merged_range.max_row and merged_range.min_col <= col <= merged_range.max_col:
                return ws.cell(merged_range.min_row, merged_range.min_col).value
        return None

    def _is_empty_row(self, row_data):
        for val in row_data:
            if val is not None and str(val).strip() != "":
                return False
        return True


def trans_datatime(s):
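    """Normalize a date-like string to 'YYYY-MM-DD HH:MM:SS'; returns None when parsing fails."""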
    try:
        return datetime_parse(s.strip()).strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        pass


def trans_bool(s):
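    """Map truthy tokens (true/yes/是/✓/...) to 'yes' and falsy ones (false/no/否/...) to 'no'; anything else yields None."""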
    if re.match(r"(true|yes|是|\*|✓|✔|☑|✅|√)$", str(s).strip(), flags=re.IGNORECASE):
        return "yes"
    if re.match(r"(false|no|否|⍻|×)$", str(s).strip(), flags=re.IGNORECASE):
        return "no"


def column_data_type(arr):
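    """Infer a column's dominant type by majority vote, then coerce values.

    Counts int/float/bool/datetime/text matches per value, picks the most
    frequent type (forcing float on int overflow past 2**63 - 1), and maps
    every value through the corresponding converter; failures become None.
    """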
    arr = list(arr)
    counts = {"int": 0, "float": 0, "text": 0, "datetime": 0, "bool": 0}
    trans = {t: f for f, t in [(int, "int"), (float, "float"), (trans_datatime, "datetime"), (trans_bool, "bool"), (str, "text")]}
    float_flag = False
    for a in arr:
        if a is None:
            continue
        if re.match(r"[+-]?[0-9]+$", str(a).replace("%%", "")) and not str(a).replace("%%", "").startswith("0"):
            counts["int"] += 1
            if int(str(a)) > 2**63 - 1:
                float_flag = True
                break
        elif re.match(r"[+-]?[0-9.]{,19}$", str(a).replace("%%", "")) and not str(a).replace("%%", "").startswith("0"):
            counts["float"] += 1
        elif re.match(r"(true|yes|是|\*|✓|✔|☑|✅|√|false|no|否|⍻|×)$", str(a), flags=re.IGNORECASE):
            counts["bool"] += 1
        elif trans_datatime(str(a)):
            counts["datetime"] += 1
        else:
            counts["text"] += 1
    if float_flag:
        ty = "float"
    else:
        counts = sorted(counts.items(), key=lambda x: x[1] * -1)
        ty = counts[0][0]
    for i in range(len(arr)):
        if arr[i] is None:
            continue
        try:
            arr[i] = trans[ty](str(arr[i]))
        except Exception:
            arr[i] = None
    # if ty == "text":
    #     if len(arr) > 128 and uni / len(arr) < 0.1:
    #         ty = "keyword"
    return arr, ty


def chunk(filename, binary=None, from_page=0, to_page=10000000000, lang="Chinese", callback=None, **kwargs):
  276. """
  277. Excel and csv(txt) format files are supported.
  278. For csv or txt file, the delimiter between columns is TAB.
  279. The first line must be column headers.
  280. Column headers must be meaningful terms inorder to make our NLP model understanding.
  281. It's good to enumerate some synonyms using slash '/' to separate, and even better to
  282. enumerate values using brackets like 'gender/sex(male, female)'.
  283. Here are some examples for headers:
  284. 1. supplier/vendor\tcolor(yellow, red, brown)\tgender/sex(male, female)\tsize(M,L,XL,XXL)
  285. 2. 姓名/名字\t电话/手机/微信\t最高学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)
  286. Every row in table will be treated as a chunk.
  287. """
    if re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = Excel()
        dfs = excel_parser(filename, binary, from_page=from_page, to_page=to_page, callback=callback)
    elif re.search(r"\.(txt|csv)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = get_text(filename, binary)
        lines = txt.split("\n")
        fails = []
        headers = lines[0].split(kwargs.get("delimiter", "\t"))
        rows = []
        for i, line in enumerate(lines[1:]):
            if i < from_page:
                continue
            if i >= to_page:
                break
            row = line.split(kwargs.get("delimiter", "\t"))
            if len(row) != len(headers):
                fails.append(str(i))
                continue
            rows.append(row)
        msg = "Extract records: {}~{}".format(from_page, min(len(lines), to_page))
        if fails:
            msg += " {} failures, line: {}...".format(len(fails), ",".join(fails[:3]))
        callback(0.3, msg)
        dfs = [pd.DataFrame(np.array(rows), columns=headers)]
    else:
        raise NotImplementedError("file type not supported yet (excel, text, csv supported)")

    res = []
    PY = Pinyin()
    fields_map = {"text": "_tks", "int": "_long", "keyword": "_kwd", "float": "_flt", "datetime": "_dt", "bool": "_kwd"}
    for df in dfs:
        for n in ["id", "_id", "index", "idx"]:
            if n in df.columns:
                del df[n]
        clmns = df.columns.values
        if len(clmns) != len(set(clmns)):
            col_counts = Counter(clmns)
            duplicates = [col for col, count in col_counts.items() if count > 1]
            if duplicates:
                raise ValueError(f"Duplicate column names detected: {duplicates}\nFrom: {clmns}")
        txts = list(copy.deepcopy(clmns))
        py_clmns = [PY.get_pinyins(re.sub(r"(/.*|（[^（）]+?）|\([^()]+?\))", "", str(n)), "_")[0] for n in clmns]
        clmn_tys = []
        for j in range(len(clmns)):
            cln, ty = column_data_type(df[clmns[j]])
            clmn_tys.append(ty)
            df[clmns[j]] = cln
            if ty == "text":
                txts.extend([str(c) for c in cln if c])
        clmns_map = [(py_clmns[i].lower() + fields_map[clmn_tys[i]], str(clmns[i]).replace("_", " ")) for i in range(len(clmns))]

        eng = lang.lower() == "english"  # is_english(txts)
        for ii, row in df.iterrows():
            d = {"docnm_kwd": filename, "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))}
            row_txt = []
            for j in range(len(clmns)):
                if row[clmns[j]] is None:
                    continue
                if not str(row[clmns[j]]):
                    continue
                if not isinstance(row[clmns[j]], pd.Series) and pd.isna(row[clmns[j]]):
                    continue
                fld = clmns_map[j][0]
                d[fld] = row[clmns[j]] if clmn_tys[j] != "text" else rag_tokenizer.tokenize(row[clmns[j]])
                row_txt.append("{}:{}".format(clmns[j], row[clmns[j]]))
            if not row_txt:
                continue
            tokenize(d, "; ".join(row_txt), eng)
            res.append(d)
        KnowledgebaseService.update_parser_config(kwargs["kb_id"], {"field_map": {k: v for k, v in clmns_map}})
    callback(0.35, "")
    return res


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], callback=dummy)