#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy
import re
from io import BytesIO
from collections import Counter

from xpinyin import Pinyin
import numpy as np
import pandas as pd
# from openpyxl import load_workbook, Workbook
from dateutil.parser import parse as datetime_parse

from api.db.services.knowledgebase_service import KnowledgebaseService
from deepdoc.parser import ExcelParser
from deepdoc.parser.utils import get_text
from rag.nlp import rag_tokenizer, tokenize

class Excel(ExcelParser):
    def __call__(self, fnm, binary=None, from_page=0, to_page=10000000000, callback=None):
        if not binary:
            wb = Excel._load_excel_to_workbook(fnm)
        else:
            wb = Excel._load_excel_to_workbook(BytesIO(binary))
        total = 0
        for sheetname in wb.sheetnames:
            total += len(list(wb[sheetname].rows))

        res, fails, done = [], [], 0
        # from_page/to_page are absolute data-row indexes counted across all sheets.
        rn = 0
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]
            rows = list(ws.rows)
            if not rows:
                continue
            # Columns whose header cell is empty are dropped from every row.
            headers = [cell.value for cell in rows[0]]
            missed = set([i for i, h in enumerate(headers) if h is None])
            headers = [cell.value for i, cell in enumerate(rows[0]) if i not in missed]
            if not headers:
                continue
            data = []
            for i, r in enumerate(rows[1:]):
                rn += 1
                if rn - 1 < from_page:
                    continue
                if rn - 1 >= to_page:
                    break
                row = [cell.value for ii, cell in enumerate(r) if ii not in missed]
                if len(row) != len(headers):
                    fails.append(str(i))
                    continue
                data.append(row)
                done += 1
            if np.array(data).size == 0:
                continue
            res.append(pd.DataFrame(np.array(data), columns=headers))

        callback(0.3, ("Extract records: {}~{}".format(from_page + 1, min(to_page, from_page + rn)) + (f"{len(fails)} failure, line: %s..." % (",".join(fails[:3])) if fails else "")))
        return res

def trans_datatime(s):
    try:
        return datetime_parse(s.strip()).strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        pass


def trans_bool(s):
    if re.match(r"(true|yes|是|\*|✓|✔|☑|✅|√)$", str(s).strip(), flags=re.IGNORECASE):
        return "yes"
    if re.match(r"(false|no|否|⍻|×)$", str(s).strip(), flags=re.IGNORECASE):
        return "no"

def column_data_type(arr):
    arr = list(arr)
    counts = {"int": 0, "float": 0, "text": 0, "datetime": 0, "bool": 0}
    trans = {t: f for f, t in [(int, "int"), (float, "float"), (trans_datatime, "datetime"), (trans_bool, "bool"), (str, "text")]}
    float_flag = False
    # Vote on a type for the whole column, then coerce every value to it.
    for a in arr:
        if a is None:
            continue
        if re.match(r"[+-]?[0-9]+$", str(a).replace("%%", "")) and not str(a).replace("%%", "").startswith("0"):
            counts["int"] += 1
            # Integers that overflow int64 force the whole column to float.
            if int(str(a)) > 2**63 - 1:
                float_flag = True
                break
        elif re.match(r"[+-]?[0-9.]{,19}$", str(a).replace("%%", "")) and not str(a).replace("%%", "").startswith("0"):
            counts["float"] += 1
        elif re.match(r"(true|yes|是|\*|✓|✔|☑|✅|√|false|no|否|⍻|×)$", str(a), flags=re.IGNORECASE):
            counts["bool"] += 1
        elif trans_datatime(str(a)):
            counts["datetime"] += 1
        else:
            counts["text"] += 1
    if float_flag:
        ty = "float"
    else:
        counts = sorted(counts.items(), key=lambda x: x[1] * -1)
        ty = counts[0][0]
    for i in range(len(arr)):
        if arr[i] is None:
            continue
        try:
            arr[i] = trans[ty](str(arr[i]))
        except Exception:
            arr[i] = None
    # if ty == "text":
    #     if len(arr) > 128 and uni / len(arr) < 0.1:
    #         ty = "keyword"
    return arr, ty

def chunk(filename, binary=None, from_page=0, to_page=10000000000, lang="Chinese", callback=None, **kwargs):
    """
    Excel and csv(txt) format files are supported.
    For csv or txt files, the delimiter between columns is TAB.
    The first line must be column headers.
    Column headers must be meaningful terms in order for our NLP model to understand them.
    It's good to enumerate some synonyms using slash '/' to separate, and even better to
    enumerate values using brackets like 'gender/sex(male, female)'.
    Here are some examples of headers:
        1. supplier/vendor\tcolor(yellow, red, brown)\tgender/sex(male, female)\tsize(M,L,XL,XXL)
        2. 姓名/名字\t电话/手机/微信\t最高学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)

    Every row in the table will be treated as a chunk.
    """
    if re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = Excel()
        dfs = excel_parser(filename, binary, from_page=from_page, to_page=to_page, callback=callback)
    elif re.search(r"\.(txt|csv)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = get_text(filename, binary)
        lines = txt.split("\n")
        fails = []
        headers = lines[0].split(kwargs.get("delimiter", "\t"))
        rows = []
        for i, line in enumerate(lines[1:]):
            if i < from_page:
                continue
            if i >= to_page:
                break
            row = [field for field in line.split(kwargs.get("delimiter", "\t"))]
            if len(row) != len(headers):
                fails.append(str(i))
                continue
            rows.append(row)

        callback(0.3, ("Extract records: {}~{}".format(from_page, min(len(lines), to_page)) + (f"{len(fails)} failure, line: %s..." % (",".join(fails[:3])) if fails else "")))

        dfs = [pd.DataFrame(np.array(rows), columns=headers)]
    else:
        raise NotImplementedError("file type not supported yet (excel, text, csv supported)")

    res = []
    PY = Pinyin()
    # Map inferred column types to index field-name suffixes.
    fieds_map = {"text": "_tks", "int": "_long", "keyword": "_kwd", "float": "_flt", "datetime": "_dt", "bool": "_kwd"}
    for df in dfs:
        for n in ["id", "_id", "index", "idx"]:
            if n in df.columns:
                del df[n]
        clmns = df.columns.values
        if len(clmns) != len(set(clmns)):
            col_counts = Counter(clmns)
            duplicates = [col for col, count in col_counts.items() if count > 1]
            if duplicates:
                raise ValueError(f"Duplicate column names detected: {duplicates}\nFrom: {clmns}")
        txts = list(copy.deepcopy(clmns))
        # Strip synonym suffixes ("/...") and parenthesized value lists (both full-width
        # （）and ASCII ()) from the header before converting it to pinyin.
        py_clmns = [PY.get_pinyins(re.sub(r"(/.*|（[^（）]+?）|\([^()]+?\))", "", str(n)), "_")[0] for n in clmns]
        clmn_tys = []
        for j in range(len(clmns)):
            cln, ty = column_data_type(df[clmns[j]])
            clmn_tys.append(ty)
            df[clmns[j]] = cln
            if ty == "text":
                txts.extend([str(c) for c in cln if c])
        clmns_map = [(py_clmns[i].lower() + fieds_map[clmn_tys[i]], str(clmns[i]).replace("_", " ")) for i in range(len(clmns))]

        eng = lang.lower() == "english"  # is_english(txts)
        # Every data row becomes one chunk: typed fields plus a tokenized "header:value" summary.
        for ii, row in df.iterrows():
            d = {"docnm_kwd": filename, "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))}
            row_txt = []
            for j in range(len(clmns)):
                if row[clmns[j]] is None:
                    continue
                if not str(row[clmns[j]]):
                    continue
                if not isinstance(row[clmns[j]], pd.Series) and pd.isna(row[clmns[j]]):
                    continue
                fld = clmns_map[j][0]
                d[fld] = row[clmns[j]] if clmn_tys[j] != "text" else rag_tokenizer.tokenize(row[clmns[j]])
                row_txt.append("{}:{}".format(clmns[j], row[clmns[j]]))
            if not row_txt:
                continue
            tokenize(d, "; ".join(row_txt), eng)
            res.append(d)
        KnowledgebaseService.update_parser_config(kwargs["kb_id"], {"field_map": {k: v for k, v in clmns_map}})

    callback(0.35, "")
    return res

if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    chunk(sys.argv[1], callback=dummy)