
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import copy
import re
from io import BytesIO

import numpy as np
import pandas as pd
from dateutil.parser import parse as datetime_parse
from openpyxl import load_workbook
from xpinyin import Pinyin

from api.db.services.knowledgebase_service import KnowledgebaseService
from deepdoc.parser import ExcelParser
from rag.nlp import rag_tokenizer, is_english, tokenize, find_codec


class Excel(ExcelParser):
    def __call__(self, fnm, binary=None, from_page=0,
                 to_page=10000000000, callback=None):
        if not binary:
            wb = load_workbook(fnm)
        else:
            wb = load_workbook(BytesIO(binary))
        total = 0
        for sheetname in wb.sheetnames:
            total += len(list(wb[sheetname].rows))

        res, fails, done = [], [], 0
        rn = 0
        for sheetname in wb.sheetnames:
            ws = wb[sheetname]
            rows = list(ws.rows)
            if not rows:
                continue
            # Columns whose header cell is empty are dropped everywhere.
            headers = [cell.value for cell in rows[0]]
            missed = set([i for i, h in enumerate(headers) if h is None])
            headers = [
                cell.value for i, cell in enumerate(rows[0]) if i not in missed]
            if not headers:
                continue
            data = []
            for i, r in enumerate(rows[1:]):
                rn += 1
                if rn - 1 < from_page:
                    continue
                if rn - 1 >= to_page:
                    break
                row = [
                    cell.value for ii, cell in enumerate(r) if ii not in missed]
                # Skip rows whose cell count does not match the header.
                if len(row) != len(headers):
                    fails.append(str(i))
                    continue
                data.append(row)
                done += 1
            # Guard against sheets with no rows in the requested page range,
            # which would otherwise crash the DataFrame construction.
            if not data:
                continue
            res.append(pd.DataFrame(np.array(data), columns=headers))

        callback(0.3, ("Extract records: {}~{}".format(from_page + 1, min(to_page, from_page + rn)) + (
            f" {len(fails)} failures, lines: {','.join(fails[:3])}..." if fails else "")))
        return res
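
# Illustrative usage sketch (not part of the original module): the file name
# "products.xlsx" and the no-op callback are hypothetical.
#
#     dfs = Excel()("products.xlsx", callback=lambda prog, msg="": None)
#     # -> a list of pandas DataFrames, one per non-empty sheet, with the
#     #    first row of each sheet used as the column headers.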


def trans_datetime(s):
    # Normalize any parseable date/time string; returns None when unparseable.
    try:
        return datetime_parse(s.strip()).strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        pass
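
# For example (illustrative): trans_datetime("2024/1/2") returns
# "2024-01-02 00:00:00", while trans_datetime("n/a") returns None.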


def trans_bool(s):
    # Map common truthy/falsy spellings (English, Chinese, check marks) to "yes"/"no".
    if re.match(r"(true|yes|是|\*|✓|✔|☑|✅|√)$",
                str(s).strip(), flags=re.IGNORECASE):
        return "yes"
    if re.match(r"(false|no|否|⍻|×)$", str(s).strip(), flags=re.IGNORECASE):
        return "no"
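
# For example (illustrative): trans_bool("是") -> "yes", trans_bool("NO") -> "no",
# and trans_bool("maybe") falls through and returns None.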


def column_data_type(arr):
    arr = list(arr)
    uni = len(set([a for a in arr if a is not None]))
    counts = {"int": 0, "float": 0, "text": 0, "datetime": 0, "bool": 0}
    trans = {t: f for f, t in
             [(int, "int"), (float, "float"), (trans_datetime, "datetime"), (trans_bool, "bool"), (str, "text")]}
    # Vote on the column type by classifying every non-empty cell.
    for a in arr:
        if a is None:
            continue
        if re.match(r"[+-]?[0-9]+(\.0+)?$", str(a).replace("%%", "")):
            counts["int"] += 1
        elif re.match(r"[+-]?[0-9.]+$", str(a).replace("%%", "")):
            counts["float"] += 1
        elif re.match(r"(true|yes|是|\*|✓|✔|☑|✅|√|false|no|否|⍻|×)$", str(a), flags=re.IGNORECASE):
            counts["bool"] += 1
        elif trans_datetime(str(a)):
            counts["datetime"] += 1
        else:
            counts["text"] += 1
    counts = sorted(counts.items(), key=lambda x: x[1] * -1)
    ty = counts[0][0]
    # Coerce every cell to the winning type; cells that fail become None.
    for i in range(len(arr)):
        if arr[i] is None:
            continue
        try:
            arr[i] = trans[ty](str(arr[i]))
        except Exception:
            arr[i] = None
    # if ty == "text":
    #     if len(arr) > 128 and uni / len(arr) < 0.1:
    #         ty = "keyword"
    return arr, ty
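
# For example (illustrative): column_data_type(["1", "2", "x"]) votes two "int"
# against one "text", so it returns ([1, 2, None], "int") with the non-numeric
# cell coerced to None.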


def chunk(filename, binary=None, from_page=0, to_page=10000000000,
          lang="Chinese", callback=None, **kwargs):
    """
    Excel and csv(txt) format files are supported.
    For csv or txt files, the delimiter between columns is TAB.
    The first line must be column headers.
    Column headers must be meaningful terms in order for our NLP model to understand them.
    It's good to enumerate some synonyms using slash '/' to separate, and even better to
    enumerate values using brackets like 'gender/sex(male, female)'.
    Here are some examples for headers:
        1. supplier/vendor\tcolor(yellow, red, brown)\tgender/sex(male, female)\tsize(M,L,XL,XXL)
        2. 姓名/名字\t电话/手机/微信\t最高学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)

    Every row in the table will be treated as a chunk.
    """
    if re.search(r"\.xlsx?$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        excel_parser = Excel()
        dfs = excel_parser(
            filename,
            binary,
            from_page=from_page,
            to_page=to_page,
            callback=callback)
    elif re.search(r"\.(txt|csv)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = ""
        if binary:
            encoding = find_codec(binary)
            txt = binary.decode(encoding, errors="ignore")
        else:
            with open(filename, "r") as f:
                while True:
                    line = f.readline()
                    if not line:
                        break
                    txt += line
        lines = txt.split("\n")
        fails = []
        headers = lines[0].split(kwargs.get("delimiter", "\t"))
        rows = []
        for i, line in enumerate(lines[1:]):
            if i < from_page:
                continue
            if i >= to_page:
                break
            row = line.split(kwargs.get("delimiter", "\t"))
            if len(row) != len(headers):
                fails.append(str(i))
                continue
            rows.append(row)

        callback(0.3, ("Extract records: {}~{}".format(from_page, min(len(lines), to_page)) + (
            f" {len(fails)} failures, lines: {','.join(fails[:3])}..." if fails else "")))

        dfs = [pd.DataFrame(np.array(rows), columns=headers)]
    else:
        raise NotImplementedError(
            "file type not supported yet (Excel, text and csv are supported)")

    res = []
    PY = Pinyin()
    # Suffix appended to each generated field name, keyed by inferred column type.
    fields_map = {
        "text": "_tks",
        "int": "_long",
        "keyword": "_kwd",
        "float": "_flt",
        "datetime": "_dt",
        "bool": "_kwd"}
    for df in dfs:
        for n in ["id", "_id", "index", "idx"]:
            if n in df.columns:
                del df[n]
        clmns = df.columns.values
        txts = list(copy.deepcopy(clmns))
        # Build pinyin field names: strip "/synonym" tails and parenthesized
        # value enumerations (full-width 和 half-width parentheses) from headers.
        py_clmns = [
            PY.get_pinyins(
                re.sub(
                    r"(/.*|（[^（）]+?）|\([^()]+?\))",
                    "",
                    str(n)),
                '_')[0] for n in clmns]
        clmn_tys = []
        for j in range(len(clmns)):
            cln, ty = column_data_type(df[clmns[j]])
            clmn_tys.append(ty)
            df[clmns[j]] = cln
            if ty == "text":
                txts.extend([str(c) for c in cln if c])
        clmns_map = [(py_clmns[i].lower() + fields_map[clmn_tys[i]], str(clmns[i]).replace("_", " "))
                     for i in range(len(clmns))]

        eng = lang.lower() == "english"  # is_english(txts)
        for ii, row in df.iterrows():
            d = {
                "docnm_kwd": filename,
                "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
            }
            row_txt = []
            for j in range(len(clmns)):
                if row[clmns[j]] is None:
                    continue
                if not str(row[clmns[j]]):
                    continue
                if pd.isna(row[clmns[j]]):
                    continue
                fld = clmns_map[j][0]
                d[fld] = row[clmns[j]] if clmn_tys[j] != "text" else rag_tokenizer.tokenize(
                    row[clmns[j]])
                row_txt.append("{}:{}".format(clmns[j], row[clmns[j]]))
            if not row_txt:
                continue
            # Each non-empty row becomes one chunk: "header:value; header:value; ...".
            tokenize(d, "; ".join(row_txt), eng)
            res.append(d)
        KnowledgebaseService.update_parser_config(
            kwargs["kb_id"], {"field_map": dict(clmns_map)})
    callback(0.35, "")

    return res
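
# Illustrative TAB-delimited input for chunk() (the header names and values
# below are hypothetical; "<TAB>" stands for a literal tab character):
#
#     supplier/vendor<TAB>color(yellow, red, brown)<TAB>size(M,L,XL,XXL)
#     ACME<TAB>red<TAB>XL
#
# The data row would yield one chunk whose text reads
# "supplier/vendor:ACME; color(yellow, red, brown):red; size(M,L,XL,XXL):XL".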


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

    # Note: chunk() reads kwargs["kb_id"] to persist the field map, so a
    # standalone run needs a valid kb_id passed as a keyword argument.
    chunk(sys.argv[1], callback=dummy)