You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

corporations.py 3.5KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899
  1. # Licensed under the Apache License, Version 2.0 (the "License");
  2. # you may not use this file except in compliance with the License.
  3. # You may obtain a copy of the License at
  4. #
  5. # http://www.apache.org/licenses/LICENSE-2.0
  6. #
  7. # Unless required by applicable law or agreed to in writing, software
  8. # distributed under the License is distributed on an "AS IS" BASIS,
  9. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. # See the License for the specific language governing permissions and
  11. # limitations under the License.
  12. #
  13. import re
  14. import json
  15. import os
  16. import pandas as pd
  17. from rag.nlp import rag_tokenizer
  18. from . import regions
  19. from api.utils.log_utils import logger
  20. current_file_path = os.path.dirname(os.path.abspath(__file__))
  21. GOODS = pd.read_csv(os.path.join(current_file_path, "res/corp_baike_len.csv"), sep="\t", header=0).fillna(0)
  22. GOODS["cid"] = GOODS["cid"].astype(str)
  23. GOODS = GOODS.set_index(["cid"])
  24. CORP_TKS = json.load(open(os.path.join(current_file_path, "res/corp.tks.freq.json"), "r"))
  25. GOOD_CORP = json.load(open(os.path.join(current_file_path, "res/good_corp.json"), "r"))
  26. CORP_TAG = json.load(open(os.path.join(current_file_path, "res/corp_tag.json"), "r"))
  27. def baike(cid, default_v=0):
  28. global GOODS
  29. try:
  30. return GOODS.loc[str(cid), "len"]
  31. except Exception:
  32. pass
  33. return default_v
  34. def corpNorm(nm, add_region=True):
  35. global CORP_TKS
  36. if not nm or type(nm)!=type(""):return ""
  37. nm = rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(nm)).lower()
  38. nm = re.sub(r"&", "&", nm)
  39. nm = re.sub(r"[\(\)()\+'\"\t \*\\【】-]+", " ", nm)
  40. nm = re.sub(r"([—-]+.*| +co\..*|corp\..*| +inc\..*| +ltd.*)", "", nm, 10000, re.IGNORECASE)
  41. nm = re.sub(r"(计算机|技术|(技术|科技|网络)*有限公司|公司|有限|研发中心|中国|总部)$", "", nm, 10000, re.IGNORECASE)
  42. if not nm or (len(nm)<5 and not regions.isName(nm[0:2])):return nm
  43. tks = rag_tokenizer.tokenize(nm).split(" ")
  44. reg = [t for i,t in enumerate(tks) if regions.isName(t) and (t != "中国" or i > 0)]
  45. nm = ""
  46. for t in tks:
  47. if regions.isName(t) or t in CORP_TKS:continue
  48. if re.match(r"[0-9a-zA-Z\\,.]+", t) and re.match(r".*[0-9a-zA-Z\,.]+$", nm):nm += " "
  49. nm += t
  50. r = re.search(r"^([^a-z0-9 \(\)&]{2,})[a-z ]{4,}$", nm.strip())
  51. if r:nm = r.group(1)
  52. r = re.search(r"^([a-z ]{3,})[^a-z0-9 \(\)&]{2,}$", nm.strip())
  53. if r:nm = r.group(1)
  54. return nm.strip() + (("" if not reg else "(%s)"%reg[0]) if add_region else "")
  55. def rmNoise(n):
  56. n = re.sub(r"[\((][^()()]+[))]", "", n)
  57. n = re.sub(r"[,. &()()]+", "", n)
  58. return n
  59. GOOD_CORP = set([corpNorm(rmNoise(c), False) for c in GOOD_CORP])
  60. for c,v in CORP_TAG.items():
  61. cc = corpNorm(rmNoise(c), False)
  62. if not cc:
  63. logger.info(c)
  64. CORP_TAG = {corpNorm(rmNoise(c), False):v for c,v in CORP_TAG.items()}
  65. def is_good(nm):
  66. global GOOD_CORP
  67. if nm.find("外派")>=0:return False
  68. nm = rmNoise(nm)
  69. nm = corpNorm(nm, False)
  70. for n in GOOD_CORP:
  71. if re.match(r"[0-9a-zA-Z]+$", n):
  72. if n == nm: return True
  73. elif nm.find(n)>=0:return True
  74. return False
  75. def corp_tag(nm):
  76. global CORP_TAG
  77. nm = rmNoise(nm)
  78. nm = corpNorm(nm, False)
  79. for n in CORP_TAG.keys():
  80. if re.match(r"[0-9a-zA-Z., ]+$", n):
  81. if n == nm: return CORP_TAG[n]
  82. elif nm.find(n)>=0:
  83. if len(n)<3 and len(nm)/len(n)>=2:continue
  84. return CORP_TAG[n]
  85. return []