corporations.py

#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import json
import os
import re

import pandas as pd

from rag.nlp import rag_tokenizer

from . import regions

current_file_path = os.path.dirname(os.path.abspath(__file__))

# Resource tables shipped next to this module:
#   res/corp_baike_len.csv  - Baike page length per corporation id
#   res/corp.tks.freq.json  - high-frequency corporation-name tokens
#   res/good_corp.json      - list of well-known ("good") corporations
#   res/corp_tag.json       - corporation name -> tag list
GOODS = pd.read_csv(
    os.path.join(current_file_path, "res/corp_baike_len.csv"), sep="\t", header=0
).fillna(0)
GOODS["cid"] = GOODS["cid"].astype(str)
GOODS = GOODS.set_index(["cid"])
CORP_TKS = json.load(open(os.path.join(current_file_path, "res/corp.tks.freq.json"), "r"))
GOOD_CORP = json.load(open(os.path.join(current_file_path, "res/good_corp.json"), "r"))
CORP_TAG = json.load(open(os.path.join(current_file_path, "res/corp_tag.json"), "r"))


def baike(cid, default_v=0):
    """Return the Baike page length for a corporation id, or default_v if unknown."""
    global GOODS
    try:
        return GOODS.loc[str(cid), "len"]
    except Exception:
        pass
    return default_v


def corpNorm(nm, add_region=True):
    """Normalize a corporation name: full-width to half-width, traditional to
    simplified Chinese, lower-case, strip punctuation and legal-form suffixes
    (co./corp./inc./ltd./有限公司 ...), drop region names and high-frequency
    corporation tokens, and optionally append the detected region."""
    global CORP_TKS
    if not nm or type(nm) != type(""):
        return ""
    nm = rag_tokenizer.tradi2simp(rag_tokenizer.strQ2B(nm)).lower()
    nm = re.sub(r"&amp;", "&", nm)  # unescape HTML-encoded ampersands
    nm = re.sub(r"[\(\)()\+'\"\t \*\\【】-]+", " ", nm)
    nm = re.sub(r"([—-]+.*| +co\..*|corp\..*| +inc\..*| +ltd.*)", "", nm, 10000, re.IGNORECASE)
    nm = re.sub(r"(计算机|技术|(技术|科技|网络)*有限公司|公司|有限|研发中心|中国|总部)$", "", nm, 10000, re.IGNORECASE)
    if not nm or (len(nm) < 5 and not regions.isName(nm[0:2])):
        return nm

    tks = rag_tokenizer.tokenize(nm).split(" ")
    reg = [t for i, t in enumerate(tks) if regions.isName(t) and (t != "中国" or i > 0)]
    nm = ""
    for t in tks:
        if regions.isName(t) or t in CORP_TKS:
            continue
        if re.match(r"[0-9a-zA-Z\\,.]+", t) and re.match(r".*[0-9a-zA-Z\,.]+$", nm):
            nm += " "
        nm += t

    r = re.search(r"^([^a-z0-9 \(\)&]{2,})[a-z ]{4,}$", nm.strip())
    if r:
        nm = r.group(1)
    r = re.search(r"^([a-z ]{3,})[^a-z0-9 \(\)&]{2,}$", nm.strip())
    if r:
        nm = r.group(1)
    return nm.strip() + (("" if not reg else "(%s)" % reg[0]) if add_region else "")


def rmNoise(n):
    """Strip bracketed annotations and punctuation noise from a name."""
    n = re.sub(r"[\((][^()()]+[))]", "", n)
    n = re.sub(r"[,. &()()]+", "", n)
    return n


# Normalize the resource lists once at import time; print any tag entry whose
# normalized name collapses to an empty string.
GOOD_CORP = set([corpNorm(rmNoise(c), False) for c in GOOD_CORP])
for c, v in CORP_TAG.items():
    cc = corpNorm(rmNoise(c), False)
    if not cc:
        print(c)
CORP_TAG = {corpNorm(rmNoise(c), False): v for c, v in CORP_TAG.items()}


def is_good(nm):
    """Return True if the normalized name matches (or contains) an entry of
    GOOD_CORP; names containing 外派 (staff dispatch) are rejected outright."""
    global GOOD_CORP
    if nm.find("外派") >= 0:
        return False
    nm = rmNoise(nm)
    nm = corpNorm(nm, False)
    for n in GOOD_CORP:
        if re.match(r"[0-9a-zA-Z]+$", n):
            if n == nm:
                return True
        elif nm.find(n) >= 0:
            return True
    return False


def corp_tag(nm):
    """Return the tag list for the normalized name from CORP_TAG, or [] if nothing matches."""
    global CORP_TAG
    nm = rmNoise(nm)
    nm = corpNorm(nm, False)
    for n in CORP_TAG.keys():
        if re.match(r"[0-9a-zA-Z., ]+$", n):
            if n == nm:
                return CORP_TAG[n]
        elif nm.find(n) >= 0:
            if len(n) < 3 and len(nm) / len(n) >= 2:
                continue
            return CORP_TAG[n]
    return []
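
A minimal usage sketch for the helpers above. The import path and the sample company names are assumptions for illustration only; adjust the import to wherever this file lives in your package tree, and note that the module needs its res/ data files and the rag.nlp.rag_tokenizer dependency available at import time.

# Hypothetical usage sketch; import path and sample names are assumptions.
from deepdoc.parser.resume.entities import corporations as corp  # adjust to your layout

# Normalize a raw company name; add_region=True may append a region token
# recognized by regions.isName(), e.g. a city or province name in parentheses.
print(corp.corpNorm("北京百度网讯科技有限公司"))

# Strip bracketed noise first, then normalize without the region suffix.
print(corp.corpNorm(corp.rmNoise("阿里巴巴(中国)网络技术有限公司"), add_region=False))

# Lookups against the bundled resource lists.
print(corp.is_good("腾讯科技(深圳)有限公司"))  # True only if the normalized name hits GOOD_CORP
print(corp.corp_tag("字节跳动"))               # tag list from CORP_TAG, or [] when nothing matches
print(corp.baike("12345"))                     # Baike page length for a corp id, 0 if unknown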