#
# Copyright 2019 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import typing
import traceback
import logging
import inspect
from logging.handlers import TimedRotatingFileHandler
from threading import RLock

from api.utils import file_utils


class LoggerFactory(object):
    TYPE = "FILE"
    LOG_FORMAT = "[%(levelname)s] [%(asctime)s] [jobId] [%(process)s:%(thread)s] - [%(module)s.%(funcName)s] [line:%(lineno)d]: %(message)s"
    LEVEL = logging.DEBUG
    logger_dict = {}
    global_handler_dict = {}

    LOG_DIR = None
    PARENT_LOG_DIR = None
    log_share = True

    append_to_parent_log = None

    lock = RLock()
    # CRITICAL = 50
    # FATAL = CRITICAL
    # ERROR = 40
    # WARNING = 30
    # WARN = WARNING
    # INFO = 20
    # DEBUG = 10
    # NOTSET = 0
    levels = (10, 20, 30, 40)
    schedule_logger_dict = {}

    @staticmethod
    def set_directory(directory=None, parent_log_dir=None, append_to_parent_log=None, force=False):
        if parent_log_dir:
            LoggerFactory.PARENT_LOG_DIR = parent_log_dir
        if append_to_parent_log:
            LoggerFactory.append_to_parent_log = append_to_parent_log
        with LoggerFactory.lock:
            if not directory:
                directory = file_utils.get_project_base_directory("logs")
            if not LoggerFactory.LOG_DIR or force:
                LoggerFactory.LOG_DIR = directory
            if LoggerFactory.log_share:
                oldmask = os.umask(000)
                os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
                os.umask(oldmask)
            else:
                os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
            for loggerName, ghandler in LoggerFactory.global_handler_dict.items():
                for className, (logger, handler) in LoggerFactory.logger_dict.items():
                    logger.removeHandler(ghandler)
                ghandler.close()
            LoggerFactory.global_handler_dict = {}
            for className, (logger, handler) in LoggerFactory.logger_dict.items():
                logger.removeHandler(handler)
                _handler = None
                if handler:
                    handler.close()
                if className != "default":
                    _handler = LoggerFactory.get_handler(className)
                    logger.addHandler(_handler)
                LoggerFactory.assemble_global_handler(logger)
                LoggerFactory.logger_dict[className] = logger, _handler
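
    # Usage sketch (hedged): the call site below is an assumption, not part of
    # this module. A service entry point would typically call set_directory()
    # once at startup, e.g.
    #     LoggerFactory.set_directory(file_utils.get_project_base_directory("logs"))
    # so that every logger created afterwards writes its rotating files under
    # that directory instead of falling back to a plain StreamHandler.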

    @staticmethod
    def new_logger(name):
        logger = logging.getLogger(name)
        logger.propagate = False
        logger.setLevel(LoggerFactory.LEVEL)
        return logger

    @staticmethod
    def get_logger(class_name=None):
        with LoggerFactory.lock:
            if class_name in LoggerFactory.logger_dict.keys():
                logger, handler = LoggerFactory.logger_dict[class_name]
                if not logger:
                    logger, handler = LoggerFactory.init_logger(class_name)
            else:
                logger, handler = LoggerFactory.init_logger(class_name)
            return logger

    @staticmethod
    def get_global_handler(logger_name, level=None, log_dir=None):
        if not LoggerFactory.LOG_DIR:
            return logging.StreamHandler()
        if log_dir:
            logger_name_key = logger_name + "_" + log_dir
        else:
            logger_name_key = logger_name + "_" + LoggerFactory.LOG_DIR
        # if loggerName not in LoggerFactory.globalHandlerDict:
        if logger_name_key not in LoggerFactory.global_handler_dict:
            with LoggerFactory.lock:
                if logger_name_key not in LoggerFactory.global_handler_dict:
                    handler = LoggerFactory.get_handler(logger_name, level, log_dir)
                    LoggerFactory.global_handler_dict[logger_name_key] = handler
        return LoggerFactory.global_handler_dict[logger_name_key]

    @staticmethod
    def get_handler(class_name, level=None, log_dir=None, log_type=None, job_id=None):
        if not log_type:
            if not LoggerFactory.LOG_DIR or not class_name:
                return logging.StreamHandler()
                # return Diy_StreamHandler()

            if not log_dir:
                log_file = os.path.join(LoggerFactory.LOG_DIR, "{}.log".format(class_name))
            else:
                log_file = os.path.join(log_dir, "{}.log".format(class_name))
        else:
            log_file = os.path.join(log_dir, "rag_flow_{}.log".format(
                log_type) if level == LoggerFactory.LEVEL else 'rag_flow_{}_error.log'.format(log_type))
        os.makedirs(os.path.dirname(log_file), exist_ok=True)
        if LoggerFactory.log_share:
            handler = ROpenHandler(log_file,
                                   when='D',
                                   interval=1,
                                   backupCount=14,
                                   delay=True)
        else:
            handler = TimedRotatingFileHandler(log_file,
                                               when='D',
                                               interval=1,
                                               backupCount=14,
                                               delay=True)
        if level:
            handler.level = level
        return handler

    @staticmethod
    def init_logger(class_name):
        with LoggerFactory.lock:
            logger = LoggerFactory.new_logger(class_name)
            handler = None
            if class_name:
                handler = LoggerFactory.get_handler(class_name)
                logger.addHandler(handler)
                LoggerFactory.logger_dict[class_name] = logger, handler
            else:
                LoggerFactory.logger_dict["default"] = logger, handler
            LoggerFactory.assemble_global_handler(logger)
            return logger, handler

    @staticmethod
    def assemble_global_handler(logger):
        if LoggerFactory.LOG_DIR:
            for level in LoggerFactory.levels:
                if level >= LoggerFactory.LEVEL:
                    level_logger_name = logging._levelToName[level]
                    logger.addHandler(LoggerFactory.get_global_handler(level_logger_name, level))
        if LoggerFactory.append_to_parent_log and LoggerFactory.PARENT_LOG_DIR:
            for level in LoggerFactory.levels:
                if level >= LoggerFactory.LEVEL:
                    level_logger_name = logging._levelToName[level]
                    logger.addHandler(
                        LoggerFactory.get_global_handler(level_logger_name, level, LoggerFactory.PARENT_LOG_DIR))


def setDirectory(directory=None):
    LoggerFactory.set_directory(directory)


def setLevel(level):
    LoggerFactory.LEVEL = level


def getLogger(className=None, useLevelFile=False):
    if className is None:
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        className = 'stat'
    return LoggerFactory.get_logger(className)


def exception_to_trace_string(ex):
    return "".join(traceback.TracebackException.from_exception(ex).format())
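

# Usage sketch (hedged): a typical caller obtains a named logger via getLogger()
# and renders exceptions with exception_to_trace_string(). The import path and
# do_work() below are assumptions for illustration only.
#
#     from api.utils.log_utils import getLogger, exception_to_trace_string
#
#     stat_logger = getLogger("stat")
#     try:
#         do_work()
#     except Exception as e:
#         stat_logger.error(exception_to_trace_string(e))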


class ROpenHandler(TimedRotatingFileHandler):
    def _open(self):
        prevumask = os.umask(000)
        rtv = TimedRotatingFileHandler._open(self)
        os.umask(prevumask)
        return rtv


def sql_logger(job_id='', log_type='sql'):
    key = job_id + log_type
    if key in LoggerFactory.schedule_logger_dict.keys():
        return LoggerFactory.schedule_logger_dict[key]
    return get_job_logger(job_id=job_id, log_type=log_type)


def ready_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} ready{suffix}"


def start_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}start to {msg}{suffix}"


def successful_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} successfully{suffix}"


def warning_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} is not effective{suffix}"


def failed_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}failed to {msg}{suffix}"


def base_msg(job=None, task=None, role: str = None, party_id: typing.Union[str, int] = None, detail=None):
    if detail:
        detail_msg = f" detail: \n{detail}"
    else:
        detail_msg = ""
    if task is not None:
        return f"task {task.f_task_id} {task.f_task_version} ", f" on {task.f_role} {task.f_party_id}{detail_msg}"
    elif job is not None:
        return "", f" on {job.f_role} {job.f_party_id}{detail_msg}"
    elif role and party_id:
        return "", f" on {role} {party_id}{detail_msg}"
    else:
        return "", f"{detail_msg}"


def exception_to_trace_string(ex):
    return "".join(traceback.TracebackException.from_exception(ex).format())
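

# Usage sketch (hedged; the message and detail are made up): the *_log helpers
# only build prefixed/suffixed strings, so they are normally passed to a logger:
#
#     logger = getLogger("stat")
#     logger.info(start_log("build index", detail="42 documents"))
#     # -> "start to build index detail: \n42 documents"  (\n is a real newline)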


def get_logger_base_dir():
    job_log_dir = file_utils.get_rag_flow_directory('logs')
    return job_log_dir


def get_job_logger(job_id, log_type):
    rag_flow_log_dir = file_utils.get_rag_flow_directory('logs', 'rag_flow')
    job_log_dir = file_utils.get_rag_flow_directory('logs', job_id)
    if not job_id:
        log_dirs = [rag_flow_log_dir]
    else:
        if log_type == 'audit':
            log_dirs = [job_log_dir, rag_flow_log_dir]
        else:
            log_dirs = [job_log_dir]
    if LoggerFactory.log_share:
        oldmask = os.umask(000)
        os.makedirs(job_log_dir, exist_ok=True)
        os.makedirs(rag_flow_log_dir, exist_ok=True)
        os.umask(oldmask)
    else:
        os.makedirs(job_log_dir, exist_ok=True)
        os.makedirs(rag_flow_log_dir, exist_ok=True)
    logger = LoggerFactory.new_logger(f"{job_id}_{log_type}")
    for job_log_dir in log_dirs:
        handler = LoggerFactory.get_handler(class_name=None, level=LoggerFactory.LEVEL,
                                            log_dir=job_log_dir, log_type=log_type, job_id=job_id)
        error_handler = LoggerFactory.get_handler(class_name=None, level=logging.ERROR,
                                                  log_dir=job_log_dir, log_type=log_type, job_id=job_id)
        logger.addHandler(handler)
        logger.addHandler(error_handler)
    with LoggerFactory.lock:
        LoggerFactory.schedule_logger_dict[job_id + log_type] = logger
    return logger
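

# Usage sketch (hedged; the job id is made up): per-job loggers are cached in
# LoggerFactory.schedule_logger_dict under job_id + log_type, so sql_logger()
# returns the same logger on repeated calls for the same job.
#
#     job_logger = get_job_logger(job_id="20240101_0001", log_type="schedule")
#     job_logger.info(start_log("parse documents", detail="bucket: demo"))
#     sql_logger(job_id="20240101_0001").info("SELECT 1;")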