
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import typing
import traceback
import logging
import inspect
from logging.handlers import TimedRotatingFileHandler
from threading import RLock

from api.utils import file_utils
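
# Logging utilities for the API service. LoggerFactory caches one logger per
# "class name" (which doubles as the log file name), can mirror records into
# shared per-level files and a parent log directory, and, when log_share is
# set, relaxes the umask so several processes/users can write the same logs.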


class LoggerFactory(object):
    TYPE = "FILE"
    LOG_FORMAT = "[%(levelname)s] [%(asctime)s] [jobId] [%(process)s:%(thread)s] - [%(module)s.%(funcName)s] [line:%(lineno)d]: %(message)s"
    LEVEL = logging.DEBUG
    logger_dict = {}
    global_handler_dict = {}

    LOG_DIR = None
    PARENT_LOG_DIR = None
    log_share = True

    append_to_parent_log = None

    lock = RLock()
    # CRITICAL = 50
    # FATAL = CRITICAL
    # ERROR = 40
    # WARNING = 30
    # WARN = WARNING
    # INFO = 20
    # DEBUG = 10
    # NOTSET = 0
    levels = (10, 20, 30, 40)
    schedule_logger_dict = {}

    @staticmethod
    def set_directory(directory=None, parent_log_dir=None,
                      append_to_parent_log=None, force=False):
        if parent_log_dir:
            LoggerFactory.PARENT_LOG_DIR = parent_log_dir
        if append_to_parent_log:
            LoggerFactory.append_to_parent_log = append_to_parent_log
        with LoggerFactory.lock:
            if not directory:
                directory = file_utils.get_project_base_directory("logs")
            if not LoggerFactory.LOG_DIR or force:
                LoggerFactory.LOG_DIR = directory
            if LoggerFactory.log_share:
                oldmask = os.umask(0o000)
                os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
                os.umask(oldmask)
            else:
                os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
            for loggerName, ghandler in LoggerFactory.global_handler_dict.items():
                for className, (logger, handler) in LoggerFactory.logger_dict.items():
                    logger.removeHandler(ghandler)
                ghandler.close()
            LoggerFactory.global_handler_dict = {}
            for className, (logger, handler) in LoggerFactory.logger_dict.items():
                logger.removeHandler(handler)
                _handler = None
                if handler:
                    handler.close()
                if className != "default":
                    _handler = LoggerFactory.get_handler(className)
                    logger.addHandler(_handler)
                LoggerFactory.assemble_global_handler(logger)
                LoggerFactory.logger_dict[className] = logger, _handler
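
    # Loggers are created once per name and cached in logger_dict; propagation
    # is disabled so records reach only the handlers attached here.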
    @staticmethod
    def new_logger(name):
        logger = logging.getLogger(name)
        logger.propagate = False
        logger.setLevel(LoggerFactory.LEVEL)
        return logger

    @staticmethod
    def get_logger(class_name=None):
        with LoggerFactory.lock:
            if class_name in LoggerFactory.logger_dict.keys():
                logger, handler = LoggerFactory.logger_dict[class_name]
                if not logger:
                    logger, handler = LoggerFactory.init_logger(class_name)
            else:
                logger, handler = LoggerFactory.init_logger(class_name)
            return logger

    @staticmethod
    def get_global_handler(logger_name, level=None, log_dir=None):
        if not LoggerFactory.LOG_DIR:
            return logging.StreamHandler()
        if log_dir:
            logger_name_key = logger_name + "_" + log_dir
        else:
            logger_name_key = logger_name + "_" + LoggerFactory.LOG_DIR
        # if loggerName not in LoggerFactory.globalHandlerDict:
        if logger_name_key not in LoggerFactory.global_handler_dict:
            with LoggerFactory.lock:
                if logger_name_key not in LoggerFactory.global_handler_dict:
                    handler = LoggerFactory.get_handler(
                        logger_name, level, log_dir)
                    LoggerFactory.global_handler_dict[logger_name_key] = handler
        return LoggerFactory.global_handler_dict[logger_name_key]
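
    # File naming: per-name logs go to <dir>/<class_name>.log; job logs go to
    # rag_flow_<log_type>.log (or rag_flow_<log_type>_error.log when the
    # handler level is ERROR). Files rotate daily ('D') with 14 backups kept.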
    @staticmethod
    def get_handler(class_name, level=None, log_dir=None,
                    log_type=None, job_id=None):
        if not log_type:
            if not LoggerFactory.LOG_DIR or not class_name:
                return logging.StreamHandler()
            # return Diy_StreamHandler()
            if not log_dir:
                log_file = os.path.join(
                    LoggerFactory.LOG_DIR,
                    "{}.log".format(class_name))
            else:
                log_file = os.path.join(log_dir, "{}.log".format(class_name))
        else:
            log_file = os.path.join(log_dir, "rag_flow_{}.log".format(
                log_type) if level == LoggerFactory.LEVEL else 'rag_flow_{}_error.log'.format(log_type))
        os.makedirs(os.path.dirname(log_file), exist_ok=True)
        if LoggerFactory.log_share:
            handler = ROpenHandler(log_file,
                                   when='D',
                                   interval=1,
                                   backupCount=14,
                                   delay=True)
        else:
            handler = TimedRotatingFileHandler(log_file,
                                               when='D',
                                               interval=1,
                                               backupCount=14,
                                               delay=True)
        if level:
            handler.level = level
        return handler

    @staticmethod
    def init_logger(class_name):
        with LoggerFactory.lock:
            logger = LoggerFactory.new_logger(class_name)
            handler = None
            if class_name:
                handler = LoggerFactory.get_handler(class_name)
                logger.addHandler(handler)
                LoggerFactory.logger_dict[class_name] = logger, handler
            else:
                LoggerFactory.logger_dict["default"] = logger, handler
            LoggerFactory.assemble_global_handler(logger)
            return logger, handler
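
    # For every level at or above LEVEL (DEBUG/INFO/WARNING/ERROR), attach a
    # shared per-level handler so e.g. WARNING.log aggregates warnings from
    # all loggers; the same records are optionally mirrored into
    # PARENT_LOG_DIR.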
    @staticmethod
    def assemble_global_handler(logger):
        if LoggerFactory.LOG_DIR:
            for level in LoggerFactory.levels:
                if level >= LoggerFactory.LEVEL:
                    level_logger_name = logging._levelToName[level]
                    logger.addHandler(
                        LoggerFactory.get_global_handler(
                            level_logger_name, level))
        if LoggerFactory.append_to_parent_log and LoggerFactory.PARENT_LOG_DIR:
            for level in LoggerFactory.levels:
                if level >= LoggerFactory.LEVEL:
                    level_logger_name = logging._levelToName[level]
                    logger.addHandler(
                        LoggerFactory.get_global_handler(level_logger_name, level, LoggerFactory.PARENT_LOG_DIR))
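

# Module-level convenience wrappers, kept with their original camelCase names
# for backward compatibility with existing callers.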
def setDirectory(directory=None):
    LoggerFactory.set_directory(directory)


def setLevel(level):
    LoggerFactory.LEVEL = level


def getLogger(className=None, useLevelFile=False):
    if className is None:
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        className = 'stat'
    return LoggerFactory.get_logger(className)


def exception_to_trace_string(ex):
    return "".join(traceback.TracebackException.from_exception(ex).format())


class ROpenHandler(TimedRotatingFileHandler):
    # Same as TimedRotatingFileHandler, but opens (and rotates) files under a
    # permissive umask so shared log files stay writable across users.
    def _open(self):
        prevumask = os.umask(0o000)
        rtv = TimedRotatingFileHandler._open(self)
        os.umask(prevumask)
        return rtv


def sql_logger(job_id='', log_type='sql'):
    key = job_id + log_type
    if key in LoggerFactory.schedule_logger_dict.keys():
        return LoggerFactory.schedule_logger_dict[key]
    return get_job_logger(job_id=job_id, log_type=log_type)
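

# Message builders: each returns a formatted status string (it does not log
# by itself). For example, start_log("run component", role="guest",
# party_id=9999) returns "start to run component on guest 9999".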
def ready_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} ready{suffix}"


def start_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}start to {msg}{suffix}"


def successful_log(msg, job=None, task=None, role=None,
                   party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} successfully{suffix}"


def warning_log(msg, job=None, task=None, role=None,
                party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} is not effective{suffix}"


def failed_log(msg, job=None, task=None, role=None,
               party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}failed to {msg}{suffix}"


def base_msg(job=None, task=None, role: str = None,
             party_id: typing.Union[str, int] = None, detail=None):
    if detail:
        detail_msg = f" detail: \n{detail}"
    else:
        detail_msg = ""
    if task is not None:
        return f"task {task.f_task_id} {task.f_task_version} ", f" on {task.f_role} {task.f_party_id}{detail_msg}"
    elif job is not None:
        return "", f" on {job.f_role} {job.f_party_id}{detail_msg}"
    elif role and party_id:
        return "", f" on {role} {party_id}{detail_msg}"
    else:
        return "", f"{detail_msg}"


def get_logger_base_dir():
    job_log_dir = file_utils.get_rag_flow_directory('logs')
    return job_log_dir
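

# Job loggers get two handlers per directory: a full log at LEVEL and a
# separate *_error.log at ERROR. Audit logs are mirrored into both the
# per-job directory and the shared rag_flow directory.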
def get_job_logger(job_id, log_type):
    rag_flow_log_dir = file_utils.get_rag_flow_directory('logs', 'rag_flow')
    job_log_dir = file_utils.get_rag_flow_directory('logs', job_id)
    if not job_id:
        log_dirs = [rag_flow_log_dir]
    else:
        if log_type == 'audit':
            log_dirs = [job_log_dir, rag_flow_log_dir]
        else:
            log_dirs = [job_log_dir]
    if LoggerFactory.log_share:
        oldmask = os.umask(0o000)
        os.makedirs(job_log_dir, exist_ok=True)
        os.makedirs(rag_flow_log_dir, exist_ok=True)
        os.umask(oldmask)
    else:
        os.makedirs(job_log_dir, exist_ok=True)
        os.makedirs(rag_flow_log_dir, exist_ok=True)
    logger = LoggerFactory.new_logger(f"{job_id}_{log_type}")
    for job_log_dir in log_dirs:
        handler = LoggerFactory.get_handler(class_name=None, level=LoggerFactory.LEVEL,
                                            log_dir=job_log_dir, log_type=log_type, job_id=job_id)
        error_handler = LoggerFactory.get_handler(
            class_name=None,
            level=logging.ERROR,
            log_dir=job_log_dir,
            log_type=log_type,
            job_id=job_id)
        logger.addHandler(handler)
        logger.addHandler(error_handler)
    with LoggerFactory.lock:
        LoggerFactory.schedule_logger_dict[job_id + log_type] = logger
    return logger
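

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module). It
# assumes this file lives at api/utils/log_utils.py, the `api` package is
# importable, and the log directories are writable:
#
#     from api.utils.log_utils import LoggerFactory, getLogger, \
#         start_log, exception_to_trace_string
#
#     LoggerFactory.set_directory()        # defaults to <project>/logs
#     logger = getLogger("database")       # writes database.log + per-level files
#     logger.info(start_log("connect to db", role="server", party_id=1))
#     try:
#         1 / 0
#     except ZeroDivisionError as e:
#         logger.error(exception_to_trace_string(e))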