#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
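# Logging utilities for the RAGFlow API layer: LoggerFactory caches per-module
# file loggers and per-level global handlers, and get_job_logger() builds
# job-scoped loggers whose files can be made world-writable (log_share) so
# several processes may append to them.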
import os
import typing
import traceback
import logging
import inspect
from logging.handlers import TimedRotatingFileHandler
from threading import RLock

from api.utils import file_utils

class LoggerFactory(object):
    TYPE = "FILE"
    LOG_FORMAT = "[%(levelname)s] [%(asctime)s] [%(module)s.%(funcName)s] [line:%(lineno)d]: %(message)s"
    logging.basicConfig(format=LOG_FORMAT)
    LEVEL = logging.DEBUG
    logger_dict = {}
    global_handler_dict = {}

    LOG_DIR = None
    PARENT_LOG_DIR = None
    log_share = True

    append_to_parent_log = None

    lock = RLock()
    # Standard logging levels for reference:
    # CRITICAL = 50, FATAL = CRITICAL, ERROR = 40, WARNING = 30,
    # WARN = WARNING, INFO = 20, DEBUG = 10, NOTSET = 0
    levels = (10, 20, 30, 40)
    schedule_logger_dict = {}

    @staticmethod
    def set_directory(directory=None, parent_log_dir=None,
                      append_to_parent_log=None, force=False):
        if parent_log_dir:
            LoggerFactory.PARENT_LOG_DIR = parent_log_dir
        if append_to_parent_log:
            LoggerFactory.append_to_parent_log = append_to_parent_log
        with LoggerFactory.lock:
            if not directory:
                directory = file_utils.get_project_base_directory("logs")
            if not LoggerFactory.LOG_DIR or force:
                LoggerFactory.LOG_DIR = directory
            if LoggerFactory.log_share:
                # Clear the umask temporarily so the shared log directory is
                # created world-writable, then restore the previous mask.
                oldmask = os.umask(0o000)
                os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
                os.umask(oldmask)
            else:
                os.makedirs(LoggerFactory.LOG_DIR, exist_ok=True)
            # Rebuild every cached handler so it points at the new directory.
            for loggerName, ghandler in LoggerFactory.global_handler_dict.items():
                for className, (logger, handler) in LoggerFactory.logger_dict.items():
                    logger.removeHandler(ghandler)
                ghandler.close()
            LoggerFactory.global_handler_dict = {}
            for className, (logger, handler) in LoggerFactory.logger_dict.items():
                logger.removeHandler(handler)
                _handler = None
                if handler:
                    handler.close()
                if className != "default":
                    _handler = LoggerFactory.get_handler(className)
                    logger.addHandler(_handler)
                LoggerFactory.assemble_global_handler(logger)
                LoggerFactory.logger_dict[className] = logger, _handler

    @staticmethod
    def new_logger(name):
        logger = logging.getLogger(name)
        logger.propagate = False
        logger.setLevel(LoggerFactory.LEVEL)
        return logger

    @staticmethod
    def get_logger(class_name=None):
        with LoggerFactory.lock:
            if class_name in LoggerFactory.logger_dict:
                logger, handler = LoggerFactory.logger_dict[class_name]
                if not logger:
                    logger, handler = LoggerFactory.init_logger(class_name)
            else:
                logger, handler = LoggerFactory.init_logger(class_name)
            return logger

    @staticmethod
    def get_global_handler(logger_name, level=None, log_dir=None):
        if not LoggerFactory.LOG_DIR:
            return logging.StreamHandler()
        if log_dir:
            logger_name_key = logger_name + "_" + log_dir
        else:
            logger_name_key = logger_name + "_" + LoggerFactory.LOG_DIR
        if logger_name_key not in LoggerFactory.global_handler_dict:
            # Double-checked locking: only one thread creates the handler.
            with LoggerFactory.lock:
                if logger_name_key not in LoggerFactory.global_handler_dict:
                    handler = LoggerFactory.get_handler(logger_name, level, log_dir)
                    LoggerFactory.global_handler_dict[logger_name_key] = handler
        return LoggerFactory.global_handler_dict[logger_name_key]

    @staticmethod
    def get_handler(class_name, level=None, log_dir=None,
                    log_type=None, job_id=None):
        if not log_type:
            if not LoggerFactory.LOG_DIR or not class_name:
                return logging.StreamHandler()
            if not log_dir:
                log_file = os.path.join(LoggerFactory.LOG_DIR,
                                        "{}.log".format(class_name))
            else:
                log_file = os.path.join(log_dir, "{}.log".format(class_name))
        else:
            # When log_type is given, a log_dir must also be provided.
            log_file = os.path.join(
                log_dir,
                "rag_flow_{}.log".format(log_type)
                if level == LoggerFactory.LEVEL
                else "rag_flow_{}_error.log".format(log_type))
        os.makedirs(os.path.dirname(log_file), exist_ok=True)
        if LoggerFactory.log_share:
            handler = ROpenHandler(log_file,
                                   when='D',
                                   interval=1,
                                   backupCount=14,
                                   delay=True)
        else:
            handler = TimedRotatingFileHandler(log_file,
                                               when='D',
                                               interval=1,
                                               backupCount=14,
                                               delay=True)
        if level:
            handler.setLevel(level)
        return handler
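
    # Resulting file names, for reference: with log_type='sql' this yields
    # "rag_flow_sql.log" when the requested level equals LoggerFactory.LEVEL,
    # and "rag_flow_sql_error.log" otherwise (e.g. for the ERROR handler).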

    @staticmethod
    def init_logger(class_name):
        with LoggerFactory.lock:
            logger = LoggerFactory.new_logger(class_name)
            handler = None
            if class_name:
                handler = LoggerFactory.get_handler(class_name)
                logger.addHandler(handler)
                LoggerFactory.logger_dict[class_name] = logger, handler
            else:
                LoggerFactory.logger_dict["default"] = logger, handler
            LoggerFactory.assemble_global_handler(logger)
            return logger, handler

    @staticmethod
    def assemble_global_handler(logger):
        if LoggerFactory.LOG_DIR:
            for level in LoggerFactory.levels:
                if level >= LoggerFactory.LEVEL:
                    level_logger_name = logging._levelToName[level]
                    logger.addHandler(
                        LoggerFactory.get_global_handler(level_logger_name, level))
        if LoggerFactory.append_to_parent_log and LoggerFactory.PARENT_LOG_DIR:
            for level in LoggerFactory.levels:
                if level >= LoggerFactory.LEVEL:
                    level_logger_name = logging._levelToName[level]
                    logger.addHandler(
                        LoggerFactory.get_global_handler(
                            level_logger_name, level, LoggerFactory.PARENT_LOG_DIR))


def setDirectory(directory=None):
    LoggerFactory.set_directory(directory)


def setLevel(level):
    LoggerFactory.LEVEL = level


def getLogger(className=None, useLevelFile=False):
    if className is None:
        # Caller inspection is kept from the original implementation, but the
        # logger name currently always falls back to 'stat'.
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        className = 'stat'
    return LoggerFactory.get_logger(className)


def exception_to_trace_string(ex):
    return "".join(traceback.TracebackException.from_exception(ex).format())


class ROpenHandler(TimedRotatingFileHandler):
    def _open(self):
        # Open log files with a cleared umask so every process sharing the
        # log directory can write to them, then restore the previous mask.
        prevumask = os.umask(0o000)
        rtv = TimedRotatingFileHandler._open(self)
        os.umask(prevumask)
        return rtv
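
# Because ROpenHandler overrides _open(), the permissive mode also applies to
# the files created on daily rotation, not just the initial log file.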


def sql_logger(job_id='', log_type='sql'):
    # Reuse a cached job logger when one exists for this job/type pair.
    key = job_id + log_type
    if key in LoggerFactory.schedule_logger_dict:
        return LoggerFactory.schedule_logger_dict[key]
    return get_job_logger(job_id=job_id, log_type=log_type)


def ready_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} ready{suffix}"


def start_log(msg, job=None, task=None, role=None, party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}start to {msg}{suffix}"


def successful_log(msg, job=None, task=None, role=None,
                   party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} successfully{suffix}"


def warning_log(msg, job=None, task=None, role=None,
                party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}{msg} is not effective{suffix}"


def failed_log(msg, job=None, task=None, role=None,
               party_id=None, detail=None):
    prefix, suffix = base_msg(job, task, role, party_id, detail)
    return f"{prefix}failed to {msg}{suffix}"


def base_msg(job=None, task=None, role: str = None,
             party_id: typing.Union[str, int] = None, detail=None):
    if detail:
        detail_msg = f" detail: \n{detail}"
    else:
        detail_msg = ""
    if task is not None:
        return f"task {task.f_task_id} {task.f_task_version} ", f" on {task.f_role} {task.f_party_id}{detail_msg}"
    elif job is not None:
        return "", f" on {job.f_role} {job.f_party_id}{detail_msg}"
    elif role and party_id:
        return "", f" on {role} {party_id}{detail_msg}"
    else:
        return "", f"{detail_msg}"
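
# Examples of the composed messages, for reference:
#   start_log("run task", role="guest", party_id=10000)
#       -> "start to run task on guest 10000"
#   failed_log("build index", detail="timeout")
#       -> "failed to build index detail: \ntimeout"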


def get_logger_base_dir():
    job_log_dir = file_utils.get_rag_flow_directory('logs')
    return job_log_dir


def get_job_logger(job_id, log_type):
    rag_flow_log_dir = file_utils.get_rag_flow_directory('logs', 'rag_flow')
    job_log_dir = file_utils.get_rag_flow_directory('logs', job_id)
    if not job_id:
        log_dirs = [rag_flow_log_dir]
    else:
        if log_type == 'audit':
            # Audit logs go both to the job directory and the global one.
            log_dirs = [job_log_dir, rag_flow_log_dir]
        else:
            log_dirs = [job_log_dir]
    if LoggerFactory.log_share:
        oldmask = os.umask(0o000)
        os.makedirs(job_log_dir, exist_ok=True)
        os.makedirs(rag_flow_log_dir, exist_ok=True)
        os.umask(oldmask)
    else:
        os.makedirs(job_log_dir, exist_ok=True)
        os.makedirs(rag_flow_log_dir, exist_ok=True)
    logger = LoggerFactory.new_logger(f"{job_id}_{log_type}")
    for job_log_dir in log_dirs:
        handler = LoggerFactory.get_handler(class_name=None,
                                            level=LoggerFactory.LEVEL,
                                            log_dir=job_log_dir,
                                            log_type=log_type,
                                            job_id=job_id)
        error_handler = LoggerFactory.get_handler(class_name=None,
                                                  level=logging.ERROR,
                                                  log_dir=job_log_dir,
                                                  log_type=log_type,
                                                  job_id=job_id)
        logger.addHandler(handler)
        logger.addHandler(error_handler)
    with LoggerFactory.lock:
        LoggerFactory.schedule_logger_dict[job_id + log_type] = logger
    return logger
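

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original
    # module); the log directory below is an arbitrary choice for the demo.
    setDirectory(os.path.join(os.getcwd(), "logs"))
    demo_logger = getLogger("demo")
    demo_logger.info(start_log("build index", detail="kb_id=demo"))
    try:
        raise ValueError("boom")
    except ValueError as ex:
        demo_logger.error(exception_to_trace_string(ex))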