logger.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.

import inspect
import os
import sys

from loguru import logger


def get_caller_name(depth=0):
  9. """
  10. Args:
  11. depth (int): Depth of caller conext, use 0 for caller depth. Default value: 0.
  12. Returns:
  13. str: module name of the caller
  14. """
  15. # the following logic is a little bit faster than inspect.stack() logic
  16. frame = inspect.currentframe().f_back
  17. for _ in range(depth):
  18. frame = frame.f_back
  19. return frame.f_globals["__name__"]
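

# A minimal sketch (not part of the original file) of the depth semantics:
# depth=0 resolves to the module of the direct caller, and each extra unit
# of depth walks one frame further up the stack. The function name below is
# hypothetical.
def _caller_name_demo():
    # called from within this module, so this returns this module's __name__
    # (e.g. "__main__" when the file is executed as a script)
    return get_caller_name(depth=0)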


class StreamToLoguru:
    """
    Stream object that redirects writes to a logger instance.
    """

    def __init__(self, level="INFO", caller_names=("apex", "pycocotools")):
        """
        Args:
            level (str): log level string of loguru. Default value: "INFO".
            caller_names (tuple): caller names of the redirected modules.
                Default value: ("apex", "pycocotools").
        """
        self.level = level
        self.linebuf = ""
        self.caller_names = caller_names

    def write(self, buf):
        full_name = get_caller_name(depth=1)
        # keep only the top-level package name,
        # e.g. "pycocotools.coco" -> "pycocotools"
        module_name = full_name.rsplit(".", maxsplit=-1)[0]
        if module_name in self.caller_names:
            for line in buf.rstrip().splitlines():
                # log with extra depth so loguru attributes the record
                # to the original call site rather than to this method
                logger.opt(depth=2).log(self.level, line.rstrip())
        else:
            sys.__stdout__.write(buf)

    def flush(self):
        pass


def redirect_sys_output(log_level="INFO"):
    redirect_logger = StreamToLoguru(log_level)
    sys.stderr = redirect_logger
    sys.stdout = redirect_logger
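

# A minimal sketch (not part of the original module): redirect_sys_output()
# swaps both streams for a single StreamToLoguru instance, so prints from the
# whitelisted packages route through loguru while everything else falls
# through to the real stdout. The helper below is hypothetical, shown only to
# illustrate undoing the swap via the interpreter's saved originals.
def _restore_sys_output():
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__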


def setup_logger(save_dir, distributed_rank=0, filename="log.txt", mode="a"):
    """
    Setup logger for training and testing.

    Args:
        save_dir (str): location to save the log file.
        distributed_rank (int): device rank in a multi-GPU environment.
        filename (str): log file name.
        mode (str): log file write mode, "a" (append) or "o" (override).
            Default value: "a".

    Returns:
        logger instance.
    """
    loguru_format = (
        "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | "
        "<level>{level: <8}</level> | "
        "<cyan>{name}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>"
    )

    logger.remove()
    save_file = os.path.join(save_dir, filename)
    if mode == "o" and os.path.exists(save_file):
        os.remove(save_file)

    # only keep the logger in the rank-0 process
    if distributed_rank == 0:
        logger.add(
            sys.stderr,
            format=loguru_format,
            level="INFO",
            enqueue=True,
        )
        logger.add(save_file)

    # redirect stdout/stderr to loguru
    redirect_sys_output("INFO")

    return logger
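

# A minimal usage sketch (assumed, not part of the original file); "./logs"
# is a hypothetical output directory.
if __name__ == "__main__":
    os.makedirs("./logs", exist_ok=True)
    setup_logger("./logs", distributed_rank=0, filename="log.txt", mode="a")
    logger.info("logging to stderr and ./logs/log.txt")
    # this print comes from __main__, which is not in the caller whitelist,
    # so it falls through to the real stdout
    print("hello from the real stdout")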