Logging enhancements and cleanup (#2502)
* Logging enhancements and cleanup

- Removed debug log messages every time `Config.get_conf` is used or a JSON file is read/saved. The basic configuration is now logged once with DEBUG when the bot starts up instead.
- Changed logging output format to reverse the date order, include seconds, and use the logger's name instead of the module, function and line number.
- Log files are now kept in the `DATAPATH/core/logs` directory. Each time Red is restarted, a new log is created, and the old ones are renamed in a rotating fashion. There can be a maximum of 9 logs in total.
- Each log file now has a smaller max size of 500KB before it is split into multiple parts. There is also a maximum of 9 parts per log.
- The discord.py logger now uses the same output formatter as Red's loggers.
- Moved logging setup code into the `redbot.logging` module.

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Reformat

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>

* Implement discussed changes

- We maintain a red.log over multiple runtimes, alongside a latest.log and previous.log for individual runtimes.
- The naming convention changed a bit: while latest.log is a single part, it is named latest.log; once it grows to two parts, they are named latest-part1.log and latest-part2.log.
- The rotation direction is reversed, so the files end up being named in chronological order.

Signed-off-by: Toby Harradine <tobyharradine@gmail.com>
parent 30af83aa6a
commit 301c800319
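For illustration, after a couple of restarts and one mid-session rotation, the new `DATAPATH/core/logs` directory could look like this (a hypothetical listing based on the naming convention described above):

    core/logs/
        red-part1.log      # combined log spanning multiple runtimes, oldest part
        red-part2.log      # combined log, part currently being written
        previous.log       # log from the previous runtime (single part)
        latest-part1.log   # current runtime, first part
        latest-part2.log   # current runtime, part currently being written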
redbot/__main__.py

@@ -2,29 +2,31 @@
 
 # Discord Version check
+
+import asyncio
+import logging
+import os
 import sys
+
 import discord
+
+import redbot.logging
 from redbot.core.bot import Red, ExitCodes
 from redbot.core.cog_manager import CogManagerUI
-from redbot.core.data_manager import create_temp_config, load_basic_configuration, config_file
 from redbot.core.json_io import JsonIO
 from redbot.core.global_checks import init_global_checks
 from redbot.core.events import init_events
 from redbot.core.cli import interactive_config, confirm, parse_cli_flags
 from redbot.core.core_commands import Core
 from redbot.core.dev_commands import Dev
-from redbot.core import modlog, bank
+from redbot.core import modlog, bank, data_manager
 from signal import SIGTERM
-import asyncio
-import logging.handlers
-import logging
-import os
 
 # Let's not force this dependency, uvloop is much faster on cpython
 if sys.implementation.name == "cpython":
     try:
         import uvloop
     except ImportError:
-        uvloop = None
+        pass
     else:
         asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
 
@@ -32,6 +34,7 @@ if sys.implementation.name == "cpython":
 if sys.platform == "win32":
     asyncio.set_event_loop(asyncio.ProactorEventLoop())
 
+log = logging.getLogger("red.main")
 
 #
 # Red - Discord Bot v3
@@ -40,46 +43,6 @@ if sys.platform == "win32":
 #
 
 
-def init_loggers(cli_flags):
-    # d.py stuff
-    dpy_logger = logging.getLogger("discord")
-    dpy_logger.setLevel(logging.WARNING)
-    console = logging.StreamHandler()
-    console.setLevel(logging.WARNING)
-    dpy_logger.addHandler(console)
-
-    # Red stuff
-
-    logger = logging.getLogger("red")
-
-    red_format = logging.Formatter(
-        "%(asctime)s %(levelname)s %(module)s %(funcName)s %(lineno)d: %(message)s",
-        datefmt="[%d/%m/%Y %H:%M]",
-    )
-
-    stdout_handler = logging.StreamHandler(sys.stdout)
-    stdout_handler.setFormatter(red_format)
-
-    if cli_flags.debug:
-        os.environ["PYTHONASYNCIODEBUG"] = "1"
-        logger.setLevel(logging.DEBUG)
-    else:
-        logger.setLevel(logging.INFO)
-
-    from redbot.core.data_manager import core_data_path
-
-    logfile_path = core_data_path() / "red.log"
-    fhandler = logging.handlers.RotatingFileHandler(
-        filename=str(logfile_path), encoding="utf-8", mode="a", maxBytes=10 ** 7, backupCount=5
-    )
-    fhandler.setFormatter(red_format)
-
-    logger.addHandler(fhandler)
-    logger.addHandler(stdout_handler)
-
-    return logger
-
-
 async def _get_prefix_and_token(red, indict):
     """
     Again, please blame <@269933075037814786> for this.
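For comparison, a record the removed formatter above would have rendered roughly as (values illustrative):

    [23/03/2019 18:05] INFO events on_ready 58: Connected to Discord.

is rendered by the new formatter (see redbot/logging.py below) as:

    [2019-03-23 18:05:42] [INFO] red.events: Connected to Discord.

i.e. year-first date order, seconds included, and the logger's name in place of module, function and line number.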
@@ -91,14 +54,14 @@ async def _get_prefix_and_token(red, indict):
 
 
 def list_instances():
-    if not config_file.exists():
+    if not data_manager.config_file.exists():
         print(
             "No instances have been configured! Configure one "
             "using `redbot-setup` before trying to run the bot!"
         )
         sys.exit(1)
     else:
-        data = JsonIO(config_file)._load_json()
+        data = JsonIO(data_manager.config_file)._load_json()
         text = "Configured Instances:\n\n"
         for instance_name in sorted(data.keys()):
             text += "{}\n".format(instance_name)
@@ -125,13 +88,21 @@ def main():
     if cli_flags.no_instance:
         print(
             "\033[1m"
-            "Warning: The data will be placed in a temporary folder and removed on next system reboot."
+            "Warning: The data will be placed in a temporary folder and removed on next system "
+            "reboot."
             "\033[0m"
         )
         cli_flags.instance_name = "temporary_red"
-        create_temp_config()
-    load_basic_configuration(cli_flags.instance_name)
-    log = init_loggers(cli_flags)
+        data_manager.create_temp_config()
+    data_manager.load_basic_configuration(cli_flags.instance_name)
+    redbot.logging.init_logging(
+        level=cli_flags.logging_level, location=data_manager.core_data_path() / "logs"
+    )
+
+    log.debug("====Basic Config====")
+    log.debug("Data Path: %s", data_manager._base_data_path())
+    log.debug("Storage Type: %s", data_manager.storage_type())
+
     red = Red(cli_flags=cli_flags, description=description, pm_help=None)
     init_global_checks(red)
     init_events(red, cli_flags)
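Since init_logging() attaches its handlers to the base "red" logger, any code that logs through a child logger picks them up via normal propagation. A minimal sketch (the cog name "mycog" is hypothetical):

    import logging

    # Child loggers of "red" propagate records up to the handlers
    # installed by redbot.logging.init_logging().
    log = logging.getLogger("red.mycog")

    log.info("This reaches stdout, latest.log and red.log")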
redbot/core/cli.py

@@ -1,5 +1,6 @@
 import argparse
 import asyncio
+import logging
 
 
 def confirm(m=""):
@@ -97,7 +98,14 @@ def parse_cli_flags(args):
         "login. This is useful for testing the boot "
         "process.",
     )
-    parser.add_argument("--debug", action="store_true", help="Sets the loggers level as debug")
+    parser.add_argument(
+        "--debug",
+        action="store_const",
+        dest="logging_level",
+        const=logging.DEBUG,
+        default=logging.INFO,
+        help="Sets the loggers level as debug",
+    )
     parser.add_argument("--dev", action="store_true", help="Enables developer mode")
     parser.add_argument(
         "--mentionable",
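The reworked --debug flag relies on argparse's store_const action, so the parsed namespace carries a ready-to-use logging level instead of a boolean. A self-contained sketch of the same pattern:

    import argparse
    import logging

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--debug",
        action="store_const",  # store `const` when the flag is given
        dest="logging_level",  # exposed as args.logging_level
        const=logging.DEBUG,
        default=logging.INFO,  # used when the flag is absent
    )

    assert parser.parse_args(["--debug"]).logging_level == logging.DEBUG
    assert parser.parse_args([]).logging_level == logging.INFO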
redbot/core/config.py

@@ -569,13 +569,9 @@ class Config:
         # We have to import this here otherwise we have a circular dependency
         from .data_manager import basic_config
 
-        log.debug("Basic config: \n\n{}".format(basic_config))
-
         driver_name = basic_config.get("STORAGE_TYPE", "JSON")
         driver_details = basic_config.get("STORAGE_DETAILS", {})
 
-        log.debug("Using driver: '{}'".format(driver_name))
-
         driver = get_driver(
             driver_name, cog_name, uuid, data_path_override=cog_path_override, **driver_details
         )
redbot/core/json_io.py

@@ -47,7 +47,6 @@ class JsonIO:
         And:
         https://www.mjmwired.net/kernel/Documentation/filesystems/ext4.txt#310
         """
-        log.debug("Saving file {}".format(self.path))
         filename = self.path.stem
         tmp_file = "{}-{}.tmp".format(filename, uuid4().fields[0])
         tmp_path = self.path.parent / tmp_file
@@ -80,7 +79,6 @@ class JsonIO:
 
     # noinspection PyUnresolvedReferences
     def _load_json(self):
-        log.debug("Reading file {}".format(self.path))
         with self.path.open(encoding="utf-8", mode="r") as f:
             data = json.load(f)
         return data
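The surrounding `_save_json` (only partially visible in the first hunk) writes to a uniquely named .tmp file in the same directory and then swaps it into place, which is what the ext4 link in its docstring is about. A minimal sketch of that pattern, not Red's exact implementation:

    import json
    import os
    from pathlib import Path
    from uuid import uuid4

    def atomic_save_json(path: Path, data) -> None:
        # Write to a temp file on the same filesystem, then atomically
        # replace the target so readers never observe a partial file.
        tmp_path = path.parent / "{}-{}.tmp".format(path.stem, uuid4().fields[0])
        with tmp_path.open(encoding="utf-8", mode="w") as f:
            json.dump(data, f)
            f.flush()
            os.fsync(f.fileno())  # flush to disk before the rename
        tmp_path.replace(path)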
redbot/logging.py (new file, 151 lines)

@@ -0,0 +1,151 @@
import logging.handlers
import pathlib
import re
import sys
from typing import List, Tuple, Optional

MAX_OLD_LOGS = 8


class RotatingFileHandler(logging.handlers.RotatingFileHandler):
    """Custom rotating file handler.

    This file handler rotates a bit differently to the one in stdlib.

    For a start, this works off of a "stem" and a "directory". The stem
    is the base name of the log file, without the extension. The
    directory is where all log files (including backups) will be placed.

    Secondly, this logger rotates files downwards, and new logs are
    *started* with the backup number incremented. The stdlib handler
    rotates files upwards, and this leaves the logs in reverse order.

    Thirdly, naming conventions are not customisable with this class.
    Logs will initially be named in the format "{stem}.log", and after
    rotating, the first log file will be renamed "{stem}-part1.log",
    and a new file "{stem}-part2.log" will be created for logging to
    continue.

    A few things can't be modified in this handler: it must use append
    mode, it doesn't support use of the `delay` arg, and it will ignore
    custom namers and rotators.

    When this handler is instantiated, it will search through the
    directory for logs from previous runtimes, and will open the file
    with the highest backup number to append to.
    """

    def __init__(
        self,
        stem: str,
        directory: pathlib.Path,
        maxBytes: int = 0,
        backupCount: int = 0,
        encoding: Optional[str] = None,
    ) -> None:
        self.baseStem = stem
        self.directory = directory.resolve()
        # Scan for existing files in directory, append to last part of existing log
        log_part_re = re.compile(rf"{stem}-part(?P<partnum>\d+).log")
        highest_part = 0
        for path in directory.iterdir():
            match = log_part_re.match(path.name)
            if match and int(match["partnum"]) > highest_part:
                highest_part = int(match["partnum"])
        if highest_part:
            filename = directory / f"{stem}-part{highest_part}.log"
        else:
            filename = directory / f"{stem}.log"
        super().__init__(
            filename,
            mode="a",
            maxBytes=maxBytes,
            backupCount=backupCount,
            encoding=encoding,
            delay=False,
        )

    def doRollover(self):
        if self.stream:
            self.stream.close()
            self.stream = None
        initial_path = self.directory / f"{self.baseStem}.log"
        if self.backupCount > 0 and initial_path.exists():
            initial_path.replace(self.directory / f"{self.baseStem}-part1.log")

        match = re.match(
            rf"{self.baseStem}(?:-part(?P<part>\d+)?)?.log", pathlib.Path(self.baseFilename).name
        )
        latest_part_num = int(match.groupdict(default="1").get("part", "1"))
        if self.backupCount < 1:
            # No backups, just delete the existing log and start again
            pathlib.Path(self.baseFilename).unlink()
        elif latest_part_num > self.backupCount:
            # Rotate files down one
            # red-part2.log becomes red-part1.log etc, a new log is added at the end.
            for i in range(1, self.backupCount):
                next_log = self.directory / f"{self.baseStem}-part{i + 1}.log"
                if next_log.exists():
                    prev_log = self.directory / f"{self.baseStem}-part{i}.log"
                    next_log.replace(prev_log)
        else:
            # Simply start a new file
            self.baseFilename = str(
                self.directory / f"{self.baseStem}-part{latest_part_num + 1}.log"
            )

        self.stream = self._open()


def init_logging(level: int, location: pathlib.Path) -> None:
    dpy_logger = logging.getLogger("discord")
    dpy_logger.setLevel(logging.WARNING)
    base_logger = logging.getLogger("red")
    base_logger.setLevel(level)

    formatter = logging.Formatter(
        "[{asctime}] [{levelname}] {name}: {message}", datefmt="%Y-%m-%d %H:%M:%S", style="{"
    )

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(formatter)
    base_logger.addHandler(stdout_handler)
    dpy_logger.addHandler(stdout_handler)

    if not location.exists():
        location.mkdir(parents=True, exist_ok=True)
    # Rotate latest logs to previous logs
    previous_logs: List[pathlib.Path] = []
    latest_logs: List[Tuple[pathlib.Path, str]] = []
    for path in location.iterdir():
        match = re.match(r"latest(?P<part>-part\d+)?\.log", path.name)
        if match:
            part = match.groupdict(default="")["part"]
            latest_logs.append((path, part))
        match = re.match(r"previous(?:-part\d+)?.log", path.name)
        if match:
            previous_logs.append(path)
    # Delete all previous.log files
    for path in previous_logs:
        path.unlink()
    # Rename latest.log files to previous.log
    for path, part in latest_logs:
        path.replace(location / f"previous{part}.log")

    latest_fhandler = RotatingFileHandler(
        stem="latest",
        directory=location,
        maxBytes=1_000_000,  # About 1MB per logfile
        backupCount=MAX_OLD_LOGS,
        encoding="utf-8",
    )
    all_fhandler = RotatingFileHandler(
        stem="red",
        directory=location,
        maxBytes=1_000_000,
        backupCount=MAX_OLD_LOGS,
        encoding="utf-8",
    )
    for fhandler in (latest_fhandler, all_fhandler):
        fhandler.setFormatter(formatter)
        base_logger.addHandler(fhandler)
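To see the handler's naming scheme in isolation, one can attach it to a throwaway logger with a tiny maxBytes and watch demo.log roll over into demo-part1.log, demo-part2.log, ... in chronological order. A hypothetical sketch (path, logger name and sizes are arbitrary):

    import logging
    import pathlib
    from redbot.logging import RotatingFileHandler

    logs_dir = pathlib.Path("/tmp/red-logging-demo")
    logs_dir.mkdir(parents=True, exist_ok=True)

    logger = logging.getLogger("red.demo")
    logger.setLevel(logging.DEBUG)
    # Roll over after ~1KB, keeping up to 8 old parts (9 files total).
    logger.addHandler(
        RotatingFileHandler(stem="demo", directory=logs_dir, maxBytes=1024, backupCount=8)
    )

    for i in range(200):
        logger.info("message %d", i)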