Python logger dynamic filename

I want to configure my Python logger so that each logger instance logs to a file whose name matches the name of the logger itself.
e.g.:
log_hm = logging.getLogger('healthmonitor')
log_hm.info("Testing Log") # Should log to /some/path/healthmonitor.log
log_sc = logging.getLogger('scripts')
log_sc.debug("Testing Scripts") # Should log to /some/path/scripts.log
log_cr = logging.getLogger('cron')
log_cr.info("Testing cron") # Should log to /some/path/cron.log
I want to keep it generic and don't want to hardcode all the logger names I can have. Is that possible?

How about simply wrapping the handler setup code in a function:
import logging
import os

def myLogger(name):
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(os.path.join('/some/path/', name + '.log'), 'w')
    logger.addHandler(handler)
    return logger
log_hm = myLogger('healthmonitor')
log_hm.info("Testing Log") # Should log to /some/path/healthmonitor.log
To prevent creating duplicate handlers, care needs to be taken to ensure that myLogger(name) is only called once per name. Usually that means calling myLogger(name) inside the if __name__ == '__main__': block of the main script:

if __name__ == '__main__':
    log_hm = myLogger('healthmonitor')
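A variation (a minimal sketch, not from the original answer, reusing the imports above) is to make the function itself idempotent by checking logger.handlers before attaching a new handler:

def myLogger(name):
    logger = logging.getLogger(name)
    if not logger.handlers:  # only configure the logger on the first call
        logger.setLevel(logging.DEBUG)
        logger.addHandler(logging.FileHandler(os.path.join('/some/path/', name + '.log'), 'w'))
    return logger

With this guard, calling myLogger('healthmonitor') repeatedly returns the same cached logger without stacking duplicate file handlers.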

import os
import logging

class MyFileHandler(object):
    def __init__(self, dir, logger, handlerFactory, **kw):
        kw['filename'] = os.path.join(dir, logger.name)
        self._handler = handlerFactory(**kw)

    def __getattr__(self, n):
        if hasattr(self._handler, n):
            return getattr(self._handler, n)
        raise AttributeError(n)

logger = logging.getLogger('test')
logger.setLevel(logging.INFO)
handler = MyFileHandler(os.curdir, logger, logging.FileHandler)
logger.addHandler(handler)
logger.info('hello mylogger')
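Because the handler class is passed in as a factory, the same wrapper should work with any filename-based handler. For example (an untested sketch using the standard library's rotating handler):

import logging.handlers

handler = MyFileHandler(os.curdir, logger, logging.handlers.RotatingFileHandler,
                        maxBytes=1048576, backupCount=3)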

The approach used in the above solution is correct, but it has the issue of adding duplicate handlers when called more than once. Here is an improved version.
import logging
import os

def getLogger(name):
    # logging.getLogger returns the cached logger when called multiple times;
    # logging.Logger creates a new one every time, which avoids adding
    # duplicate handlers
    logger = logging.Logger(name)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(os.path.join('/some/path/', name + '.log'), 'a')
    logger.addHandler(handler)
    return logger

def test(i):
    log_hm = getLogger('healthmonitor')
    log_hm.info("Testing Log %s", i)  # Should log to /some/path/healthmonitor.log

test(1)
test(2)
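Note that instantiating logging.Logger directly sidesteps the logging module's cache, so every call builds a brand-new logger and opens a new file handle. A memoized variant (a sketch, not from the original answer) keeps one logger and one handler per name:

import functools

@functools.lru_cache(maxsize=None)
def getLogger(name):
    # Cached per name, so the FileHandler is attached exactly once.
    logger = logging.Logger(name)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.FileHandler(os.path.join('/some/path/', name + '.log'), 'a'))
    return logger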

I'm trying to implement this solution with both a dynamic path and file name, but nothing is written to the file.
class PaymentViewSet(viewsets.ModelViewSet):
    serializer_class = PaymentSerializer
    queryset = Payment.objects.all()
    permission_classes = [IsAuthenticated]

    def paymentLog(self, paymentSerializer):
        # file : logs/${terminalName}/${%Y-%m}-payments.log
        terminalName = TerminalSerializer(Terminal.objects.get(pk=paymentSerializer.data.get("terminal"))).data.get("name")
        filePath = os.path.join(settings.LOG_PATH, terminalName)
        if not os.path.exists(filePath):
            os.makedirs(filePath)
        fileName = filePath + "/" + datetime.now().strftime("%Y-%m") + '-payments.log'
        handler = logging.FileHandler(fileName)
        handler.setFormatter('%(asctime)s [PAYMENT]- %(message)s')
        logger = logging.Logger("payment")
        logger.setLevel(logging.INFO)
        logger.addHandler(handler)
        # logger.propagate = False
        logging.info(paymentSerializer.data)
        # printout()

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        # log here
        self.paymentLog(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
The path and file are created like intended but the log never writes.
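No answer was recorded for this one, but reading the code above, two details look like the likely culprits: logging.info(...) sends the record to the root logger instead of the freshly built logger object, and setFormatter is handed a bare format string where it expects a logging.Formatter instance. A minimal corrected sketch of the handler setup:

handler = logging.FileHandler(fileName)
handler.setFormatter(logging.Formatter('%(asctime)s [PAYMENT]- %(message)s'))
logger = logging.Logger("payment")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info(paymentSerializer.data)  # log via the logger that owns the handler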

Related

Add a new colored logging level

So this code outputs level names with colors (default levels), and I'd like to add extra levels of my own, and then give them a custom color.
Code:
import logging
import re
import time
import sys

def set_colour(level):
    """
    Sets colour of text for the level name in
    logging statements using a dispatcher.
    """
    escaped = "[\033[1;%sm%s\033[1;0m]"
    return {
        'INFO': lambda: logging.addLevelName(logging.INFO, escaped % ('94', level)),
        'WARNING': lambda: logging.addLevelName(logging.WARNING, escaped % ('93', level)),
        'ERROR': lambda: logging.addLevelName(logging.ERROR, escaped % ('91', level))
    }.get(level, lambda: None)()

class NoColorFormatter(logging.Formatter):
    """
    Log formatter that strips terminal colour
    escape codes from the log message.
    """
    # Regex for ANSI colour codes
    ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")

    def format(self, record):
        """Return logger message with terminal escapes removed."""
        return "%s %s %s" % (
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            re.sub(self.ANSI_RE, "", record.levelname),
            record.msg,
        )

# Create logger
logger = logging.getLogger(__package__)
# Create formatters
logformatter = NoColorFormatter()
colorformatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
# Set logging colours
for level in 'INFO', 'ERROR', 'WARNING':
    set_colour(level)
# Set logging level
logger.setLevel(logging.INFO)
# Set log handlers
loghandler = logging.FileHandler("log.txt", mode="a", encoding="utf8")
streamhandler = logging.StreamHandler(sys.stdout)
# Set log formatters
loghandler.setFormatter(logformatter)
streamhandler.setFormatter(colorformatter)
# Attach log handlers to logger
logger.addHandler(loghandler)
logger.addHandler(streamhandler)
# Example logging statements
logging.info("This is just an information for you")
logging.warning("This is just a warning for you")
logging.error("This is just an error for you")
What I want to accomplish is a new level, but with its unique color.
Here's the code on how I accomplish adding a new level:
def success(msg, *args, **kwargs):
    if logging.getLogger().isEnabledFor(70):
        logging.log(70, msg)

logging.addLevelName(70, "SUCCESS")
logging.success = success
logging.Logger.success = success
The above code works fine normally but does not include any color. How can I extend this code to add a new level with a different color?
This requires the addition of a handful of lines, including this block (with one extra line to set the integer value for logging.SUCCESS):
def success(msg, *args, **kwargs):
    if logging.getLogger().isEnabledFor(70):
        logging.log(70, msg)

logging.addLevelName(70, "SUCCESS")
logging.SUCCESS = 70  # similar to logging.INFO -> 20
logging.success = success
logging.Logger.success = success
I've indicated the lines that have been added or modified below. To add further levels, defining the same structures for the new ones and modifying the for loop and the set_colour() function should be enough.
import logging
import re
import time
import sys

def set_colour(level):
    """
    Sets colour of text for the level name in
    logging statements using a dispatcher.
    """
    escaped = "[\033[1;%sm%s\033[1;0m]"
    return {
        'INFO': lambda: logging.addLevelName(logging.INFO, escaped % ('94', level)),
        'WARNING': lambda: logging.addLevelName(logging.WARNING, escaped % ('93', level)),
        'ERROR': lambda: logging.addLevelName(logging.ERROR, escaped % ('91', level)),
        'SUCCESS': lambda: logging.addLevelName(logging.SUCCESS, escaped % ('31', level))  # new
    }.get(level, lambda: None)()

class NoColorFormatter(logging.Formatter):
    """
    Log formatter that strips terminal colour
    escape codes from the log message.
    """
    # Regex for ANSI colour codes
    ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")

    def format(self, record):
        """Return logger message with terminal escapes removed."""
        return "%s %s %s" % (
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            re.sub(self.ANSI_RE, "", record.levelname),
            record.msg,
        )

def success(msg, *args, **kwargs):  # new
    if logging.getLogger().isEnabledFor(70):  # new
        logging.log(70, msg)  # new

# Create logger
logger = logging.getLogger(__package__)
# Create formatters
logformatter = NoColorFormatter()
colorformatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
# Create new level
logging.SUCCESS = 70  # new
logging.success = success  # new
logging.Logger.success = success  # new
# Set logging colours
for level in 'INFO', 'ERROR', 'WARNING', 'SUCCESS':  # modified
    set_colour(level)
# Set logging level
logger.setLevel(logging.INFO)
# Set log handlers
loghandler = logging.FileHandler("log.txt", mode="w", encoding="utf8")
streamhandler = logging.StreamHandler(sys.stdout)
# Set log formatters
loghandler.setFormatter(logformatter)
streamhandler.setFormatter(colorformatter)
# Attach log handlers to logger
logger.addHandler(loghandler)
logger.addHandler(streamhandler)
# Example logging statements
logging.info("This is just an information for you")
logging.warning("This is just a warning for you")
logging.error("This is just an error for you")
logging.success("This is just a success message for you")

Python Logging - Closing one file across multiple logs (ResourceWarning)

I created a custom logging.Logger that is used by several different objects in a script I'm running like so:
class TestLogger(logging.Logger):
    def __init__(self, name, file=None):
        super(TestLogger, self).__init__(name, level=logging.DEBUG)
        self.log_file = file
        ...

    def addLogFile(self, log_file):
        self.log_file = log_file
        self.setFormat()

    # set the format of the log
    def setFormat(self, default=True, end='\n'):
        # remove any Handlers
        self.removeStreamHandlers()
        self.removeFileHandlers()
        # get the log format string, default or message
        format_str = DEFAULT_FORMAT if default else CUSTOM_FORMAT
        std_formatter = logging.Formatter(format_str, datefmt=self.DATE_FORMAT)
        # add the stream handler
        console = logging.StreamHandler(sys.stdout)
        console.setFormatter(std_formatter)
        console.terminator = end
        self.addHandler(console)
        # add the file handler
        if self.log_file:
            file_formatter = logging.Formatter(format_str, datefmt=self.DATE_FORMAT)
            logger = logging.FileHandler(self.log_file)
            logger.setFormatter(file_formatter)
            self.addHandler(logger)

    # remove all stream handlers
    def removeStreamHandlers(self):
        stream_handlers = [h for h in self.handlers if isinstance(h, logging.StreamHandler)
                           and not isinstance(h, logging.FileHandler)]
        for sh in stream_handlers:
            self.removeHandler(sh)

    # remove all file handlers
    def removeFileHandlers(self):
        file_handlers = [h for h in self.handlers if isinstance(h, logging.StreamHandler)
                         and isinstance(h, logging.FileHandler)]
        for fh in file_handlers:
            self.removeHandler(fh)

class Something:
    def __init__(self):
        self.log = TestLogger('Something')
        ...

    def __del__(self):
        self.log.removeFileHandlers()
        self.log.removeStreamHandlers()

class SomethingElse:
    def __init__(self):
        self.log = TestLogger('SomethingElse')
        ...

    def __del__(self):
        self.log.removeFileHandlers()
        self.log.removeStreamHandlers()
All of these objects are initialized and designed to share the same log file like so:
log_file = 'test.log'
s = Something()
se = SomethingElse()
s.addLogFile(log_file)
se.addLogFile(log_file)
...
del s, se
The problem seems to be that when I rerun my program, it throws a ResourceWarning every time I run setFormat(). It seems like the file isn't being properly closed, and I'm not sure where this could be happening.
First of all, you should reuse the handlers. If you want to change the formatter just call setFormatter on the existing handlers and keep them.
If you really do want to throw away the handler and use a new one, there is a close() method on the FileHandler that is supposed to be called to clean up when the handler is done logging. So in your case you would change your code to look like this:
for fh in file_handlers:
    fh.close()
    self.removeHandler(fh)
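For the first suggestion (reusing handlers instead of recreating them), setFormat could look roughly like this (a sketch, assuming the same DEFAULT_FORMAT, CUSTOM_FORMAT, and DATE_FORMAT attributes as above):

def setFormat(self, default=True, end='\n'):
    format_str = DEFAULT_FORMAT if default else CUSTOM_FORMAT
    formatter = logging.Formatter(format_str, datefmt=self.DATE_FORMAT)
    # update the existing handlers in place instead of replacing them
    for h in self.handlers:
        h.setFormatter(formatter)
        if isinstance(h, logging.StreamHandler) and not isinstance(h, logging.FileHandler):
            h.terminator = end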

Rotating file handler for JSON logs in Python

I am working on saving JSON logs using Python. Below is the code:
log_file = 'app_log.json'
log_json = dict()
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"
log_json['Created'] = datetime.datetime.utcnow().isoformat()

with open(log_file, "a") as f:
    json.dump(log_json, f)  # , ensure_ascii=False)
    f.write("\n")
The above code generates the log file, but I have noticed that the file size keeps increasing, and in the future I might face disk space issues. I was wondering if there is any pre-built rotating file handler for JSON in which we can set a fixed size, say 100 MB, and upon reaching this size it will delete and recreate a new file.
I have previously used from logging.handlers import RotatingFileHandler to do this for .log files, but I also want to do this for .json files. Please help. Thanks.
Python does not care about the log file name.
You can use the same rotating handler you used for .log files for .json files as well.
See the sample example below:
# logging_example.py
import logging
import logging.handlers
import os
import time

logfile = os.path.join("/tmp", "demo_logging.json")

logger = logging.getLogger(__name__)

fh = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=1000, backupCount=5)  # noqa:E501
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)

while 1:
    time.sleep(1)
    logger.info("Long string to increase the file size")
You can also look at logrotate if you are working in a Unix environment. It is a great and simple tool with good documentation that does exactly what you need.
You can implement structured logging with RotatingFileHandler:

import json
import logging
import logging.handlers
from datetime import datetime

class StructuredMessage:
    def __init__(self, message, /, **kwargs):
        self.message = message
        self.kwargs = kwargs

    def __str__(self):
        return '%s >>> %s' % (self.message, json.dumps(self.kwargs))

_ = StructuredMessage  # optional, to improve readability

log_json = {}
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"
log_json['Created'] = datetime.utcnow().isoformat()

LOG_FILENAME = 'logging_rotatingfile_example.out'

# Set up a specific logger with our desired output level
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=20, backupCount=5)
bf = logging.Formatter('%(message)s')
handler.setFormatter(bf)
logger.addHandler(handler)

logger.info(_('INFO', **log_json))
Note: check here for more info about structured-logging-python
You can also use json-logging-python with a RotatingFileHandler:
import logging
import logging.handlers
import json
import traceback
from datetime import datetime
import copy
import json_logging
import sys

json_logging.ENABLE_JSON_LOGGING = True

def extra(**kw):
    '''Add the required nested props layer'''
    return {'extra': {'props': kw}}

class CustomJSONLog(logging.Formatter):
    """
    Customized logger
    """
    def get_exc_fields(self, record):
        if record.exc_info:
            exc_info = self.format_exception(record.exc_info)
        else:
            exc_info = record.exc_text
        return {'python.exc_info': exc_info}

    @classmethod
    def format_exception(cls, exc_info):
        return ''.join(traceback.format_exception(*exc_info)) if exc_info else ''

    def format(self, record):
        json_log_object = {"#timestamp": datetime.utcnow().isoformat(),
                           "level": record.levelname,
                           "message": record.getMessage(),
                           "caller": record.filename + '::' + record.funcName
                           }
        json_log_object['data'] = {
            "python.logger_name": record.name,
            "python.module": record.module,
            "python.funcName": record.funcName,
            "python.filename": record.filename,
            "python.lineno": record.lineno,
            "python.thread": record.threadName,
            "python.pid": record.process
        }
        if hasattr(record, 'props'):
            json_log_object['data'].update(record.props)
        if record.exc_info or record.exc_text:
            json_log_object['data'].update(self.get_exc_fields(record))
        return json.dumps(json_log_object)

json_logging.init_non_web(custom_formatter=CustomJSONLog, enable_json=True)

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

LOG_FILENAME = 'logging_rotating_json_example.out'
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=20, backupCount=5)
logger.addHandler(handler)

log_json = {}
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"

logger.info('Starting')
logger.debug('Working', extra={"props": log_json})
Note: check here for more info about json-logging-python
You can try this just before you write or append to the file. It checks whether the file has reached the maximum number of lines, and if so removes one line from the beginning of the file before you append to the end as usual.
filename = 'file.txt'
maxLines = 100

with open(filename) as f:
    count = len(f.readlines())

if count > maxLines:
    with open(filename, 'r') as fin:
        data = fin.read().splitlines(True)
    with open(filename, 'w') as fout:
        fout.writelines(data[1:])

Dynamic class object logging in Python

I'm using a logging conf file, trying to get a different file for each user. It works fine: it creates the files with the names of the users I'm passing in. But then when I try to create a log entry for each user via 'user1.logging.info(...)' and 'user2.logging.info(...)', both entries go into the log file of user2. Why?
logging.conf file:
[loggers]
keys = root, user
[handlers]
keys = consoleHandler, userFileHandler
[formatters]
keys = Formatter
[logger_root]
level = DEBUG
handlers = consoleHandler
[logger_user]
level = DEBUG
handlers = consoleHandler, userFileHandler
qualname = user
propagate = 0
[handler_consoleHandler]
class = StreamHandler
level = DEBUG
formatter = Formatter
args = (sys.stdout,)
[handler_userFileHandler]
class = FileHandler
level = DEBUG
formatter = Formatter
args = ('%(logfilename)s', 'a+')
[formatter_Formatter]
format = %(asctime)s [%(funcName)s] [%(levelname)s] %(message)s
datefmt = %Y-%m-%d %H:%M:%S
class:
class User:
    """ Users instantiation class """
    def __init__(self, email, password):
        self.email = email
        self.password = password
        self.logger = self.Logger()

    def __repr__(self):
        return '{}'.format(self.email)

    def Logger(self):
        """ Users loggers instantiation """
        logging.config.fileConfig('log/logging.conf', defaults={'logfilename': 'log/{}.log'.format(self.email)})
        logger = logging.getLogger('user')
        logger.info('info log for: {}'.format(self.email))  # This entry works fine
        return logger
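No answer was recorded here, but the behaviour follows from how fileConfig works: every call to logging.config.fileConfig reconfigures logging globally, and both User instances share the single logger named 'user', so the second call repoints the one userFileHandler at user2's file and every later record lands there. A per-user logger with its own handler avoids the collision (a hedged sketch, not from the original post):

import logging

def make_user_logger(email):
    # One logger per user, each with its own file handler.
    logger = logging.getLogger('user.{}'.format(email))
    if not logger.handlers:
        handler = logging.FileHandler('log/{}.log'.format(email))
        handler.setFormatter(logging.Formatter(
            '%(asctime)s [%(funcName)s] [%(levelname)s] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.propagate = False
    return logger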

How should I pass a custom logger from a file to multiple modules while maintaining sub-module granularity?

I have a logging class that describes a base logger.
import logging
import sys

class logger:
    def __init__(self):
        self.filelocation = 'log/util.log'
        self.loggers = {}

    def init_logger(self, name="util"):
        if self.loggers.get(name):
            return self.loggers.get(name)
        else:
            module_logger = logging.getLogger(name)
            module_logger.setLevel(logging.DEBUG)
            module_logger.propagate = False
            # create file handler which logs even debug messages
            fh = logging.FileHandler(self.filelocation)
            fh.setLevel(logging.DEBUG)
            # create console handler with a higher log level
            ch = logging.StreamHandler(sys.stdout)
            ch.setLevel(logging.WARNING)
            # create formatter and add it to the handlers
            formatter = logging.Formatter('%(module)s : %(levelname)s : %(asctime)s : %(name)s : %(message)s')
            fh.setFormatter(formatter)
            ch.setFormatter(formatter)
            # add the handlers to the logger
            module_logger.addHandler(fh)
            module_logger.addHandler(ch)
            self.loggers[name] = module_logger
            return module_logger
Now I have multiple modules referencing the logger described above. For example:
# mod1.py
logsession = logger().init_logger(name="athena_s3")
dictsession = logger().init_logger(name="dictinfo")

class mod1_class():
    def __init__(self):
        self.var1 = etc

    def build_session(self):
        """
        Build a session using the athena client
        Input: None
        Return: Active session
        """
        if not self._session:
            try:
                self._session = Session(aws_access_key_id=self._aws_access_key_id, aws_secret_access_key=self._aws_secret_access_key)
                logsession.info("Built new athena session")
Similarly, I have another module that could reference the code from mod1.py above. Now consider a test.py file that imports mod1.py:
from mod1 import mod1_class
session = mod1_class().build_session()
### Do STUFF
How can I pass the logger from multiple files like test.py into mod1.py so that they share the same logger while maintaining sub-module granularity?
So, for example, the logs could be:
test : INFO : time : athena_s3 : message
test : INFO : time : athena_s3 : athena_util : message
test2: INFO : time : athena_s2 : message
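No answer is included above, but the usual way to get this behaviour is to lean on the logger hierarchy rather than passing logger objects around: each module logs to a child of the caller's logger, handlers are configured once at the top level, and the %(name)s field carries the dotted path that provides the sub-module granularity shown in the desired output. A hedged sketch (names like athena_util are illustrative, not from the original post):

# mod1.py (sketch)
import logging

class mod1_class:
    def __init__(self, parent_logger="athena_s3"):
        # e.g. parent_logger="test.athena_s3" when test.py owns the handlers;
        # records propagate up to whichever ancestor has handlers attached.
        self.log = logging.getLogger(parent_logger).getChild("athena_util")

    def build_session(self):
        self.log.info("Built new athena session")

Each entry point (test.py, test2.py) then calls logger().init_logger(name="test.athena_s3") once to attach handlers, and every module underneath inherits them through propagation.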
