python logging string formatting

I am using Python's log Formatter to format log records and I have a fmt value of
fmt = "[%(filename)s:%(lineno)s] %(message)s"
What I would like is for "[file.py:20]" to be stretched to 10 characters wide (for example). If it were one value that would be easy, but is there any way to stretch this entire structure to a specified length?
I want something like:
tmp = "[%(filename)s:%(lineno)s]"
fmt = "%(tmp)10s %(message)s"
I would like to know if this is possible using string formatting, or if I can somehow trick Python's formatter to get what I want.
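For context, padding a single field already works with an ordinary width specifier; it is only the composite "[filename:lineno]" group that %-style formatting cannot pad as one unit. A minimal illustration of the single-field case:
import logging

# Padding one field is easy with a width specifier such as %(filename)-10s;
# the question is about padding the whole "[filename:lineno]" group,
# which %-style formatting cannot express directly.
logging.basicConfig(format="%(filename)-10s %(lineno)-4d %(message)s",
                    level=logging.DEBUG)
logging.debug("single fields are easy to pad")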

As an example, this Formatter ensures a fixed width "[%(filename)s:%(lineno)s]" by either truncating the filename, or right-padding (after the line number) with spaces.
import logging

class MyFormatter(logging.Formatter):
    width = 10

    def format(self, record):
        max_filename_width = self.width - 3 - len(str(record.lineno))
        filename = record.filename
        if len(record.filename) > max_filename_width:
            filename = record.filename[:max_filename_width]
        a = "%s:%s" % (filename, record.lineno)
        return "[%s] %s" % (a.ljust(self.width), record.msg)

if __name__ == '__main__':
    logger = logging.getLogger('simple_example')
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = MyFormatter()
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.debug('No one expects the spammish repetition')
EDIT:
If you want to ensure a minimum width of 10 characters, ditch the filename stuff:
def format(self, record):
    a = "%s:%s" % (record.filename, record.lineno)
    return "[%s] %s" % (a.ljust(self.width), record.msg)

Option 1
Start here: http://docs.python.org/library/logging.html#formatter-objects
You'll create your own customized subclass of Formatter that provides its own format method.
Then you must be sure to call setFormatter() on each of your Handlers so that they use your new formatter.
Option 2
Create your own subclass of LogRecord with the additional property.
Subclass Logger and override makeRecord to create your new subclass of LogRecord.
Provide a customized format that uses this new property value, as in the sketch below.
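A minimal sketch of Option 2, assuming a hypothetical location attribute that bundles filename and line number so the format string can pad it as one field:
import logging

class MyLogRecord(logging.LogRecord):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # "location" is an illustrative attribute name, not part of stdlib logging.
        self.location = "[%s:%s]" % (self.filename, self.lineno)

class MyLogger(logging.Logger):
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
                   func=None, extra=None, sinfo=None):
        record = MyLogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)
        if extra is not None:
            record.__dict__.update(extra)
        return record

logging.setLoggerClass(MyLogger)
logger = logging.getLogger('sketch')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(location)-10s %(message)s"))
logger.addHandler(handler)
logger.warning("padded to at least 10 characters")
Note that %(location)-10s only pads; truncation still has to be done by hand, as in the Formatter above.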

Using @rob-cowie's answer as a basis, I've found the following useful:
import logging

class MyFormatter(logging.Formatter):
    width = 24
    DATEFMT = '%Y-%m-%d %H:%M:%S'

    def format(self, record):
        cpath = '%s:%s:%s' % (record.module, record.funcName, record.lineno)
        cpath = cpath[-self.width:].ljust(self.width)
        record.message = record.getMessage()
        s = "%-7s %s %s : %s" % (record.levelname,
                                 self.formatTime(record, self.DATEFMT),
                                 cpath, record.getMessage())
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        #if record.stack_info:
        #    if s[-1:] != "\n":
        #        s = s + "\n"
        #    s = s + self.formatStack(record.stack_info)
        return s

logFormatter = MyFormatter()
handler = logging.StreamHandler()
handler.setFormatter(logFormatter)  # formatters attach to handlers, not loggers
logger = logging.getLogger("example")
logger.addHandler(handler)
Which gives output like:
WARNING 2014-03-28 16:05:09 module:function:31 : Message
WARNING 2014-03-28 16:05:09 dule:longerfunctions:140 : Message

Add a new colored level logging

So this code outputs level names with colors (default levels), and I'd like to add extra levels of my own, and then give them a custom color.
Code:
import logging
import re
import time
import sys

def set_colour(level):
    """
    Sets colour of text for the level name in
    logging statements using a dispatcher.
    """
    escaped = "[\033[1;%sm%s\033[1;0m]"
    return {
        'INFO': lambda: logging.addLevelName(logging.INFO, escaped % ('94', level)),
        'WARNING': lambda: logging.addLevelName(logging.WARNING, escaped % ('93', level)),
        'ERROR': lambda: logging.addLevelName(logging.ERROR, escaped % ('91', level))
    }.get(level, lambda: None)()

class NoColorFormatter(logging.Formatter):
    """
    Log formatter that strips terminal colour
    escape codes from the log message.
    """
    # Regex for ANSI colour codes
    ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")

    def format(self, record):
        """Return logger message with terminal escapes removed."""
        return "%s %s %s" % (
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            re.sub(self.ANSI_RE, "", record.levelname),
            record.msg,
        )

# Create logger
logger = logging.getLogger(__package__)
# Create formatters
logformatter = NoColorFormatter()
colorformatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
# Set logging colours
for level in 'INFO', 'ERROR', 'WARNING':
    set_colour(level)
# Set logging level
logger.setLevel(logging.INFO)
# Set log handlers
loghandler = logging.FileHandler("log.txt", mode="a", encoding="utf8")
streamhandler = logging.StreamHandler(sys.stdout)
# Set log formatters
loghandler.setFormatter(logformatter)
streamhandler.setFormatter(colorformatter)
# Attach log handlers to logger
logger.addHandler(loghandler)
logger.addHandler(streamhandler)
# Example logging statements
logging.info("This is just an information for you")
logging.warning("This is just a warning for you")
logging.error("This is just an error for you")
What I want to accomplish is a new level, but with its own unique color.
Here's how I currently add a new level:
def success(msg, *args, **kwargs):
    if logging.getLogger().isEnabledFor(70):
        logging.log(70, msg)

logging.addLevelName(70, "SUCCESS")
logging.success = success
logging.Logger.success = success
The above code works fine normally but does not include any color. How can I add this code to have a new level, but with a different color?
This requires the addition of a handful of lines, including this block (with the addition of another line to set the integer value for logging.SUCCESS):
def success(msg, *args, **kwargs):
    if logging.getLogger().isEnabledFor(70):
        logging.log(70, msg)

logging.addLevelName(70, "SUCCESS")
logging.SUCCESS = 70  # similar to logging.INFO -> 20
logging.success = success
logging.Logger.success = success
I've indicated the lines that have been added or modified below. To add further levels, define the same structures for the new ones and extend the for loop and the set_colour() function.
import logging
import re
import time
import sys

def set_colour(level):
    """
    Sets colour of text for the level name in
    logging statements using a dispatcher.
    """
    escaped = "[\033[1;%sm%s\033[1;0m]"
    return {
        'INFO': lambda: logging.addLevelName(logging.INFO, escaped % ('94', level)),
        'WARNING': lambda: logging.addLevelName(logging.WARNING, escaped % ('93', level)),
        'ERROR': lambda: logging.addLevelName(logging.ERROR, escaped % ('91', level)),
        'SUCCESS': lambda: logging.addLevelName(logging.SUCCESS, escaped % ('31', level))  # new
    }.get(level, lambda: None)()

class NoColorFormatter(logging.Formatter):
    """
    Log formatter that strips terminal colour
    escape codes from the log message.
    """
    # Regex for ANSI colour codes
    ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")

    def format(self, record):
        """Return logger message with terminal escapes removed."""
        return "%s %s %s" % (
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            re.sub(self.ANSI_RE, "", record.levelname),
            record.msg,
        )

def success(msg, *args, **kwargs):  # new
    if logging.getLogger().isEnabledFor(70):  # new
        logging.log(70, msg)  # new

# Create logger
logger = logging.getLogger(__package__)
# Create formatters
logformatter = NoColorFormatter()
colorformatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
# Create new level
logging.SUCCESS = 70  # new
logging.success = success  # new
logging.Logger.success = success  # new
# Set logging colours
for level in 'INFO', 'ERROR', 'WARNING', 'SUCCESS':  # modified
    set_colour(level)
# Set logging level
logger.setLevel(logging.INFO)
# Set log handlers
loghandler = logging.FileHandler("log.txt", mode="w", encoding="utf8")
streamhandler = logging.StreamHandler(sys.stdout)
# Set log formatters
loghandler.setFormatter(logformatter)
streamhandler.setFormatter(colorformatter)
# Attach log handlers to logger
logger.addHandler(loghandler)
logger.addHandler(streamhandler)
# Example logging statements
logging.info("This is just an information for you")
logging.warning("This is just a warning for you")
logging.error("This is just an error for you")
logging.success("This is just a success for you")

python logger limit string size

Python logger library: is there a way to limit the string size and return a truncated string if it exceeds the maximum limit?
I have a maximum log file size set, but that doesn't fit my needs, since the logger sometimes receives a base64 string to log, which makes all the logging in the terminal useless.
example:
export = '<Base64 Long String>'
logger.info(f'result: {export}')
Since the code is a part of a big project, I cannot change it in the function itself, is there a way to set it on the logger level?
Use a custom logging.Formatter:
import logging

class NotTooLongStringFormatter(logging.Formatter):
    def __init__(self, max_length=10):
        super(NotTooLongStringFormatter, self).__init__()
        self.max_length = max_length

    def format(self, record):
        if len(record.msg) > self.max_length:
            record.msg = record.msg[:self.max_length] + "..."
        return super().format(record)

LOG = logging.getLogger("mylogger")
h = logging.StreamHandler()
h.setFormatter(NotTooLongStringFormatter(20))
LOG.addHandler(h)

LOG.warning("a" * 10)   # aaaaaaaaaa
LOG.warning("a" * 100)  # aaaaaaaaaaaaaaaaaaaa...
To keep a detailed log with a specific format, just pass the format string to super().__init__():
def __init__(self, max_length=10):
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    super(NotTooLongStringFormatter, self).__init__(fmt)
    self.max_length = max_length
2022-01-09 12:29:44,862 - mylogger - WARNING - aaaaaaaaaa
2022-01-09 12:29:44,863 - mylogger - WARNING - aaaaaaaaaaaaaaaaaaaa...
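The Formatter above does the truncation per handler. Since the question asks about doing it at the logger level, one alternative (a sketch, not from the original answer) is a logging.Filter attached to the logger itself, so every handler sees the shortened message:
import logging

class TruncateFilter(logging.Filter):
    def __init__(self, max_length=100):
        super().__init__()
        self.max_length = max_length

    def filter(self, record):
        # Format the message once, then replace it with a truncated version.
        msg = record.getMessage()
        if len(msg) > self.max_length:
            record.msg = msg[:self.max_length] + "..."
            record.args = None  # already interpolated above
        return True

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("mylogger")
logger.addFilter(TruncateFilter(20))
logger.info("result: %s", "A" * 200)  # truncated for every handler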

Rotating file handler for JSON logs in Python

I am working on saving JSON logs using Python. Below is the code:
import datetime
import json

log_file = 'app_log.json'

log_json = dict()
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"
log_json['Created'] = datetime.datetime.utcnow().isoformat()

with open(log_file, "a") as f:
    json.dump(log_json, f)  # , ensure_ascii=False
    f.write("\n")
The above code generates the log file, but I have noticed that the file size keeps increasing, and in the future I might face disk space issues. I was wondering if there is any pre-built rotating file handler available for JSON in which we can set a fixed size, say 100 MB, and upon reaching this size it will delete and recreate the file.
I have previously used from logging.handlers import RotatingFileHandler to do this for .log files, but I also want to do it for .json files. Please help. Thanks
Python does not care about the log file name.
You can use the rotating handler which you used for the .log file for the .json file as well.
See the sample example below:
# logging_example.py
import logging
import logging.handlers
import os
import time

logfile = os.path.join("/tmp", "demo_logging.json")

logger = logging.getLogger(__name__)

fh = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=1000, backupCount=5)  # noqa:E501
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)

while 1:
    time.sleep(1)
    logger.info("Long string to increase the file size")
You can also look at logrotate if you are working in a Unix environment. It is a great and simple tool with good documentation that does exactly what you need.
You can implement structured logging with RotatingFileHandler:
import json
import logging
import logging.handlers
from datetime import datetime

class StructuredMessage:
    def __init__(self, message, /, **kwargs):
        self.message = message
        self.kwargs = kwargs

    def __str__(self):
        return '%s >>> %s' % (self.message, json.dumps(self.kwargs))

_ = StructuredMessage  # optional, to improve readability

log_json = {}
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"
log_json['Created'] = datetime.utcnow().isoformat()

LOG_FILENAME = 'logging_rotatingfile_example.out'

# Set up a specific logger with our desired output level
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=20, backupCount=5)
bf = logging.Formatter('%(message)s')
handler.setFormatter(bf)
logger.addHandler(handler)

logger.info(_('INFO', **log_json))
Note: check here for more info about structured-logging-python
You can also use json-logging-python with RotatingFileHandler:
import logging
import logging.handlers
import json
import traceback
from datetime import datetime
import copy
import json_logging
import sys

json_logging.ENABLE_JSON_LOGGING = True

def extra(**kw):
    '''Add the required nested props layer'''
    return {'extra': {'props': kw}}

class CustomJSONLog(logging.Formatter):
    """
    Customized logger
    """
    def get_exc_fields(self, record):
        if record.exc_info:
            exc_info = self.format_exception(record.exc_info)
        else:
            exc_info = record.exc_text
        return {'python.exc_info': exc_info}

    @classmethod
    def format_exception(cls, exc_info):
        return ''.join(traceback.format_exception(*exc_info)) if exc_info else ''

    def format(self, record):
        json_log_object = {"#timestamp": datetime.utcnow().isoformat(),
                           "level": record.levelname,
                           "message": record.getMessage(),
                           "caller": record.filename + '::' + record.funcName
                           }
        json_log_object['data'] = {
            "python.logger_name": record.name,
            "python.module": record.module,
            "python.funcName": record.funcName,
            "python.filename": record.filename,
            "python.lineno": record.lineno,
            "python.thread": record.threadName,
            "python.pid": record.process
        }
        if hasattr(record, 'props'):
            json_log_object['data'].update(record.props)
        if record.exc_info or record.exc_text:
            json_log_object['data'].update(self.get_exc_fields(record))
        return json.dumps(json_log_object)

json_logging.init_non_web(custom_formatter=CustomJSONLog, enable_json=True)

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

LOG_FILENAME = 'logging_rotating_json_example.out'
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=20, backupCount=5)
logger.addHandler(handler)

log_json = {}
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"

logger.info('Starting')
logger.debug('Working', extra={"props": log_json})
Note: check here for more info about json-logging-python
You can try this just before you write/append to the file. It checks whether the file has reached the maximum number of lines and, if so, removes one line from the beginning of the file before you append to the end as usual.
filename = 'file.txt'
maxLines = 100

with open(filename) as f:
    count = len(f.readlines())

if count > maxLines:
    with open(filename, 'r') as fin:
        data = fin.read().splitlines(True)
    with open(filename, 'w') as fout:
        fout.writelines(data[1:])

The easiest way of logging

I would like to collect info with the help of logging.
The idea is simple. I have a hash_value of some data which I want to write to the log, so I set up my logging this way:
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s :%(message)s', level=logging.INFO)
As you can see, the timestamp and the message are now automatically written to the log file; for example, I can use it like this:
logger.info('Initial data: {}'.format(data))
But what if I want the hash_value of my data to be written automatically, the way the timestamp is added now?
I looked through the documentation and found nothing useful. There is no attribute for an arbitrary variable in the logging module.
So I am forced to do it in an awkward way, like this:
hash_value = hash(data)
logger.info('Initial data: {} {}'.format(hash_value, data))
I would expect this code:
logging.basicConfig(format='%(asctime)s: %(variable)s :%(message)s', level=logging.INFO)
and
logger.info('Initial data: {}'.format(hash_value, data))
to do the job, but it does not work (and basically it should not), and I did not find a solution in the documentation.
So, how do I avoid the awkward code
logger.info('Initial data: {} {}'.format(hash_value, data))
that I am using now?
import logging
import sys

MY_PARAMS = ("variable1", "var2", )

class ExtraFilter(logging.Filter):
    def filter(self, record):
        # This filter is used by the second, simpler handler
        # to avoid duplicate logging entries if the "extra" keyword was passed.
        # Check all of your custom params:
        # if all of them are present, the record should be filtered out
        # (* all, because if any of them is missing there would be a silent
        # exception and the record won't be logged at all).
        # Below is just an example of how to check.
        # You could use something like this:
        # if all(hasattr(record, param) for param in MY_PARAMS): return False
        if hasattr(record, "variable1"):
            return False
        return True

# init logging
log = logging.getLogger()

# init handlers and formatters
h1 = logging.StreamHandler(sys.stdout)
f1 = logging.Formatter('%(asctime)s: %(variable1)s: %(var2)s: %(message)s')
h2 = logging.StreamHandler(sys.stdout)
f2 = logging.Formatter('%(asctime)s: %(message)s')
h1.setFormatter(f1)
h2.setFormatter(f2)
h2.addFilter(ExtraFilter())
log.addHandler(h1)
log.addHandler(h2)

# example of data:
extra = {"variable1": "test1", "var2": "test2"}

log.setLevel(logging.DEBUG)
log.debug("debug message", extra=extra)
log.info("info message")
The above code will produce following output:
2017-11-04 09:16:36,787: test1: test2: debug message
2017-11-04 09:16:36,787: info message
It is not awkward code; you want to add two pieces of information, so you must either pass two parameters to format or concatenate the string more "manually".
You could go with:
logging.info("initial data " + str(hash_value) + " " + str(data))
Or you could change the "data" object so that its __str__ or __repr__ method adds the hash by itself (preferably __repr__ in this case):
class Data():
    ...
    def __repr__(self):
        return str(self.hash()) + " " + str(self.data)
Which in this case will print the hash and the string version of the data parameter (or simply whatever you want to show as a string), passing only one parameter to the string format.
Anyway, you could make the formatting string prettier with:
logging.info("Initial data {hash} {data}".format(hash=hash_value, data=data))
By the way, in C++ and Java you would also need to declare two "entries" for those two attributes. In Java it would be something like this:
LOGGING.info("Initial data {} {}", hash, data);

How to log time spent from when script started instead of actual time when using python logging module?

I am looking for a way to make the Python logging module display the time elapsed since the script started instead of the current time.
Use %(relativeCreated)s in your format string, as indicated in the documentation.
Update: You can use normal Python format specifiers to control precision, e.g. %(relativeCreated).0f to show floating point values with zero decimal places.
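For instance, a quick sketch using relativeCreated (which is measured in milliseconds since the logging module was loaded, normally close enough to script start):
import logging
import time

logging.basicConfig(format="+%(relativeCreated).0fms %(levelname)s %(message)s",
                    level=logging.DEBUG)

logging.debug("starting")
time.sleep(0.5)
logging.debug("roughly half a second later")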
You can subclass logging.Formatter and reimplement formatTime. Something like that:
import logging
from datetime import datetime

start_time = datetime.now()

class MyFormatter(logging.Formatter):
    def formatTime(self, record, datefmt=None):
        delta = (datetime.now() - start_time).total_seconds()
        return "{}".format(delta)
And then:
handler = logging.StreamHandler()
fmt = MyFormatter('%(filename)s %(levelname)-8s [%(asctime)s] %(message)s')
handler.setFormatter(fmt)

log = logging.getLogger('main')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log.debug("=)")
You could try something like this.
Define a custom formatter at the beginning of your script:
import time
import logging
import datetime as dt

class MyFormatter(logging.Formatter):
    def __init__(self, fmt=None, datefmt=None):
        super(MyFormatter, self).__init__(fmt, datefmt)
        self.reftime = dt.datetime.fromtimestamp(time.mktime(time.localtime()))

    def formatTime(self, record, datefmt=None):
        ctime = dt.datetime.fromtimestamp(time.mktime(self.converter(record.created)))
        # Express the elapsed time as a struct_time counted from the Unix epoch
        # so that time.strftime below can render it.
        ct = time.gmtime((ctime - self.reftime).total_seconds())
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            t = time.strftime("%H:%M:%S", ct)
            s = "%s,%03d" % (t, record.msecs)
        return s
Then set up your logging system:
handler = logging.StreamHandler()
handler.setFormatter(MyFormatter())
logger = logging.getLogger()
logger.addHandler(handler)
etc...
