Python: logging to a file and also to sys.stdout

I need to write a function that logs to a file (using the logging module) and also prints the same content to the console at the same time.
What I have is:
import datetime
import logging

def printScreenAndLog(msg):
    log = logging.getLogger()
    log.info(msg)
    now = str(datetime.datetime.now())
    print now, "%s" % msg

def main():
    options, args = usage()
    log = logging.getLogger("CMDR")
    log.setLevel(logging.DEBUG)
    fh = logging.FileHandler('cmdr.log')
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    log.addHandler(fh)
    printScreenAndLog("Testing")

if __name__ == "__main__":
    main()

This function should do what you require:
def configure_logging():
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    # log to the console (stderr by default; pass stream=sys.stdout to basicConfig for stdout)
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    # also log to file
    formatter = logging.Formatter(log_format)
    handler = logging.FileHandler("cmdr.log")
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(formatter)
    logging.getLogger('').addHandler(handler)
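
Once configure_logging() has run, the printScreenAndLog helper is no longer needed: a single logging call reaches both the console and cmdr.log. A minimal usage sketch (reusing the "CMDR" logger name from the question):
import logging

configure_logging()

log = logging.getLogger("CMDR")
log.info("Testing")  # shows up on the console and in cmdr.log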

Did you try setting the logging level to INFO instead of DEBUG, or using log.debug(msg) in your printScreenAndLog function?

Related

Avoid showing logger entries in console

In my project I have one "basic" logging configuration that logs to the console:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
SERVER_FORMAT = "[%(asctime)s] %(levelname)s [%(name)s:%(filename)s:%(lineno)d] %(message)s"
DATETIME_FORMAT = "%d/%b/%Y %H:%M:%S"
logging.basicConfig(
    level=logging.INFO,
    format=SERVER_FORMAT,
    datefmt=DATETIME_FORMAT,
    handlers=[stream_handler])
I also have another logger, user_logger, which should not print anything to the console. Instead, it should store log entries in a variable.
user_logger = logging.getLogger('user_logger')
user_logger.setLevel(logging.INFO)
log_capture_string = io.StringIO()
log_capture_string_handler = logging.StreamHandler(log_capture_string)
log_capture_string_handler.setLevel(logging.INFO)
log_capture_string_handler.setFormatter(logging.Formatter(USER_FORMAT))
user_logger.handlers = [log_capture_string_handler]
The problem is that when I call:
user_logger.info('This should only be in "log_capture_string"')
it prints it to the console.
Do you know how to avoid that?
You should set user_logger.propagate = False (see the logging.Logger.propagate docs):
If this evaluates to false, logging messages are not passed to the handlers of ancestor loggers.
That way the root logger's handler never sees user_logger's records, so nothing is written to the console.
This example outputs nothing to the console:
import io
import logging

stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
SERVER_FORMAT = "[%(asctime)s] %(levelname)s [%(name)s:%(filename)s:%(lineno)d] %(message)s"
DATETIME_FORMAT = "%d/%b/%Y %H:%M:%S"
logging.basicConfig(
    level=logging.INFO,
    format=SERVER_FORMAT,
    datefmt=DATETIME_FORMAT,
    handlers=[stream_handler])

user_logger = logging.getLogger('user_logger')
user_logger.propagate = False
user_logger.setLevel(logging.INFO)
log_capture_string = io.StringIO()
log_capture_string_handler = logging.StreamHandler(log_capture_string)
log_capture_string_handler.setLevel(logging.INFO)
log_capture_string_handler.setFormatter(logging.Formatter(SERVER_FORMAT))
user_logger.handlers = [log_capture_string_handler]
user_logger.info('This should only be in "log_capture_string"')
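
Not part of the original answer, but to verify that the record really landed in the in-memory buffer you can read it back with the StringIO object's getvalue() method:
# The captured, formatted record(s) can be read back from the buffer:
captured = log_capture_string.getvalue()
print(captured)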

Why doesn't the logger print anything?

import logging
import sys

class A(object):
    def __init__(self):
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
        ch.setFormatter(formatter)
        logger = logging.getLogger("logger_a")
        logger.setLevel(logging.DEBUG)
        logger.addHandler(ch)
        self.logger = logger

    def Xprint(self):
        self.logger.info("this log a!!")
        Xprint()

def Xprint():
    logger = logging.getLogger("logger_b")
    print logger.info("this log b!!")

a = A()
a.Xprint()
the output:
2019-10-17 19:02:20,574 logger_a INFO: this log a!!
None
why doesn't logger_b print anything?
The default log level is WARNING and logger_b has no handler of its own, so the info() call is dropped (and logger_b.info() returns None, which is what your print shows). If you want logger_b to log too, you need to do something like this:
$ cat log.py
import logging
import sys

class A(object):
    def __init__(self):
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
        ch.setFormatter(formatter)
        logger = logging.getLogger("logger_a")
        logger.setLevel(logging.DEBUG)
        logger.addHandler(ch)
        self.logger = logger
        self.handler = ch

    def Xprint(self):
        self.logger.info("this log a!!")
        Xprint(self.handler)

def Xprint(handler):
    logger = logging.getLogger('logger_b')  # no handler is configured yet
    logger.setLevel(logging.DEBUG)          # set the level
    logger.addHandler(handler)              # reuse the existing handler
    logger.info("this log b!!")

a = A()
a.Xprint()
Output:
$ python log.py
2019-10-17 17:02:26,418 logger_a INFO: this log a!!
2019-10-17 17:02:26,418 logger_b INFO: this log b!!
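
As an alternative sketch (not from the answer above), you can configure the root logger once with basicConfig and let both named loggers propagate to it, so neither logger_a nor logger_b needs its own handler:
import logging
import sys

# One root handler; child loggers propagate to it by default.
logging.basicConfig(
    stream=sys.stdout,
    level=logging.DEBUG,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s")

logging.getLogger("logger_a").info("this log a!!")
logging.getLogger("logger_b").info("this log b!!")  # both lines reach the root handler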

Right way to insert a variable in python logging format

I am not able to get this to work, probably due to a lack of understanding of the Python logging module.
The use case: include one variable, a "jobID", in all log messages. When multiple instances of this utility run in parallel on the same server, syslog or ../log/messages can then be parsed live based on this jobID. Here is an attempt with the LoggerAdapter approach (the lines that error are commented out):
def startlog(self, log_folder, testname=None):
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    ltime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if testname:
        logfile = "%s/%s_log_%s.log" % (log_folder, testname, ltime)
    else:
        logfile = "%s/log_%s.log" % (log_folder, ltime)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formated = logging.Formatter('%(asctime)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    # What is desired is -
    # formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    if not testname:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        logger.addHandler(fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formated)
        logger.addHandler(ch)
        # Next line errors -
        # logger = logging.LoggerAdapter(logger, {"jobid": self.jobdata.jobid})
        return logger
    else:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        self.log.addHandler(fh)
        # Next line errors -
        # logger = logging.LoggerAdapter(self.log, {"jobid": self.jobdata.jobid})
        return fh
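
For comparison, here is a minimal standalone LoggerAdapter sketch (the logger name 'job_runner' and the id 'JOB-42' are made up for illustration). The adapter merges its extra dict into every record logged through it, so %(jobid)s is satisfied for those records; records from other loggers (e.g. paramiko) that reach the same handler still lack the attribute, which is the same KeyError pitfall the filter attempt below runs into.
import logging
import sys

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(message)s'))

base = logging.getLogger('job_runner')  # hypothetical logger name
base.setLevel(logging.INFO)
base.addHandler(handler)

adapter = logging.LoggerAdapter(base, {"jobid": "JOB-42"})  # hypothetical job id
adapter.info("this record carries the jobid")
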
2nd Try with Filters:
def startlog(self, log_folder, t_name=None):
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    ltime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if t_name:
        logfile = "%s/%s_log_%s.log" % (log_folder, t_name, ltime)
    else:
        logfile = "%s/log_%s.log" % (log_folder, ltime)
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addFilter(ContextFilter(self.jobdata.jobid))
    formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    if not t_name:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        root.addHandler(fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formated)
        root.addHandler(ch)
        return root
    else:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        self.log.addHandler(fh)
        return fh

class ContextFilter(logging.Filter):
    """
    This is a filter which injects contextual information into the log.
    """
    def __init__(self, jobid):
        self.jobid = jobid

    def filter(self, record):
        record.jobid = self.jobid
        return True
The issue faced with filters is a KeyError raised for records coming from other modules (paramiko's transport.py), similar to "How to properly add custom logging filters in Python modules":
Traceback (most recent call last):
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 859, in emit
    msg = self.format(record)
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 732, in format
    return fmt.format(record)
  File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 474, in format
    s = self._fmt % record.__dict__
KeyError: 'jobid'
Logged from file transport.py, line 1567
My mistake: the filter needs to be added to the log handler, not the logger. A filter attached to a logger only applies to records created on that logger, while a filter attached to a handler runs for every record the handler emits, including records propagated from third-party loggers such as paramiko, so they all get a jobid before the formatter sees them. This is working so far:
def startlog(self, log_folder, t_name=None):
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    ltime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if t_name:
        logfile = "%s/%s_log_%s.log" % (log_folder, t_name, ltime)
    else:
        logfile = "%s/log_%s.log" % (log_folder, ltime)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    formatted_file = logging.Formatter('%(asctime)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    if not t_name:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formatted_file)
        fh.addFilter(ContextFilter(self.jobdata.jobid))
        logger.addHandler(fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formated)
        ch.addFilter(ContextFilter(self.jobdata.jobid))
        logger.addHandler(ch)
        return logger
    else:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formatted_file)
        fh.addFilter(ContextFilter(self.jobdata.jobid))
        self.log.addHandler(fh)
        return fh
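
A self-contained sketch of the handler-level filter (same ContextFilter shape as above, with a made-up 'JOB-42' id and a made-up third-party-style logger name) showing why the KeyError disappears: the handler's filter runs for every record it handles, including records propagated from other loggers, so %(jobid)s is always set before the formatter runs.
import logging
import sys

class ContextFilter(logging.Filter):
    """Inject the job id into every record that passes through the handler."""
    def __init__(self, jobid):
        logging.Filter.__init__(self)
        self.jobid = jobid

    def filter(self, record):
        record.jobid = self.jobid
        return True

root = logging.getLogger()
root.setLevel(logging.INFO)

ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(message)s'))
ch.addFilter(ContextFilter('JOB-42'))  # handler-level filter sees propagated records too
root.addHandler(ch)

# A record from a third-party-style logger also gets a jobid attached.
logging.getLogger('paramiko.transport').warning('no KeyError here')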

Refining the logging method in python tornado

I am trying to log the actions in a function, and I have written the following setup to log responses to different files based on the type of response, i.e. info, error, debug and warning.
logging.basicConfig(filename='indivi_service.log',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

def setup_logger(logger_name, log_file, level=logging.DEBUG):
    l = logging.getLogger(logger_name)
    formatter = logging.Formatter()
    fileHandler = logging.FileHandler(log_file)
    fileHandler.setFormatter(formatter)
    handler = TimedRotatingFileHandler(logger_name,
                                       when="d",
                                       interval=1,
                                       backupCount=100)
    l.setLevel(level)
    l.addHandler(fileHandler)
    l.addHandler(handler)

setup_logger('debug', r'debug')
setup_logger('error', r'error')
setup_logger('warning', r'warning')
setup_logger('info', r'info')

debug = logging.getLogger('debug')
error = logging.getLogger('error')
warning = logging.getLogger('warning')
info = logging.getLogger('info')

class Info(APIHandler):
    @gen.coroutine
    def post(self):
        req = json.loads(self.request.body)
        resp, content = client(item_id=req['item_id'])
        debug.debug(content)
        info.info('hello world')
        warning.warn('warning message')
        error.error('error message')
The problem I am facing is that each message is written twice every time I call the function.
For example, info.log contains:
hello world
hello world
Can anyone tell me why this is happening? It is the case with all the log files.
Thanks
The duplicate lines come from setup_logger attaching two handlers, a FileHandler(log_file) and a TimedRotatingFileHandler(logger_name), to the same logger; since log_file and logger_name are the same string here, both handlers write to the same file and every record shows up twice. Keep a single handler per logger. Try:
import logging
import logging.handlers

logging.basicConfig(filename='indivi_service.log',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

def setup_logger(logger_name, log_file, level=logging.DEBUG):
    l = logging.getLogger(logger_name)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler = logging.handlers.TimedRotatingFileHandler(str(log_file) + '.log', when="d", interval=1, backupCount=100)
    handler.setFormatter(formatter)
    l.setLevel(level)
    l.addHandler(handler)

setup_logger('debug', r'debug')
setup_logger('error', r'error')
setup_logger('warning', r'warning')
setup_logger('info', r'info')

debug = logging.getLogger('debug')
error = logging.getLogger('error')
warning = logging.getLogger('warning')
info = logging.getLogger('info')

if __name__ == "__main__":
    info.info('hello world')
    error.info('hello world')
After running this script, info.log has one 'hello world' and error.log also has only one 'hello world'.
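
One side note not covered in the answer: because these named loggers still propagate to the root logger configured by basicConfig, each record is also appended to indivi_service.log. If that duplication is not wanted, propagation can be turned off per logger:
# Optional: keep records out of the root handler's indivi_service.log file.
for name in ('debug', 'error', 'warning', 'info'):
    logging.getLogger(name).propagate = False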

How to rebuild the log file in Python?

Program A, in Python:
LOG_PATH = fdoc_log + "/store_plus.log"
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename=LOG_PATH, filemode='w', level=logging.DEBUG, format=FORMAT)
Program B, in bash:
mv store_plus.log store_plus.log.bk
Program A runs in the background and never stops. When Program B moves store_plus.log away, Program A no longer writes to a file named store_plus.log. How can I make Program A recreate store_plus.log?
Thank you
PS: this way:
f = open(LOG_PATH, "a")
f.close()
does not work.
An example taken from pymotw-logging, with all credit to Doug Hellmann:
import glob
import logging
import logging.handlers

LOG_FILENAME = '/tmp/logging_rotatingfile_example.out'

# Set up a specific logger with our desired output level
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.DEBUG)

# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=20, backupCount=5)
my_logger.addHandler(handler)

# Log some messages
for i in range(20):
    my_logger.debug('i = %d' % i)

# See what files are created
logfiles = glob.glob('%s*' % LOG_FILENAME)
for filename in logfiles:
    print filename
Using WatchedFileHandler also works; it reopens the log file when the file is moved or deleted:
import logging
import logging.handlers

logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)
ch = logging.handlers.WatchedFileHandler('a_log')
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
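
A small sketch (paths made up to mirror the question) of how this behaves in the mv scenario: after the rename, the next logging call makes WatchedFileHandler notice that the file's inode changed and reopen a fresh store_plus.log.
import logging
import logging.handlers
import os

logger = logging.getLogger('store_plus')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.WatchedFileHandler('store_plus.log')
logger.addHandler(handler)

logger.info('before the rename')                    # written to store_plus.log
os.rename('store_plus.log', 'store_plus.log.bk')    # what Program B's mv does
logger.info('after the rename')                     # handler reopens and recreates store_plus.log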
