Python logger is not writing to the file - python

I'm trying to create a log file inside a directory using the code below:
def createLogger(name, log_path=None):
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.setLevel(logging.ERROR)
    logger.setLevel(logging.INFO)
    logger.setLevel(logging.CRITICAL)
    formatter = logging.Formatter("%(asctime)s - %(levelname)-8s - %(message)s")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    if log_path is not None:
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
name = "temp_logs"
file = open(os.path.join(f'{current_date}', f'{name}_{current_timestamp}.txt'), 'w')
log_path = f"{current_date}/{name}_{current_timestamp}.txt"
logger = createLogger(name = name ,log_path = log_path)
write_message = logger.debug('This is a test file')
file.writelines(str(write_message))
file.close()
However, it just writes None as the message inside the file.
Am I missing something here? Please help.

There are several issues. setLevel sets a threshold, and when it is called multiple times the last call wins (i.e. CRITICAL here), so you will not see any debug, info, warning or error messages.
Also, you are writing to the log file directly: logger.debug() returns None, which is exactly the None that ends up in your file. What's the point of setting up a logger with a formatter and then bypassing it?
Try this:
Leave only this one setLevel call: logger.setLevel(logging.DEBUG) in createLogger() and then:
name = "temp_logs"
logger = createLogger(name = name ,log_path = './test.log')
logger.debug('This is a debug message')
logger.error('This is an error')
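Putting it together, a minimal corrected version of createLogger might look like this (just a sketch, keeping your original format string and the ./test.log path from above):

import logging

def createLogger(name, log_path=None):
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)  # a single threshold; handlers can filter further if needed
    formatter = logging.Formatter("%(asctime)s - %(levelname)-8s - %(message)s")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    if log_path is not None:
        file_handler = logging.FileHandler(log_path)  # the handler opens/creates the file itself
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger

logger = createLogger("temp_logs", log_path="./test.log")
logger.debug('This is a test file')  # goes to the console and to ./test.log; no manual file writes needed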

Related

Log files get reset on restart of the application instead of appending new logs to them

import logging
def setup_logger(logger_name, log_file, level=logging.DEBUG):
    l = logging.getLogger(logger_name)
    formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(message)s')
    fileHandler = logging.FileHandler(log_file, mode='w')
    fileHandler.setFormatter(formatter)
    streamHandler = logging.StreamHandler()
    streamHandler.setFormatter(formatter)
    l.setLevel(level)
    l.addHandler(fileHandler)
    l.addHandler(streamHandler)
I'm trying to write different logs (levels) to different files. The functionality works fine, but whenever the application is restarted the log files get reset instead of appending the new data.
#method called from the different method
setup_logger('logger', 'login.log')
setup_logger('logger_market', 'transaction.log')
logger = logging.getLogger('logger')
logger_market = logging.getLogger('logger_market')
logger.info(f'Test for the login.log file')
logger_market.info(f'this to test')
I'm new to logging in python and the documentation is a bit complex for me at the moment.
I think you do not need to set the mode: mode='w' truncates the file every time setup_logger runs, which is why the logs get reset on restart. FileHandler appends by default, so
just use the following: fileHandler = logging.FileHandler(log_file)

Python3 logging

I'm trying to create my own logger in Python 3.6.8 to send output both to stdout and to a log file chosen by date: if a log file for today's date doesn't exist yet it gets created, and if there already is a file for the same date it is appended to.
from datetime import date
import logging
import logging.handlers
class Log:
    def __init__(self):
        pass

    def getCleanerLogger(self, moduleName, logFolder, format):
        filename = logFolder + str(date.today()) + '-log.log'
        handler = logging.FileHandler(filename)
        shandler = logging.StreamHandler()
        shandler.setLevel(logging.INFO)
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter(format)
        handler.setFormatter(formatter)
        shandler.setFormatter(formatter)
        logger = logging.getLogger(moduleName)
        logger.addHandler(handler)
        logger.addHandler(shandler)
        print("I've been called")
        return logger
import Conf
conf = Conf.configuration()
print(conf['logFolder'] + " " + conf['logFormat'])
logger = Log()
logger = logger.getCleanerLogger("Log", conf['logFolder'], conf['logFormat'])
logger.info('initializing')
logger.debug('initializing debug')
In the JSON conf file these are the keys I load:
"logFolder": "log/",
"logFormat": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
The log file gets created with the correct logic, but nothing is logged to either the console or the log file; only the prints go to stdout, and no error or exception is raised. I really don't understand why this isn't working. I can only log with logging.root.level('msg') after loading a basicConfig.
Every handler has its own logging level, but the logger also has its own level, which is checked first and takes priority, so you have to set it to a level that doesn't block the handlers - i.e.
logger.setLevel(logging.DEBUG)
Minimal working code with a few smaller changes.
It doesn't use a settings file, so everyone can easily copy it and run it.
from datetime import date
import os
import logging
import logging.handlers
class Log:
    def get_cleaner_logger(self, module_name, log_folder, format):
        if not os.path.exists(log_folder):
            os.makedirs(log_folder)
        filename = os.path.join(log_folder, date.today().strftime('%Y-%m-%d-log.log'))
        print(filename)
        logger = logging.getLogger(module_name)
        print('before:', logger.level)
        logger.setLevel(logging.DEBUG)
        print('after:', logger.level)
        formatter = logging.Formatter(format)
        handler = logging.FileHandler(filename)
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        shandler = logging.StreamHandler()
        shandler.setLevel(logging.INFO)
        shandler.setFormatter(formatter)
        logger.addHandler(shandler)
        print("I've been called")
        return logger
conf = {
    "logFolder": "log/",
    "logFormat": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
}
logger = Log()
logger = logger.get_cleaner_logger("Log", conf['logFolder'], conf['logFormat'])
logger.info('initializing')
logger.debug('initializing debug')
BTW: I changed some names based on PEP 8 -- Style Guide for Python Code

Python logging - new log file each loop iteration

I would like to generate a new log file on each iteration of a loop in Python using the logging module. I am analysing data in a for loop, where each iteration of the loop contains information on a new object. I would like to generate a log file per object.
I looked at the docs for the logging module and there is capability to change log file on time intervals or when the log file fills up, but I cannot see how to iteratively generate a new log file with a new name. I know ahead of time how many objects are in the loop.
My imagined pseudo code would be:
import logging
for target in targets:
    logfile_name = f"{target}.log"
    logging.basicConfig(format='%(asctime)s - %(levelname)s : %(message)s',
                        datefmt='%Y-%m/%dT%H:%M:%S',
                        filename=logfile_name,
                        level=logging.DEBUG)
    # analyse target information
    logging.info('log target info...')
However, the logging information is always appended to the first log file, for target 1.
Is there a way to force a new log file at the beginning of each loop?
Rather than using logging directly, you need to use logger objects. Go through the docs here.
Create a new logger object as a first statement in the loop. The below is a working solution.
import logging
import sys
def my_custom_logger(logger_name, level=logging.DEBUG):
    """
    Method to return a custom logger with the given name and level
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    format_string = ("%(asctime)s — %(name)s — %(levelname)s — %(funcName)s:"
                     "%(lineno)d — %(message)s")
    log_format = logging.Formatter(format_string)
    # Creating and adding the console handler
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(log_format)
    logger.addHandler(console_handler)
    # Creating and adding the file handler
    file_handler = logging.FileHandler(logger_name, mode='a')
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)
    return logger

if __name__ == "__main__":
    for item in range(10):
        logger = my_custom_logger(f"Logger{item}")
        logger.debug(item)
This writes to a different log file for each iteration.
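For context on why the original loop kept writing to the first file: logging.basicConfig() only configures the root logger if it has no handlers yet, so every call after the first one is silently ignored. On Python 3.8+ you can pass force=True to remove the existing handlers and reconfigure, which makes the per-target basicConfig approach from the question work too (a sketch under that assumption):

import logging

targets = ["a", "b", "c"]
for target in targets:
    # force=True (Python 3.8+) closes and removes existing root handlers
    # before reconfiguring, so each iteration really switches to a new file
    logging.basicConfig(format='%(asctime)s - %(levelname)s : %(message)s',
                        datefmt='%Y-%m/%dT%H:%M:%S',
                        filename=f"{target}.log",
                        level=logging.DEBUG,
                        force=True)
    logging.info('log target info...')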
This might not be the best solution, but it will create a new log file for each iteration. What it does is add a new file handler in each iteration.
import logging
targets = ["a", "b", "c"]
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
for target in targets:
    log_file = "{}.log".format(target)
    log_format = "|%(levelname)s| : [%(filename)s]--[%(funcName)s] : %(message)s"
    formatter = logging.Formatter(log_format)
    # create file handler and set the formatter
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    # add handler to the logger
    logger.addHandler(file_handler)
    # sample message
    logger.info("Log file: {}".format(target))
This is not necessarily the best answer, but it worked for my case, and I just wanted to put it here for future reference. I created a function that looks as follows:
def logger(filename, level=None, format=None):
    """A wrapper to the logging python module

    This module is useful for cases where we need to log in a for loop
    different files. It also will allow more flexibility later on how the
    logging format could evolve.

    Parameters
    ----------
    filename : str
        Name of logfile.
    level : str, optional
        Level of logging messages, by default 'info'. Supported are: 'info'
        and 'debug'.
    format : str, optional
        Format of logging messages, by default '%(message)s'.

    Returns
    -------
    logger
        A logger object.
    """
    levels = {"info": logging.INFO, "debug": logging.DEBUG}
    if level is None:
        level = levels["info"]
    else:
        level = levels[level.lower()]
    if format is None:
        format = "%(message)s"
    # https://stackoverflow.com/a/12158233/1995261
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logger = logging.basicConfig(filename=filename, level=level, format=format)
    return logger
As you can see (you might need to scroll down the code above to see the return logger line), I am using logging.basicConfig(). All modules I have in my package that log stuff, have the following at the beginning of the files:
import logging
# import other stuff

logger = logging.getLogger()

class SomeClass(object):
    def some_method(self):
        logger.info("Whatever")
        # ... stuff
When doing a loop, I call things this way:
if __name__ == "__main__":
    for i in range(1, 11, 1):
        directory = "_{}".format(i)
        if not os.path.exists(directory):
            os.makedirs(directory)
        filename = directory + "/training.log"
        logger(filename=filename)
I hope this is helpful.
I'd like to slightly modify #0Nicholas's method. The direction is right, but the first FileHandler will keep logging to the first log file as long as the function is running. Therefore, we want to pop the handler out of the logger's handlers list at the end of each iteration:
import logging
targets = ["a", "b", "c"]
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
log_format = "|%(levelname)s| : [%(filename)s]--[%(funcName)s] : %(message)s"
formatter = logging.Formatter(log_format)
for target in targets:
    log_file = f"{target}.log"
    # create file handler and set the formatter
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)
    # add handler to the logger
    logger.addHandler(file_handler)
    # sample message
    logger.info(f"Log file: {target}")
    # close the log file
    file_handler.close()
    # remove the handler from the logger. The default behavior is to pop out
    # the last added one, which is the file_handler we just added at the
    # beginning of this iteration.
    logger.handlers.pop()
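A small variation on the clean-up step: logger.removeHandler(file_handler) detaches exactly the handler that was added, instead of relying on it being the last entry in logger.handlers:

    # equivalent clean-up at the end of the loop body that doesn't depend on list order
    file_handler.close()
    logger.removeHandler(file_handler)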
Here is a working version for this problem. I was only able to get it to work if the targets already have .log before going into the loop, so you may want to add one more pass before the loop to give every target a .log extension (a sketch of that follows the code below).
import logging
targets = ["a.log","b.log","c.log"]
for target in targets:
    log = logging.getLogger(target)
    log.setLevel(logging.INFO)  # without this the logger stays at the default WARNING level and info() is dropped
    formatter = logging.Formatter('%(asctime)s - %(levelname)s : %(message)s', datefmt='%Y-%m/%dT%H:%M:%S')
    fileHandler = logging.FileHandler(target, mode='a')
    fileHandler.setFormatter(formatter)
    streamHandler = logging.StreamHandler()
    streamHandler.setFormatter(formatter)
    log.addHandler(fileHandler)
    log.addHandler(streamHandler)
    log.info('log target info...')
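The extension-normalising pass mentioned above could be as simple as this sketch (assuming targets is a plain list of names):

# give every target a .log extension before entering the logging loop
targets = [t if t.endswith(".log") else f"{t}.log" for t in targets]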

RotatingFileHandler with console logging

I've written an init function to configure the python logging module to log to a file and the console. I want to limit the log file size using the RotatingFileHandler. The code below doesn't cause any errors and does everything I want, except it doesn't rotate the logs. I set a low file size to test things out.
How can I configure to use rotating logs and the console with different formats like below?
import logging, logging.handlers
LOG_LEVEL = logging.DEBUG
CONSOLE_LEVEL = logging.DEBUG
def init_logger(fullpath, console_level=CONSOLE_LEVEL, log_level=LOG_LEVEL):
    """
    Setup the logger object

    Args:
        fullpath (str): full path to the log file
    """
    logging.basicConfig(level=LOG_LEVEL,
                        format='%(asctime)s %(threadName)-10s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d-%y %H:%M:%S',
                        filename=fullpath,
                        filemode='w')
    _logger = logging.getLogger('_root')
    _logger.setLevel(log_level)
    log_handler = logging.handlers.RotatingFileHandler(filename=fullpath,
                                                       maxBytes=50, backupCount=10)
    log_handler.setLevel(log_level)
    _logger.addHandler(log_handler)
    console = logging.StreamHandler()
    console.setLevel(console_level)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)
    logging.debug("Creating log file")
This is your logging's structure:
---root <- I mean the real root, which you get from logging.getLogger("")
------NormalFileHandler <- configured by logging.basicConfig; this handler won't rotate the file
------StreamHandler
------_root <- your '_root' logger, although I don't understand why you want this
---------RotatingFileHandler <- this one will rotate the file
And then you call logging.debug("Creating log file"), which is the same as calling debug on the root logger: logging.getLogger().debug(...). So the record goes to the StreamHandler and the NormalFileHandler, not to the RotatingFileHandler.
That's why you see the file isn't getting rotated.
The correct configuration should be:
def init_logger(fullpath, console_level=CONSOLE_LEVEL, log_level=LOG_LEVEL):
    """
    Setup the logger object

    Args:
        fullpath (str): full path to the log file
    """
    logger = logging.getLogger('YourLogger')
    logger.setLevel(log_level)
    log_handler = logging.handlers.RotatingFileHandler(filename=fullpath,
                                                       maxBytes=50, backupCount=10)
    log_handler.setLevel(log_level)
    formatter = logging.Formatter('%(asctime)s %(threadName)-10s %(name)-12s %(levelname)-8s %(message)s')
    log_handler.setFormatter(formatter)
    logger.addHandler(log_handler)
    console = logging.StreamHandler()
    console.setLevel(console_level)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    # You could add this to the root logger instead, but that is not recommended:
    # use your own logger rather than the root logger, or it can cause problems.
    logger.addHandler(console)
    logger.debug("Creating log file")
And then when you want to use the logger, you should use:
logger = logging.getLogger('YourLogger')
logger.info(...)
logger.debug(...)

Using python's RotatingFileHandler with no logger object

I have my own logging system that is not based on a Logger object.
Can I still enjoy the services of RotatingFileHandler? I've tried to define the handler after I set the log file, but I don't see it performing any rotation (no files added named, for example, mylog.log.1, mylog.log.2 ...).
What am I missing?
handler = RotatingFileHandler(self.fullName+'.debug',mode='a', maxBytes=1, backupCount=1)
logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
fileHandler = RotatingFileHandler('/var/log/blabla.log',
maxBytes=10 * 1000 * 1000,
encoding='utf8',
backupCount=5)
fileHandler.setFormatter(logFormatter)
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
rootLogger.addHandler(fileHandler)
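If you really want to use RotatingFileHandler without any logger, keep in mind that rotation is triggered inside the handler's emit(): if you write to the file yourself, the handler never sees a record and never rolls over. You can feed it LogRecord objects by hand instead; a minimal sketch (the file name and message here are made up for illustration):

import logging
from logging.handlers import RotatingFileHandler

handler = RotatingFileHandler('mylog.log', mode='a',
                              maxBytes=10 * 1000 * 1000, backupCount=5)
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))

# build a LogRecord by hand and push it through the handler;
# handle() calls emit(), which checks shouldRollover() and rotates when needed
record = logging.makeLogRecord({
    "msg": "message from my own logging system",
    "levelno": logging.INFO,
    "levelname": "INFO",
})
handler.handle(record)

Also note that maxBytes=1 in your snippet means practically every record triggers a rollover, and with backupCount=1 only a single .1 backup file is ever kept.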
