Two writes using Python logging

I have two files of classes with essentially the same logging setup:
"""code - of 1 class the parent with mods from Reut's answer"""
logger = None
def __init__(self, verboseLevel=4):
'''
Constructor
'''
loggingLevels={1: logging.DEBUG,
2: logging.INFO,
3: logging.WARNING,
4: logging.ERROR,
5: logging.CRITICAL}
#debug(), info(), warning(), error(), critical()
if not tdoa.logger:
tdoa.logger=logging.getLogger('TDOA')
if (verboseLevel in range(1,6)):
logging.basicConfig(format='%(message)s',level=loggingLevels[verboseLevel])
else:
logging.basicConfig(format='%(levelname)s:%(message)s',level=logging.DEBUG)
tdoa.logger.critical("Incorrect logging level specified!")
self.logger = tdoa.logger
self.logger.debug("TDOA calculator using Newton's method.")
self.verboseLevel = verboseLevel
"""code of second "subclass" (with Reut's changes) (who's function is printing twice):"""
def __init__(self, verboseLevel=1, numberOfBytes=2, filename='myfile.log', ipaddr='127.0.0.1',getelset= True):
#debug(), info(), warning(), error(), critical()
# go through all this to know that only one logger is instantiated per class
# Set debug level
# set up various handlers (remove Std_err one for deployment unless you want them going to screen
# create console handler with a higher log level
if not capture.logger:
capture.logger=logging.getLogger('SatGeo')
console = logging.StreamHandler()
if (verboseLevel in range(1,6)):
console.setLevel(self.loggingLevels[verboseLevel])
logging.basicConfig(format='%(message)s',level=self.loggingLevels[verboseLevel],
filename=filename,filemode='a') #format='%(levelname)s:%(message)s'
else:
logging.basicConfig(format='%(message)s',level=logging.DEBUG,
filename=filename,filemod='a')
console.setLevel(logging.DEBUG)
capture.logger.critical("Incorrect logging level specified!")
# create formatter and add it to the handlers
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#console.setFormatter(formatter)
# add the handlers to logger
handlers=capture.logger.handlers
if (console not in handlers):
capture.logger.addHandler(console)
else:
capture.logger.critical("not adding handler")
self.logger=capture.logger
I have a function in the "called class (satgeo)" that 'writes' to the logger:
def printMyself(self, rowDict):
    ii = 1
    for res in rowDict:
        self.logger.critical('{0}************************************'.format(ii))
        ii += 1
        for key, value in res.items():
            self.logger.critical(' Name: {0}\t\t Value:{1}'.format(key, value))
When I call it by itself I get one output per self.logger call; but when I call it from the tdoa class it writes TWICE:
for example:
Name: actualLat Value:36.455444
Name: actualLat Value:36.455444
Any idea of how to fix this?

You are adding a handler to the shared logger each time you construct a class instance, using this line:
self.logger.addHandler(console)
So if you do something like:
for _ in range(x):
    SubClass1()
some_operation_with_logging()
You should be seeing x messages, since you just added x handlers to the logger by making x calls to the parent's __init__.
You don't want to be doing that; make sure you add a handler only once!
You can access a logger's list of handlers via logger.handlers.
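For example, here is a minimal sketch of such a guard (assuming the 'TDOA' logger from the question; note that a freshly created handler object will never compare equal to anything already in logger.handlers, so checking by type is safer):
import logging

logger = logging.getLogger('TDOA')

def ensure_console_handler():
    # A brand-new StreamHandler is never "in" logger.handlers,
    # so look for an existing handler of the same type instead.
    if not any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
        logger.addHandler(logging.StreamHandler())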
Also, if you're using the same logger in both classes (named "TDOA") by using this line in both:
self.logger = logging.getLogger('TDOA')
Make sure you either synchronize the logger instantiation, or use separate loggers.
What I use:
Instead of having a private logger for each instance, you probably want a logger for all of them, or to be more precise - for the class itself:
class ClassWithLogger(object):
    logger = None

    def __init__(self):
        if not ClassWithLogger.logger:
            ClassWithLogger.logger = logging.getLogger("ClassWithLogger")
            ClassWithLogger.logger.addHandler(logging.StreamHandler())
        # log using ClassWithLogger.logger ...
        # convenience:
        self.logger = ClassWithLogger.logger
And now you know logger is instantiated once per class (instead of once per instance), and all instances of a certain class use the same logger.
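A quick way to check that invariant:
a = ClassWithLogger()
b = ClassWithLogger()
assert a.logger is b.logger  # one shared logger, one handler, no duplicate lines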

I have since found a few links suggesting that submodules should take a logger as an input during __init__:
def __init__(self, pattern=None, action=None, logger=None):
    # Set up logging for the class
    self.log = logger or logging.getLogger(__name__)
    self.log.addHandler(logging.NullHandler())
Note: the NullHandler is added to avoid a warning if the user decides not to provide a logger.
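A caller could then use the class either way (the CmdParser name and arguments here are hypothetical):
parser = CmdParser(pattern=my_pattern, action=my_action)       # falls back to the module logger
parser = CmdParser(pattern=my_pattern, logger=configured_log)  # uses the caller's logger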
Then, if you want to debug your submodule:
if __name__ == "__main__":
log_level = logging.INFO
log = logging.getLogger('cmdparser')
log.setLevel(log_level)
fh = logging.FileHandler('cmdparser.log')
fh.setLevel(log_level)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(log_level)
# create formatter and add it to the handlers
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
log.addHandler(fh)
log.addHandler(ch)
<myfunction>(pattern,action, log)
Then provide the log to the module at instantiation.
I hope this helps.

Related

Is this a bad approach to creating a global logger, and how can I improve it?

Given my code, I would create a root logger instance logger = GlobalLogger(level=10).logger in __init__.py and import it in the submodules where I need logging. Is there a better way to design this class so callers don't have to reach for the .logger attribute to get the underlying logging.Logger, or a better design approach overall?
import typing
import logging

class GlobalLogger:
    MINIMUM_GLOBAL_LEVEL = logging.DEBUG
    GLOBAL_HANDLER = logging.StreamHandler()
    LOG_FORMAT = "[%(asctime)s] - %(levelname)s - [%(name)s.%(funcName)s:%(lineno)d] - %(message)s"
    LOG_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"

    def __init__(self, level: typing.Union[int, str] = MINIMUM_GLOBAL_LEVEL):
        self.level = level
        self.logger = self._get_logger()
        self.log_format = self._log_formatter()
        self.GLOBAL_HANDLER.setFormatter(self.log_format)

    def _get_logger(self):
        logger = logging.getLogger(__name__)
        logger.setLevel(self.level)
        logger.addHandler(self.GLOBAL_HANDLER)
        return logger

    def _log_formatter(self):
        return logging.Formatter(fmt=self.LOG_FORMAT, datefmt=self.LOG_DATETIME_FORMAT)
There is no point in doing this, because loggers in Python are already managed by a single global manager instance in the stdlib logging/__init__.py.
Instead, just use logger = logging.getLogger(name) to get or create a logger at the top of each module that needs to log events. If you provide the same name, you get the same logger back; using logging.getLogger(__name__) to have one logger per module is just a popular convention.
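A minimal sketch of that convention (the module and function names are hypothetical):
# mypackage/worker.py
import logging

# Repeated getLogger(__name__) calls anywhere return this same object.
logger = logging.getLogger(__name__)

def do_work():
    logger.debug("doing work")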

Python: how to set the global log level properly?

I'm setting the log level based on a configuration. Currently I call Settings() from inside Logger, but I'd like to pass it in instead, or set it globally for all loggers.
I do not want to call getLogger(name, debug=Settings().isDebugMode()).
Any ideas? Thanks!
class Logger(logging.getLoggerClass()):
    def __init__(self, name):
        super().__init__(name)
        debug_mode = Settings().isDebugMode()
        if debug_mode:
            self.setLevel(level=logging.DEBUG)
        else:
            self.setLevel(level=logging.INFO)

def getLogger(name):
    logging.setLoggerClass(Logger)
    return logging.getLogger(name)
The usual way to achieve this is to set a level only on the root logger and keep all other loggers at NOTSET. This has the effect that every logger behaves as if it had the level set on root. You can read about the mechanics of how that works in the documentation of setLevel().
Here is what that would look like in code:
import logging

root = logging.getLogger()
root.setLevel(logging.DEBUG)  # set this based on your Settings().isDebugMode()

logger = logging.getLogger('some_logger')
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(name)s: %(message)s'))
logger.addHandler(sh)

logger.debug('this will print')
root.setLevel(logging.INFO)  # change level of all loggers (global log level)
logger.debug('this will not print')

Subclassing logging.Logger to add own functionality

I'm writing code for a robotic system that needs to log to different places, depending on the type of deployment, the time during startup, ...
I'd like to have an option to create a basic logger, then add handlers when appropriate.
I have a basic function in place to create a StreamHandler:
def setup_logger() -> logging.Logger:
    """Setup logging.

    Returns logger object with (at least) 1 StreamHandler to stdout.

    Returns:
        logging.Logger: configured logger object
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()  # handler to stdout
    stream_handler.setLevel(logging.ERROR)
    stream_handler.setFormatter(MilliSecondsFormatter(LOG_FMT))
    logger.addHandler(stream_handler)
    return logger
When the system has internet access, I'd like to add a mail handler (a separate class, subclassed from logging.handlers.BufferingHandler).
(The example below uses a simple rotating file handler to keep things simple.)
def add_rotating_file(logger: logging.Logger) -> logging.Logger:
    rot_fil_handler = logging.handlers.RotatingFileHandler(LOGFILE,
                                                           maxBytes=LOGMAXBYTES,
                                                           backupCount=3)
    rot_fil_handler.setLevel(logging.DEBUG)
    rot_fil_handler.setFormatter(MilliSecondsFormatter(LOG_FMT))
    logger.addHandler(rot_fil_handler)
    return logger
Usage would be:
logger = setup_logger()
logger = add_rotating_file(logger)
This looks "wrong" to me. Giving the logger to the function as an argument and then returning it seems weird, and I would think I'd be better off creating a class that subclasses logging.Logger.
So something like this:
class pLogger(logging.Logger):
    def __init__(self):
        super().__init__()
        self._basic_configuration()

    def _basic_configuration(self):
        self.setLevel(logging.DEBUG)
        stream_handler = logging.StreamHandler()  # handler to stdout
        stream_handler.setLevel(logging.ERROR)
        stream_handler.setFormatter(MilliSecondsFormatter(LOG_FMT))
        self.addHandler(stream_handler)

    def add_rotating_handler(self):
        rot_file_handler = logging.handlers.RotatingFileHandler(LOGFILE,
                                                                maxBytes=LOGMAXBYTES,
                                                                backupCount=3)
        self.addHandler(rot_file_handler)
However, the super().__init__() call needs the logger name as an argument, and, as far as I know, the root logger should be created using logging.getLogger(), i.e. without a name.
Another way would be to not subclass anything, but create a self.logger in my class, which seems wrong as well.
I found this stackexchange question which seems related but I can't figure out how to interpret the answer.
What's the "correct" way to do this?
There's no particular reason I can see for returning the logger from add_rotating_file(), if that's what seems odd to you. And this (having handlers added based on conditions) doesn't seem like a reason to create a logger subclass. There are numerous ways you could arrange some basic handlers and some additional handlers based on other conditions, but it seems simplest to do something like this:
def setup_logger() -> logging.Logger:
    formatter = MilliSecondsFormatter(LOG_FMT)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(sys.stdout)  # default is stderr
    handler.setLevel(logging.ERROR)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    if internet_is_available:
        handler = MyCustomEmailHandler(...)  # with whatever params you need
        handler.setLevel(...)
        handler.setFormatter(...)  # a suitable formatter instance
        logger.addHandler(handler)
    if rotating_file_wanted:
        handler = RotatingFileHandler(LOGFILE,
                                      maxBytes=LOGMAXBYTES,
                                      backupCount=3)
        handler.setLevel(...)
        handler.setFormatter(...)  # a suitable formatter instance
        logger.addHandler(handler)
    # and so on for other handlers
    return logger  # and you don't even need to do this - you could pass the logger in instead

Logging across sub-modules – how to access parent logger name from sub-modules?

I am trying to add logging to my Python application, which has several modules and submodules. Several sites say to create child loggers in modules. The advantage I see is that, since a child logger inherits the parent logger's config, this keeps the logging output consistent (handlers, formatters, ...).
So far I am defining the __main__ class's logger name in each class and concatenating it with the current class's name (parentName.childName) to get the module's class loggers. It does not feel right, nor scalable. How could I improve this so I don't have to hard-code the __main__ class's logger name in each class? Here is what my code looks like:
Py file that I run:
### app.py
import logging
from SubModule1 import *

class myApplication():
    loggerName = 'myAPI'

    def __init__(self, config_item_1=None, config_item_2=None, ..., config_item_N=None):
        self.logger = self.logConfig()
        self.logger.info("Starting my Application")
        self.object1 = mySubClass1(arg1, arg2, ... argM)

    def logConfig(self):
        fileLogFormat = '%(asctime)s - %(levelname)s - %(message)s'
        consoleLogFormat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        # create logger
        #logger = logging.getLogger(__name__)
        logger = logging.getLogger(self.loggerName)
        logger.setLevel(logging.DEBUG)
        #### CONSOLE HANDLER ####
        # create console handler and set level to info
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        # create formatter
        consoleFormatter = logging.Formatter(consoleLogFormat)
        # add formatter to ch
        ch.setFormatter(consoleFormatter)
        # add ch to logger
        logger.addHandler(ch)
        #### FILE HANDLER ####
        # create file handler and set level to debug
        fh = logging.FileHandler('API_Log.txt')
        fh.setLevel(logging.DEBUG)
        # create formatter
        fileFormatter = logging.Formatter(fileLogFormat)
        # add formatter to fh
        fh.setFormatter(fileFormatter)
        # add fh to logger
        logger.addHandler(fh)
        logger.info('Logging is started!!')
        return logger

    def connect(self):
        self.logger.info("Connecting to API")
        self.object1.connect()

if __name__ == "__main__":
    config = {"config_item_1": x,
              "config_item_2": y,
              ...
              "config_item_N": z}
    myApp = myApplication(config['config_item_1'], config['config_item_2'], ..., config['config_item_N'])
    myApp.connect()
Module:
### SubModule1.py
import logging
import app

class mySubClass1():
    appRootLoggerName = 'myAPI'

    def __init__(self, item1):
        self.logger = logging.getLogger(self.appRootLoggerName + '.' + mySubClass1.__name__)
        self.logger.debug("mySubClass1 object created - mySubClass1")
        self.classObject1 = item1
The line below is the one that bothers me. What alternative to self.appRootLoggerName + '.' + mySubClass1.__name__ would keep the same logging configuration shared across my application's modules/classes?
logging.getLogger(self.appRootLoggerName + '.' + mySubClass1.__name__)
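One common alternative (a sketch, assuming the modules live inside a package named myAPI) is to derive each module's logger from __name__, so the root logger name is never hard-coded:
# myAPI/SubModule1.py  (hypothetical package layout)
import logging

# __name__ evaluates to "myAPI.SubModule1" here, which is automatically
# a child of the "myAPI" logger configured by the application.
logger = logging.getLogger(__name__)

class mySubClass1():
    def __init__(self, item1):
        logger.debug("mySubClass1 object created")
        self.classObject1 = item1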

Python - Avoid passing logger reference between functions?

I have a simple Python script that uses the in-built logging.
I'm configuring logging inside a function. Basic structure would be something like this:
#!/usr/bin/env python
import logging
import ...

def configure_logging():
    logger = logging.getLogger("my logger")
    logger.setLevel(logging.DEBUG)
    # Format for our loglines
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    # Setup console logging
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # Setup file logging as well
    fh = logging.FileHandler(LOG_FILENAME)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    return logger

def count_parrots():
    ...
    logger.debug??

if __name__ == '__main__':
    logger = configure_logging()
    logger.debug("I'm a log file")
    parrots = count_parrots()
I can call logger fine from inside __main__. However, how do I call logger from inside the count_parrots() function? What's the most Pythonic way of configuring a logger like this?
You can either use the root (default) logger, and thus the module-level functions logging.debug(), ..., or get your own logger in the function that uses it.
Indeed, the getLogger function is a factory-like function with a registry (singleton-like), i.e. it always returns the same instance for a given logger name.
You can thus get your logger in count_parrots by simply using
logger = logging.getLogger("my logger")
at the beginning. However, the convention is to use a dotted hierarchical name for your logger. See http://docs.python.org/library/logging.html#logging.getLogger
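For instance, a minimal sketch of that dotted-name convention (the names are hypothetical):
import logging

parent = logging.getLogger("my.logger")           # configure handlers on this one
child = logging.getLogger("my.logger.parrots")    # inherits the parent's handlers
child.debug("this record propagates up to 'my.logger'")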
EDIT:
You can use a decorator to add the logging behaviour to your individual functions, for example:
def debug(loggername):
    logger = logging.getLogger(loggername)
    def log_(enter_message, exit_message=None):
        def wrapper(f):
            def wrapped(*args, **kargs):
                logger.debug(enter_message)
                r = f(*args, **kargs)
                if exit_message:
                    logger.debug(exit_message)
                return r
            return wrapped
        return wrapper
    return log_

my_debug = debug('my.logger')

@my_debug('enter foo', 'exit foo')
def foo(a, b):
    return a + b
If you prefer, you can "hardcode" the logger name and drop the top-level closure and my_debug.
You can just do:
logger = logging.getLogger("my logger")
in your count_parrots() function. When you pass the name that was used earlier (i.e. "my logger"), the logging module returns the same instance that was created for that name.
Update: from the logging tutorial (emphasis mine):

getLogger() returns a reference to a logger instance with the specified name if it is provided, or root if not. The names are period-separated hierarchical structures. Multiple calls to getLogger() with the same name will return a reference to the same logger object.
The typical way to handle logging is to have a per-module logger stored in a global variable. Any functions and methods within that module then just reference that same logger instance.
This is discussed briefly in the intro to the advanced logging tutorial in the documentation:
http://docs.python.org/howto/logging.html#advanced-logging-tutorial
You can pass logger instances around as parameters, but doing so is typically rare.
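A minimal sketch of that per-module pattern (the module and function names are hypothetical):
# parrots.py
import logging

# One module-level logger, shared by every function in this module.
logger = logging.getLogger(__name__)

def count_parrots():
    logger.debug("counting parrots")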
I got confused by how global variables work in Python. Within a function you only need to declare global logger if you were doing something like logger = logging.getLogger("my logger") and hoping to modify the global logger.
So, to modify your example, you can create a global logger object at the start of the file. If your module can be imported by another one, you should add a NullHandler so that an importer of the library who doesn't want logging enabled won't have any issues with your lib (ref).
#!/usr/bin/env python
import logging
import ...

logger = logging.getLogger("my logger")
logger.addHandler(logging.NullHandler())  # addHandler returns None, so call it separately rather than chaining

def configure_logging():
    logger.setLevel(logging.DEBUG)
    # Format for our loglines
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    # Setup console logging
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # Setup file logging as well
    fh = logging.FileHandler(LOG_FILENAME)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    logger.addHandler(fh)

def count_parrots():
    ...
    logger.debug('counting parrots')
    ...
    return parrots

if __name__ == '__main__':
    configure_logging()
    logger.debug("I'm a log file")
    parrots = count_parrots()
If you don't need the log messages on your console, you can configure logging in a minimalist way with logging.basicConfig, as below.
Alternatively you can use tail -f myapp.log to see the messages on the console.
import logging

logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    filename='myapp.log',
                    level=logging.INFO)

def do_something():
    logging.info('Doing something')

def main():
    logging.info('Started')
    do_something()
    logging.info('Finished')

if __name__ == '__main__':
    main()
You can give the logger as an argument to count_parrots(). Or, what I would do: create a Parrots class and make the logger one of its attributes.
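A sketch of that class-based idea (the Parrots class is hypothetical):
import logging

class Parrots:
    def __init__(self, logger=None):
        # Fall back to a module-named logger if none is supplied.
        self.logger = logger or logging.getLogger(__name__)

    def count(self):
        self.logger.debug("counting parrots")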
