I am trying to automate a personal telescope. So far I have written a camera module and a test driver to run everything, and I am working on a logger module for debugging future modules. I am having a very hard and frustrating time getting errors from the camera module sent to the logger module and then written to a log file on my desktop. Please help.
I have tried over and over to get any kind of log message from cAmera.py to show up in the log file. The problem is satisfying the log function's 'self' parameter when calling it from inside logger.py. As a workaround I moved logger in front of self in the camera module's method signature, which let me print a test message, but not the messages inside the if statement.
logger.py
import logging
from main.common import cAmera
from main.common.cAmera import *
class main:
    # creates filehandlers
    fh = logging.FileHandler('C:\\Users\\Nicholas Pepin\\Desktop\\CameraErrorLog.log')
    fh.setLevel(logging.DEBUG)
    # Creates console logger for higher level logging
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    # Creates Formatters
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # adds handlers to the logger
    logger = logging.getLogger()
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.setLevel(logging.DEBUG)
    logger.info('Camera process has started : ' + __name__)
    cAmera.Camera.log(logging, logger)  # need to fulfil self
    # Theoretically, it works, but on 'cAmera.Camera.log()' it says that self needs to be fulfilled
    # and I have no idea how to do that. If someone could help guide me to a paper or video that would
    # explain it, that would be very helpful.
cAmera.py
import os
import time
import win32com.client
import logging
class Camera:
    def __init__(self, logger, output_directory):
        # __all__ = ['__init__', 'Camera']  # for some reason, opens up the class and __init__ file?
        # output_directory starts from user path
        self.logger = logger
        logger = logging.getLogger(__name__)
        self.output_directory = output_directory
        self.camera = win32com.client.Dispatch("MaxIm.CCDCamera")  # Sets the camera connection path to the CCDCamera
        try:
            self.camera.LinkEnabled = True
            logger.info("Camera is connected : " + __name__)
        except:
            logger.critical("Camera is not connected : " + __name__)
        self.camera.DisableAutoShutdown = True  # All of these settings are just basic camera setup settings.
        self.camera.AutoDownload = True

    def expose(self, exposure_time, filter, type="light"):
        if type == "light":
            type = 1
        elif type == "dark":
            type = 0
        else:
            print("ERROR: Invalid exposure type.")
            return
        self.camera.SetFullFrame()
        self.camera.Expose(exposure_time, type, filter)
        time.sleep(exposure_time)
        while self.camera.ImageReady == False:
            time.sleep(1)
        if self.camera.ImageReady:
            # self.camera.StartDownload
            path = os.path.expanduser('~')
            self.camera.SaveImage(os.path.join(path, 'Desktop', "test_pictures.fit"))

    def log(logger, self):
        logger.info("Camera test " + __name__)
        if self.camera.LinkEnabled:
            logger.info("Camera is connected : " + __name__)
        elif not self.camera.LinkEnabled:
            logger.critical("Camera cannot connect : " + __name__)

    def set_gain(self):
        pass

    def set_binning(self):
        pass
test_driver.py
from main.common.cAmera import *
from main.common.logger import *
#activates the camera function
camera_object = Camera("camera_work")
camera_object.expose(10, 1, type="dark")
#activates the logger function
main('camera_work')
I hope to end up with an in-depth log file on my computer detailing the different ways the code failed. Also, if anyone can critique my code and help me become a better programmer, that would be much appreciated.
You have logger as a parameter in the class's __init__, but you don't use it.
class Camera:
    def __init__(self, logger, output_directory):
        self.logger = logger
        logger = logging.getLogger(__name__)
Remove logger=logging.getLogger(__name__)
class Camera:
    def __init__(self, logger, output_directory):
        self.logger = logger
and run as
Camera(logger, "/output/directory")
or assign this logger as a default value in the class
class Camera:
    def __init__(self, output_directory, logger=None):
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger(__name__)
and use it with an existing logger
logger = ...
Camera("directory/output", logger)
or with the logger created inside the class
Camera("directory/output")
Now you should use self.logger in all methods in Camera
    def __init__(self, output_directory, logger=None):
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger(__name__)
        self.output_directory = output_directory
        self.camera = win32com.client.Dispatch("MaxIm.CCDCamera")
        try:
            self.camera.LinkEnabled = True
            self.logger.info("Camera is connected : {}".format(__name__))
        except Exception as ex:
            self.logger.critical("Camera is not connected : {} ({})".format(__name__, ex))
        self.camera.DisableAutoShutdown = True
        self.camera.AutoDownload = True
The same in the method `log`:
    def log(self):
        self.logger.info("Camera test : {}".format(__name__))
        if self.camera.LinkEnabled:
            self.logger.info("Camera is connected : {}".format(__name__))
        elif not self.camera.LinkEnabled:
            self.logger.critical("Camera cannot connect : {}".format(__name__))
To use Camera you have to create an instance:
cam = cAmera.Camera("directory/output", logger)
cam.log()
If you create the instance before calling main (which should probably be a def instead of a class), then pass it in as an argument:
camera_object = Camera("directory/output")
camera_object.expose(10, 1, type="dark")
main('camera_work', camera_object)
def main(text, camera):
    cam = camera
    cam.log()
or, better, do it after you create the logger:
main('camera_work')
def main(text):
    logger = ...
    camera_object = Camera("directory/output", logger)
    camera_object.expose(10, 1, type="dark")
    camera_object.log()
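Putting the pieces together, here is a minimal sketch of how the two modules could be wired up. It assumes the constructor signature suggested above (output_directory first, logger optional), the instance method log(self), and the import path from the question's test_driver.py; the MaxIm.CCDCamera Dispatch call will of course only succeed on a machine with MaxIm DL installed:

import logging
import os

from main.common.cAmera import Camera


def main(output_directory):
    # configure a named logger with a file handler and a console handler
    logger = logging.getLogger('camera')
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    fh = logging.FileHandler(os.path.join(os.path.expanduser('~'), 'Desktop', 'CameraErrorLog.log'))
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    logger.info('Camera process has started : ' + __name__)

    # pass the configured logger into the class; log() is called on the
    # instance, so 'self' is filled in automatically
    camera = Camera(output_directory, logger=logger)
    camera.expose(10, 1, type="dark")
    camera.log()


if __name__ == '__main__':
    main('camera_work')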
Related
I'm setting the log level based on a configuration. Currently I call Settings() from the inside of Logger, but I'd like to pass it instead or set it globally - for all loggers.
I do not want to call getLogger(name, debug=Settings().isDebugMode()).
Any ideas? Thanks!
class Logger(logging.getLoggerClass()):
    def __init__(self, name):
        super().__init__(name)
        debug_mode = Settings().isDebugMode()
        if debug_mode:
            self.setLevel(level=logging.DEBUG)
        else:
            self.setLevel(level=logging.INFO)


def getLogger(name):
    logging.setLoggerClass(Logger)
    return logging.getLogger(name)
The usual way to achieve this is to set a level only on the root logger and leave all other loggers at NOTSET. The effect is that every logger behaves as if it had the level that is set on root. You can read about the mechanics of how that works in the documentation of setLevel().
Here is what that would look like in code:
import logging
root = logging.getLogger()
root.setLevel(logging.DEBUG) # set this based on your Settings().isDebugMode()
logger = logging.getLogger('some_logger')
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(name)s: %(message)s'))
logger.addHandler(sh)
logger.debug('this will print')
root.setLevel(logging.INFO) # change level of all loggers (global log level)
logger.debug('this will not print')
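To tie this back to the configuration object from the question, the root level can be derived directly from Settings().isDebugMode(); a minimal sketch, assuming Settings is importable wherever the logging setup runs:

import logging

root = logging.getLogger()
root.setLevel(logging.DEBUG if Settings().isDebugMode() else logging.INFO)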
I have a logger in one of my files which has a handler attached to it, and its level has been set to DEBUG. Despite that, when running my program, the debug statement is not printed to the console. The root logger is still set to WARNING, but I understood that if I add a handler to a logger, the record is passed to that handler and logged before being passed up to the parent loggers (eventually reaching the root). That doesn't seem to be the case. For context, here is the code in the file:
logger = logging.getLogger(__name__)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
class OpenBST:
    app_data_folder = Path(user_data_dir(appname=lib_info.lib_name,
                                          appauthor="HydrOffice"))

    def __init__(self,
                 progress: CliProgress = CliProgress(use_logger=True),
                 app_data_path: Path = app_data_folder) -> None:
        app_data_path.mkdir(exist_ok=True, parents=True)
        self.progress = progress
        self._prj = None
        self._app_info = OpenBSTInfo(app_data_path=app_data_path)
        self.current_project = None
        logging.debug("App instance started")
And below is where it's called in an example script:
import logging
import os
from pathlib import Path

from hyo2.openbst.lib.openbst import OpenBST

logging.basicConfig()
logger = logging.getLogger(__name__)
project_directory = Path(os.path.expanduser("~/Documents/openbst_projects"))
project_name = "test_project"
# Create App instance
obst = OpenBST()
Why doesn't the logger.debug('App instance started') call print out to the console?
EDIT:
The code below includes the suggestion from @Jesse R
__init__ was modified as such:
class OpenBST:
    app_data_folder = Path(user_data_dir(appname=lib_info.lib_name,
                                          appauthor="HydrOffice"))

    def __init__(self,
                 progress: CliProgress = CliProgress(use_logger=True),
                 app_data_path: Path = app_data_folder) -> None:
        app_data_path.mkdir(exist_ok=True, parents=True)

        logger = logging.getLogger(__name__)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        logger.addHandler(console_handler)

        self.progress = progress
        self._prj = None
        self._app_info = OpenBSTInfo(app_data_path=app_data_path)
        self.current_project = None
        logger.debug("App instance started")
No output is generated (exit code 0).
My understanding was that a handler attached to a logger would be run before the record is passed up the chain (where the root is still set to WARNING).
You call logging.debug("App instance started"), which goes through the root logger, not through the logger that you declare with getLogger. You can set the debug level universally for logging with
logging.basicConfig(level=logging.DEBUG)
Also, calling logger = logging.getLogger(__name__) outside of the class does not help here, because inside __init__ you never use that logger and call the logging module directly instead. You can create a new logger by moving that declaration inside the class.
For Example:
import logging
class SampleClass:
    def __init__(self):
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        logger.info('will log')
        logging.info('will not log')


SampleClass()
Running:
$ python logtest.py
INFO:__main__:will log
I want to change the log-level temporarily.
My current strategy is to use mocking.
with mock.patch(...):
    my_method_which_does_log()
All logging.info() calls inside the method should get ignored and not logged to the console.
How do I implement the ... so that logs of level INFO get ignored?
The code is single-process and single-thread and executed during testing only.
I want to change the log-level temporarily.
A way to do this without mocking is logging.disable
import logging
import unittest

class TestSomething(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.WARNING)

    def tearDown(self):
        logging.disable(logging.NOTSET)
With logging.disable(logging.WARNING), every message at WARNING level and below is suppressed for each test in the TestSomething class, so only ERROR and CRITICAL messages get through. (setUp and tearDown call disable at the start and end of each test, which keeps the override scoped per test and seems a bit cleaner than mocking.)
To unset this temporary throttling, call logging.disable(logging.NOTSET):
If logging.disable(logging.NOTSET) is called, it effectively removes this overriding level, so that logging output again depends on the effective levels of individual loggers.
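As a minimal standalone illustration of the disable/NOTSET pair outside of a test class (disabling at INFO here, since the goal in the question is to silence logging.info() calls):

import logging

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s %(message)s')

logging.disable(logging.INFO)      # suppress INFO and everything below it
logging.info("hidden")             # not emitted
logging.warning("still visible")   # emitted

logging.disable(logging.NOTSET)    # remove the overriding level again
logging.info("visible again")      # emitted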
I don't think mocking is going to do what you want. The loggers are presumably already instantiated in this scenario, and level is an instance variable for each of the loggers (and also any of the handlers that each logger has).
You can create a custom context manager. That would look something like this:
Context Manager
import logging
class override_logging_level():
    "A context manager for temporarily setting the logging level"

    def __init__(self, level, process_handlers=True):
        self.saved_level = {}
        self.level = level
        self.process_handlers = process_handlers

    def __enter__(self):
        # Save the root logger
        self.save_logger('', logging.getLogger())
        # Iterate over the other loggers
        for name, logger in logging.Logger.manager.loggerDict.items():
            self.save_logger(name, logger)

    def __exit__(self, exception_type, exception_value, traceback):
        # Restore the root logger
        self.restore_logger('', logging.getLogger())
        # Iterate over the loggers
        for name, logger in logging.Logger.manager.loggerDict.items():
            self.restore_logger(name, logger)

    def save_logger(self, name, logger):
        # Save off the level
        self.saved_level[name] = logger.level
        # Override the level
        logger.setLevel(self.level)
        if not self.process_handlers:
            return
        # Iterate over the handlers for this logger
        for handler in logger.handlers:
            # No reliable name. Just use the id of the object
            self.saved_level[id(handler)] = handler.level

    def restore_logger(self, name, logger):
        # It's possible that some intervening code added one or more loggers...
        if name not in self.saved_level:
            return
        # Restore the level for the logger
        logger.setLevel(self.saved_level[name])
        if not self.process_handlers:
            return
        # Iterate over the handlers for this logger
        for handler in logger.handlers:
            # Reconstruct the key for this handler
            key = id(handler)
            # Again, we could have possibly added more handlers
            if key not in self.saved_level:
                continue
            # Restore the level for the handler
            handler.setLevel(self.saved_level[key])
Test Code
# Setup for basic logging
logging.basicConfig(level=logging.ERROR)
# Create some loggers - the root logger and a couple others
lr = logging.getLogger()
l1 = logging.getLogger('L1')
l2 = logging.getLogger('L2')
# Won't see this message due to the level
lr.info("lr - msg 1")
l1.info("l1 - msg 1")
l2.info("l2 - msg 1")
# Temporarily override the level
with override_logging_level(logging.INFO):
    # Will see
    lr.info("lr - msg 2")
    l1.info("l1 - msg 2")
    l2.info("l2 - msg 2")
# Won't see, again...
lr.info("lr - msg 3")
l1.info("l1 - msg 3")
l2.info("l2 - msg 3")
Results
$ python ./main.py
INFO:root:lr - msg 2
INFO:L1:l1 - msg 2
INFO:L2:l2 - msg 2
Notes
The code would need to be enhanced to support multithreading; for example, logging.Logger.manager.loggerDict is a shared variable that's guarded by locks in the logging code.
Using @cryptoplex's context-manager approach, here's the official version from the logging cookbook:
import logging
import sys
class LoggingContext(object):
    def __init__(self, logger, level=None, handler=None, close=True):
        self.logger = logger
        self.level = level
        self.handler = handler
        self.close = close

    def __enter__(self):
        if self.level is not None:
            self.old_level = self.logger.level
            self.logger.setLevel(self.level)
        if self.handler:
            self.logger.addHandler(self.handler)

    def __exit__(self, et, ev, tb):
        if self.level is not None:
            self.logger.setLevel(self.old_level)
        if self.handler:
            self.logger.removeHandler(self.handler)
        if self.handler and self.close:
            self.handler.close()
        # implicit return of None => don't swallow exceptions
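A short usage sketch for the scenario in the question: wrapping the root logger and raising its level to WARNING makes the logging.info() calls inside the method no-ops for the duration of the with block, and the previous level is restored on exit (my_method_which_does_log is the placeholder name from the question):

import logging

with LoggingContext(logging.getLogger(), level=logging.WARNING):
    my_method_which_does_log()  # INFO records are dropped here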
You could use dependency injection to pass the logger instance to the method you are testing. It is a bit more invasive though since you are changing your method a little, however it gives you more flexibility.
Add the logger parameter to your method signature, something along the lines of:
def my_method(your_other_params, logger):
    pass
In your unit test file:
if __name__ == "__main__":
    # define the logger you want to use:
    logging.basicConfig(stream=sys.stderr)
    logging.getLogger("MyTests.test_my_method").setLevel(logging.DEBUG)

...

def test_my_method(self):
    test_logger = logging.getLogger("MyTests.test_my_method")
    # pass your logger to your method
    my_method(your_normal_parameters, test_logger)
python logger docs: https://docs.python.org/3/library/logging.html
I use this pattern to write all logs to a list. It ignores logs of level INFO and below.
import logging
from unittest import mock

logs = []


def my_log(logger_self, level, *args, **kwargs):
    if level > logging.INFO:
        logs.append((args, kwargs))


with mock.patch('logging.Logger._log', my_log):
    my_method_which_does_log()
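A follow-up note on this pattern: it patches the private Logger._log method, so it intercepts records from every logger in the process, and after the with block the captured records can be inspected directly in a test (the assertion below is only an example):

with mock.patch('logging.Logger._log', my_log):
    my_method_which_does_log()

# only records above INFO were kept, so a method that logs purely at
# INFO or DEBUG leaves the list empty
assert logs == []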
I am trying to use logging to create log files for a program. I'm doing something like this:
import logging
import os
import time

if not os.path.exists(r'.\logs'):
    os.mkdir(r'.\logs')

logging.basicConfig(filename=rf'.\logs\log_{time.ctime().replace(":", "-").replace(" ", "_")}.log',
                    format='%(asctime)s %(name)s %(levelname)s %(message)s',
                    level=logging.DEBUG)


def foo():
    # do stuff ...
    logging.debug('Done some stuff')
    # do extra stuff ...
    logging.debug('Did extra stuff')
    # some parallel map that does NOT use logging in the mapping function
    logging.debug('Done mapping')


if __name__ == '__main__':
    foo()
All goes well and the log is created with the correct information in it:
logs
log_Wed_Feb_14_09-23-32_2018.log
Only that at the end, for some reason, it also creates 2 additional log files and leaves them empty:
logs
log_Wed_Feb_14_09-23-32_2018.log
log_Wed_Feb_14_09-23-35_2018.log
log_Wed_Feb_14_09-23-39_2018.log
The timestamps are only a few seconds apart, but all of the logging still only goes in the first log file as it should.
Why is it doing this? Also is there a way to stop it from giving me extra empty files aside from just deleting any empty logs at the end of the program?
Solved. Kind of.
The behaviour with basicConfig kept happening, so I tried to make a custom logger class:
class Logger:
    """Class used to encapsulate logging logic."""

    __slots__ = ['dir',
                 'level',
                 'formatter',
                 'handler',
                 'logger']

    def __init__(self,
                 name: str = '',
                 logdir: str = r'.\logs',
                 lvl: int = logging.INFO,
                 fmt: str = '%(asctime)s %(name)s %(levelname)s %(message)s',
                 hdl: str = rf'.\logs\log_{time.ctime().replace(":", "-").replace(" ", "_")}.log'):
        print('construct')
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        self.dir = logdir
        self.level = lvl
        self.formatter = logging.Formatter(fmt=fmt)
        self.handler = logging.FileHandler(filename=hdl)
        self.handler.setFormatter(self.formatter)
        self.logger = logging.getLogger(name)
        self.logger.setLevel(self.level)
        self.logger.addHandler(self.handler)

    def log(self, msg: str):
        """Logs the given message to the set level of the logger."""
        self.logger.log(self.level, msg)

    def cleanup(self):
        """Iterates through the root level of the log folder, removing all log files that have a size of 0."""
        for log_file in (rf'{self.dir}\{log}' for log in next(os.walk(self.dir))[2]
                         if log.endswith('.log') and os.path.getsize(rf'{self.dir}\{log}') == 0):
            os.remove(log_file)

    def shutdown(self):
        """Prepares and executes the shutdown and cleanup actions."""
        logging.shutdown()
        self.handler.close()
        self.cleanup()
And I tried to pass it as a parameter to functions like this:
def foo(logger = Logger('foo_logger')):
But this approach made it construct a whole new logger each time I called the log method, which again led to multiple files. By using one instance of Logger and defaulting the arguments to None, I solved the problem of multiple files for this case.
However, the initial basicConfig situation remains a mystery.
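For reference, a minimal sketch of the "one instance, default to None" pattern described above (the _LOGGER and get_logger names are just examples):

_LOGGER = None  # single shared Logger instance for the whole program


def get_logger():
    # create the Logger (and therefore its log file) only once, on first use
    global _LOGGER
    if _LOGGER is None:
        _LOGGER = Logger('app_logger')
    return _LOGGER


def foo(logger=None):
    logger = logger if logger is not None else get_logger()
    logger.log('Done some stuff')

As for the remaining basicConfig mystery: one plausible explanation, not confirmed in this thread, is the parallel map mentioned in foo. On Windows, multiprocessing workers start by re-importing the main module, so module-level logging setup runs again in each worker with a fresh timestamp and creates an extra, empty log file; keeping the basicConfig call under if __name__ == '__main__': avoids that.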
I am using a class to create logs for a program I am developing. In the log files I am getting duplicate lines. My code is as follows:
import logging
class Log():
    def __init__(self, name=''):
        self.name = 'name'

    def InitLog(self, name):
        self.logger = logging.getLogger(name)
        self.hdlr = logging.FileHandler('/' + name + '.log')
        self.formatter = logging.Formatter('%(message)s')
        self.hdlr.setFormatter(self.formatter)
        self.logger.addHandler(self.hdlr)
        self.logger.setLevel(logging.INFO)

    def E(self, msg):
        self.logger.error(msg)

    def I(self, msg):
        self.logger.info(msg)
Calling the logger:
# Setup Log
log_url_thread_worker=Log()
log_url_thread_worker.InitLog(cyberlocker)
# Logging something
log_url_thread_worker.I(error)
Can anyone see if I'm doing something stupid?
Thanks!
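The thread ends here, but a likely culprit (an assumption, since the calling code isn't shown in full) is that InitLog runs more than once for the same logger name: logging.getLogger(name) returns the same logger object every time, so each call adds another FileHandler and every message is then written once per attached handler. A minimal guard might look like this:

    def InitLog(self, name):
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.INFO)
        # only attach a handler if this logger doesn't have one yet;
        # repeated InitLog calls would otherwise duplicate every line
        if not self.logger.handlers:
            self.hdlr = logging.FileHandler('/' + name + '.log')
            self.hdlr.setFormatter(logging.Formatter('%(message)s'))
            self.logger.addHandler(self.hdlr)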