twisted logging to screen(stdout) not working - python

i have this small program taken from here
from twisted.logger import Logger

# Module-level twisted logger.  Note: with no observer attached, emitted
# events are silently discarded, which is why nothing reaches the screen.
log = Logger()


def handleData(data):
    # Structured logging: `data` travels as an event field, not via %-format.
    log.debug("Got data: {data!r}.", data=data)


handleData({'a': 20})
This does not print anything to the screen. Why is that?

The default python logger is set to WARN level, so DEBUG messages are suppressed. You can make that code work like -
import sys

from twisted.logger import Logger, globalLogBeginner, textFileLogObserver

# FIX(review): twisted.logger.Logger has no setLevel() -- that is the stdlib
# `logging` API, and calling it raises AttributeError.  With twisted, events
# are dropped until an observer is started, so begin logging to stdout first.
globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])

log = Logger()


def handleData(data):
    log.debug("Got data: {data!r}.", data=data)


handleData({'a': 20})

i figured it out from here https://github.com/moira-alert/worker/blob/master/moira/logs.py:
import logging  # unused here; kept from the original snippet
import sys

from twisted.logger import Logger, LogLevel
from twisted.logger import globalLogPublisher
from twisted.logger import textFileLogObserver
from twisted.logger import FilteringLogObserver, LogLevelFilterPredicate, LogLevel

log = Logger()

# Let everything at debug level and above through the filter.
minimum_level = LogLevel.debug
level_predicate = LogLevelFilterPredicate(defaultLogLevel=minimum_level)

# Text observer writing to stdout, wrapped so the predicate is consulted
# before each event is rendered.
stdout_observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [level_predicate])
# NOTE(review): _encoding is a private attribute of the observer -- confirm
# the installed twisted version still honours it.
stdout_observer._encoding = "utf-8"
globalLogPublisher.addObserver(stdout_observer)

log.info("Start logging with {l}", l=minimum_level)


def handleData(data):
    log.debug("Got data: {data!r}.", data=data)


handleData({'a': 20})
Is there any simpler way? It seems overly complicated just to set the log level.

You didn't add an observer to your logger object.
Here is a simple observer that prints the log to stdout:
import sys
from twisted.logger import Logger, eventAsText, FileLogObserver

log = Logger()

# Render each event as one line of text and attach the observer directly
# to this logger so its output goes to stdout.
console_observer = FileLogObserver(
    sys.stdout,
    lambda event: eventAsText(event) + "\n",
)
log.observer.addObserver(console_observer)

sample = 2
log.debug("Got data: {data!r}", data=sample)

Related

python logger prints everything twice

I try using this openshift-restclient-python library. My custom Logger prints everything twice after I run into this bug.
modules/logging/Logging.py
import logging


class CustomLogger:
    # Shared class-level logger instance (lazily created singleton).
    logger = None

    def __init__(self):
        # Configure the shared logger only on the first construction.
        if CustomLogger.logger is None:
            configured = logging.getLogger(__name__)
            configured.setLevel(logging.DEBUG)
            console = logging.StreamHandler()
            console.setLevel(logging.DEBUG)
            configured.addHandler(console)
            CustomLogger.logger = configured

    def getLogger(self):
        return CustomLogger.logger


logger = CustomLogger().getLogger()
This is my main.py:
#!/usr/bin/env python3
# main.py: reproduces the double-logging problem together with CustomLogger.
import sys
from modules.logging.Logging import logger
from kubernetes import client, config
from openshift.dynamic import DynamicClient
from openshift.helper.userpassauth import OCPLoginConfiguration
import warnings

# Silence warnings (e.g. the insecure-SSL ones raised by verify_ssl = False).
warnings.filterwarnings("ignore")

# Placeholder credentials -- replace with real values before running.
apihost = 'myhost'
username = 'myuser'
password = 'insecure'
ca_cert = '/path/to/cert'

# Username/password login against the OpenShift API.
kubeConfig = OCPLoginConfiguration(ocp_username=username, ocp_password=password)
kubeConfig.host = apihost
kubeConfig.verify_ssl = False
kubeConfig.ssl_ca_cert = ca_cert
kubeConfig.get_token()
k8s_client = client.ApiClient(kubeConfig)

logger.warning("this is printed once")
# NOTE(review): after this call messages start appearing twice (see the
# captured output below) -- presumably something here touches the root
# logger configuration; confirm against the library source.
dyn_client = DynamicClient(k8s_client)
logger.warning("this is printed twice")

v1_projects = dyn_client.resources.get(api_version='project.openshift.io/v1', kind='Project')
project_list = v1_projects.get()
sys.exit(0)
Executing main.py, I get the following output:
this is printed once
ERROR:root:load cache error: ResourceList.__init__() got an unexpected keyword argument 'base_resource_lookup'
this is printed twice
WARNING:modules.logging.Logging:this is printed twice
If I do not use my custom logger but a simple configuration as below in main.py then everything is printed once.
import logging
logging.basicConfig(level=logging.DEBUG)
I have found this answer so I also tried removing any handler but the only handler is the one that contains my customization, so I end up with a basic logger.
What am I doing wrong?
Thanks
EDIT:
There is an easier way reproducing the issue.
I still have my custom logger as posted before but my main.py now:
#!/usr/bin/env python3
"""Minimal reproduction of double logging caused by logging.basicConfig()."""
import sys
import logging

from modules.logging.Logging import logger

print(logger.handlers)
print("number handlers: " + str(len(logger.handlers)))
logger.warning("this is printed once")

# basicConfig() installs a handler on the ROOT logger; records from our
# named logger still propagate there, so each message appears twice below.
logging.basicConfig(level=logging.DEBUG)
logger.warning("this is printed twice")
print("number handlers: " + str(len(logger.handlers)))

# FIX(review): iterate over a copy -- removing from the live `handlers`
# list while iterating it skips entries (it happens to work here only
# because there is a single handler).
for h in list(logger.handlers):
    logger.removeHandler(h)
print("number handlers: " + str(len(logger.handlers)))

# Still emitted: the record propagates to the root logger's handler.
logger.warning("still get printed")
sys.exit(0)
the output:
[<StreamHandler <stderr> (DEBUG)>]
number handlers: 1
this is printed once
this is printed twice
WARNING:modules.logging.Logging:this is printed twice
number handlers: 1
number handlers: 0
WARNING:modules.logging.Logging:still get printed
The code logging.basicConfig(level=logging.DEBUG) doesn't add another handler but cause everything to be logged. I actually only want the customized logs printed by the streamingHandler. How can I revert what is done by logging.basicConfig(level=logging.DEBUG)?
Please try removing this piece of code from the class CustomLogger:
# The three lines to remove: they attach the extra StreamHandler inside
# CustomLogger, which duplicates output once basicConfig() is also active.
stdout_handler = logging.StreamHandler()
stdout_handler.setLevel(logging.DEBUG)
logger.addHandler(stdout_handler)
By the way, I was struggling with the same problem. I found the answers using search on this website:
https://stackoverflow.com/a/26730545/15637940
https://stackoverflow.com/a/70876179/15637940
and a lot more answered questions...
I solved it that way:
class CustomLogger:
    # Lazily-created shared logger (one per process).
    logger = None

    def __init__(self):
        if CustomLogger.logger is None:
            # Send the root logger's output to /dev/null so records that
            # propagate to it are discarded instead of printed a second time.
            logging.basicConfig(filename='/dev/null', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
            named = logging.getLogger(__name__)
            named.setLevel(logging.DEBUG)
            fmt = '%(asctime)s | %(message)s'
            console = logging.StreamHandler()
            console.setLevel(logging.DEBUG)
            # NOTE(review): CustomFormatter is defined elsewhere in the project.
            console.setFormatter(CustomFormatter(fmt))
            named.addHandler(console)
            CustomLogger.logger = named

    def getLogger(self):
        return CustomLogger.logger


logger = CustomLogger().getLogger()
It seems that the library I am using at some place logs to the RootLogger. According to this answer logging.basicConfig() is a constructor of a streamHandler that connects to the RootLogger.
If I use logger = logging.getLogger('root') instead of logger = logging.getLogger(__name__) then everything is printed once. However, in that case everything the library logs on DEBUG-Level is printed to the terminal.
The line logging.basicConfig(filename='/dev/null', filemode='w', format='%(name)s - %(levelname)s - %(message)s') causes that everything logged by the root logger is printed to /dev/null.

Logging multiple scripts from the same directory in python

I have two python scripts in the same directory. I try to catch logging messages from both of them:
#script.py
import requests
import logging

# Module-level logger named after this module (e.g. "script").
logger = logging.getLogger(__name__)


class Downloader:
    """Fetch a single URL, logging each download attempt."""

    def __init__(self, url):
        self.url = url

    def download(self):
        logger.debug(f'Downloading {self.url}')
        response = requests.get(self.url, timeout=1)
        return response
#main.py
import logging
from script import Downloader
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.debug('create object')
d = Downloader('https://www.google.com')
res = d.download()
Basically I want to get rid of the debug-messages from the requests-module, so using logging.basicConfig() is not an option. But the way I do it, I do not get the debug-message from the imported script. Apparently because in script.py __name__ is not main.script.
How can I achieve this without hard coding anything to a string?
In a different module (e.g. logger.py):
import logging


def setup_logger(name, logfile, formatter, stream_handler=False, level=logging.DEBUG):
    """Function to create loggers.

    Args:
        name: logger name passed to logging.getLogger().
        logfile: path of the file the FileHandler writes to.
        formatter: logging.Formatter applied to both handlers.
        stream_handler: also attach a console handler when True.
        level: threshold set on the logger (default DEBUG).

    Returns:
        The configured logging.Logger instance.
    """
    # FIX(review): the original referenced an undefined name `log_file`
    # (the parameter is spelled `logfile`), raising NameError on every call.
    file_handler = logging.FileHandler(logfile)
    stdout_handler = logging.StreamHandler()
    file_handler.setFormatter(formatter)
    stdout_handler.setFormatter(formatter)
    logger = logging.getLogger(name)
    # Configure only once: repeated calls must not stack duplicate handlers.
    if not logger.handlers:
        logger.setLevel(level)
        logger.addHandler(file_handler)
        if stream_handler:
            logger.addHandler(stdout_handler)
    return logger
# Example formatter
formatter = logging.Formatter('%(asctime)s - %(levelname)s -> %(message)s\n')
# Generate the log object ('path_to_logfile' is a placeholder file path)
log = setup_logger('logger_name', 'path_to_logfile', formatter)
Import this log object from your other modules to use it: from logger import log

How to use loguru with standard loggers?

I would like to use Loguru to intercept loggers from other modules.
Could anyone of you tell how to approach this topic, please?
Example:
import logging
import requests
from loguru import logger

# Root logger: emit level and message for everything at DEBUG and above.
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
# Make sure the stdlib 'requests' logger emits DEBUG records too.
logger_requests = logging.getLogger('requests')
logger_requests.setLevel(logging.DEBUG)
logger.debug('Message through loguru')
requests.get('https://stackoverflow.com')
Execution:
$ python test_logger.py > /dev/null
2021-03-23 19:35:27.141 | DEBUG | __main__:<module>:10 - Message through loguru
DEBUG:Starting new HTTPS connection (1): stackoverflow.com:443
DEBUG:https://stackoverflow.com:443 "GET / HTTP/1.1" 200 None
Answering explicitly...
You want to redirect requests logging through loguru. As mentioned in the comments, you can use:
logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True)
However, it's also worth mentioning that decorating functions with logger.catch() will vastly improve error tracebacks should anything happen during script execution.
import logging
import sys

import requests
from loguru import logger


class InterceptHandler(logging.Handler):
    """
    Add logging handler to augment python stdlib logging.

    Logs which would otherwise go to stdlib logging are redirected through
    loguru.
    """

    # FIX(review): the page scrape turned the decorators into '#...' comment
    # lines; restore '@logger.catch' as the surrounding text describes.
    @logger.catch(default=True, onerror=lambda _: sys.exit(1))
    def emit(self, record):
        # Get corresponding Loguru level if it exists.
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno

        # Find caller from where originated the logged message.
        frame, depth = sys._getframe(6), 6
        while frame and frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1

        logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())


##########################################################################
# The logger.catch() decorator improves error tracebacks
# ^^^^^^^^^^^^^^
##########################################################################
@logger.catch(default=True, onerror=lambda _: sys.exit(1))
def requests_http_get(url=None):
    """Issue a GET request with stdlib logging routed through loguru."""
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
    # force=True replaces the handler installed by the previous call.
    logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True)
    logger_requests = logging.getLogger('requests')
    logger_requests.setLevel(logging.DEBUG)
    logger.debug('Message through loguru')
    requests.get(url)


if __name__ == "__main__":
    requests_http_get("https://stackoverflow.com/")

Why is my configured logger not being used?

Reading the logging HOWTO (https://docs.python.org/3/howto/logging.html) I came away under the impression that if I configured a logger, then I could subsequently request my logger from the factory via logging.getLogger() and python would know how to get the right logger (the one I configured) and everything would just auto-work, i.e. I wouldn't need to pass the configured logger instance around my code, I could just ask for it wherever I needed it. Instead, I'm observing something different.
File log_tester.py:
from util.logging_custom import SetupLogger
import logging
import datetime


def test():
    # Plain getLogger() returns the ROOT logger, not the one configured in
    # main() -- which is why this record never reaches the log file.
    root_like = logging.getLogger()
    root_like.debug("In test()")


def main():
    configured = SetupLogger("logger_test")
    configured.setLevel(logging.DEBUG)
    configured.info(f"now is {datetime.datetime.now()}", )
    configured.debug("In main()")
    test()


if __name__ == '__main__':
    main()
File util/logging_custom.py:
import os
import time
import logging
from logging.handlers import RotatingFileHandler


def SetupLogger(name_prefix):
    """Create (or fetch) a logger that writes to log/<name_prefix>.log."""
    # Make sure the log directory exists before the handler opens the file.
    if not os.path.exists("log"):
        os.makedirs("log")
    record_format = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(message)s')
    rotating = RotatingFileHandler(time.strftime(f"log/{name_prefix}.log"), maxBytes=5000000, backupCount=10)
    rotating.setFormatter(record_format)
    rotating.setLevel(logging.DEBUG)
    # The logger name includes __name__, so callers using a different name
    # (or none) get a different logger -- the root cause of the question.
    named_logger = logging.getLogger(f"{name_prefix} {__name__}")
    named_logger.addHandler(rotating)
    return named_logger
When I run this code only the debug statement that is in main() ends up in the log file. The debug statement from test() ends up I'm not sure where exactly.
Contents of log/logger_test.log:
2019-02-07 09:14:39,906.906 INFO now is 2019-02-07 09:14:39.906848
2019-02-07 09:14:39,906.906 DEBUG In main()
My expectation was that In test() would also show up in my log file. Have I made some assumptions about how python logging works that are untrue? How do I make it so that all of the logging in my program (which has many classes and modules) goes to the same configured logger? Is that possible without passing around a logger instance everywhere, after it's created in main()?
Thanks.
The getLogger function will return the logger with the given name (it behaves like a singleton):
if it doesn't exist, it creates it
if it already exists, it returns it
Then what you could do is:
util/logging_custom.py
def SetupLogger(logger_name, level=logging.INFO):
    """Attach a rotating file handler to the logger named `logger_name`."""
    if not os.path.exists("log"):
        os.makedirs("log")
    record_format = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(message)s')
    rotating = RotatingFileHandler(time.strftime(f"log/{logger_name}.log"), maxBytes=5000000, backupCount=10)
    rotating.setFormatter(record_format)
    rotating.setLevel(level)
    logging.getLogger(logger_name).addHandler(rotating)
    # no need to return the logger, I would even advice not to do so
log_tester.py
from util.logging_custom import SetupLogger
import logging
import datetime

# One-time setup in the main script; everywhere else just getLogger().
SetupLogger("logger_test", logging.DEBUG)  # you only need to run this once, in your main script.
logger = logging.getLogger("logger_test")


def test():
    logger.debug("In test()")


def main():
    logger.info(f"now is {datetime.datetime.now()}", )
    logger.debug("In main()")
    test()


if __name__ == '__main__':
    main()
any_other.py
import logging
# getLogger() with the same name returns the already-configured instance.
logger = logging.getLogger("logger_test") # this will return the logger you already instantiate in log_tester.py
logger.info("that works!")
Update
To set the level and the handling of the root logger instead of the one you setted up, use logging.getLogger() without passing any name:
# NOTE(review): `your_handler` is a placeholder for any configured Handler.
root_logger = logging.getLogger()
root_logger.addHandler(your_handler)
root_logger.setLevel(logging.DEBUG)
root_logger.info("hello world")
From the docs:
Multiple calls to getLogger() with the same name will return a
reference to the same logger object.
Your assumptions are quite correct. The problem here is the way you are calling getLogger() in test(). You should be passing the name you used in SetupLogger()'s getLogger() i.e. logger = logging.getLogger(f"{name_prefix} {__name__}").

selectively setting the console logging level

I am trying to use an FTP server stub during tests. I don't want the console output, but I would like to capture the logging to a file.
I want the FTP server to run in a different process, so I use multiprocessing.
My code as follows sets all logging to level WARNING:
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import pyftpdlib.log as pyftpdliblog
import os
import logging
import multiprocessing as mp

# One test user with full permissions in the current directory.
authorizer = DummyAuthorizer()
authorizer.add_user('user', '12345', '.', perm='elradfmwM')
handler = FTPHandler
handler.authorizer = authorizer

# Raise pyftpdlib's default level and route records to a file.
pyftpdliblog.LEVEL = logging.WARNING
logging.basicConfig(filename='pyftpd.log', level=logging.INFO)

server = FTPServer(('', 2121), handler)


def main():
    # Run the FTP server stub in a separate process.
    ftp_process = mp.Process(target=server.serve_forever)
    ftp_process.start()


if __name__ == '__main__':
    main()
How do I set only the console logging to level WARNING, or even better, completely shutdown without giving up the file logging?
So, after digging inside the code, I found out the following hint:
# This is a method of FTPServer and it is called before
# server.serve_forever
def _log_start(self):
    # Only auto-configure when the user has attached no handler of their
    # own to the 'pyftpdlib' logger.
    if not logging.getLogger('pyftpdlib').handlers:
        # If we get to this point it means the user hasn't
        # configured logger. We want to log by default so
        # we configure logging ourselves so that it will
        # print to stderr.
        from pyftpdlib.ioloop import _config_logging
        _config_logging()
So, all I had to do is to define my own appropriate handlers:
# Give pyftpdlib's logger an explicit handler, so the library's default
# stderr configuration (see _log_start above) is never installed.
logger = logging.getLogger('pyftpdlib')
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler('pyftpd.log')
logger.addHandler(file_handler)
Now, there is file logging, but console logging will not start.
Something like this:
import logging

date_format = "%Y/%m/%d %H:%M:%S"
log_file_path = "my_file.txt"
record_format = "%(asctime)s - %(levelname)-8s - %(name)s.%(funcName)s - %(message)s"

root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
# own_module_logger = logging.getLogger(__name__)
pyftpdlib_logger = logging.getLogger("pyftpdlib")

# --- File logging (pyftpdlib only) -----------------------------------
filehandler = logging.FileHandler(filename=log_file_path)
filehandler.setLevel(logging.DEBUG)
filehandler.setFormatter(logging.Formatter(fmt=record_format, datefmt=date_format))
pyftpdlib_logger.addHandler(filehandler)
# Keep pyftpdlib records from bubbling up to the root (console) handler.
pyftpdlib_logger.propagate = False

# --- Console logging (everything else) -------------------------------
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter(fmt=record_format, datefmt=date_format))
root_logger.addHandler(console)

# Demonstrate the routing.
demo_root = logging.getLogger()
demo_root.info('root I')
demo_root.debug('root D')
demo_ftp = logging.getLogger("pyftpdlib")
demo_ftp.info('P I')
demo_ftp.debug('P D')

logging.shutdown()
So the log records of pyftpdlib go to the file, while everything from your own module goes to the console. One of the key things here is the propagate setting!

Categories

Resources