Kaggle Notebook does not Print Logs - python

My Kaggle notebook does not print any logging output.
import logging
import sys
# noinspection SpellCheckingInspection,PyArgumentList
def defaultLogger(name="fashiondataset", level=logging.DEBUG, handlers=None,
                  format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'):
    """Configure the root logger and return a named child logger.

    Args:
        name: Name of the logger to return.
        level: Threshold applied to the root logger.
        handlers: Optional list of handlers; defaults to a StreamHandler
            writing to sys.stdout so output shows up in notebook cells.
        format: Log record format string (name kept for backward
            compatibility with existing callers, although it shadows the
            builtin ``format``).

    Returns:
        logging.Logger: the logger registered under ``name``.
    """
    if handlers is None:
        handlers = [logging.StreamHandler(sys.stdout)]
    # force=True (Python 3.8+) removes any handlers already attached to the
    # root logger before configuring. Kaggle/Jupyter pre-configure the root
    # logger, which turns a plain basicConfig() call into a silent no-op --
    # that is why nothing was printed in the notebook.
    logging.basicConfig(level=level, format=format, handlers=handlers, force=True)
    logger = logging.getLogger(name)
    # Quieten chatty third-party loggers.
    for noisy in ("matplotlib", "nltk_data", "pysndfx",
                  "selenium.webdriver.remote.remote_connection",
                  "connectionpool", "requests", "urllib3"):
        logging.getLogger(noisy).setLevel(logging.WARNING)
    return logger
print("Testing Logger")
logger = defaultLogger()
logger.info("Logging Info Test")
logger.debug("Logging Debug Test")
logger.error("Logging Error Test")
print("Testing Logger [DONE]")
Output:
Testing Logger
Testing Logger [DONE]
I also tried adding logging.getLogger(name).setLevel(logging.DEBUG), but it does not change anything. TQDM / TensorFlow progress bars are working.
Edit://
sys.stdout.write("test") Works

Related

python logger prints everything twice

I try using this openshift-restclient-python library. My custom Logger prints everything twice after I run into this bug.
modules/logging/Logging.py
import logging
class CustomLogger:
    """Process-wide singleton wrapper around a module-level logger.

    The logger gets exactly one StreamHandler and, crucially, has
    propagation to the root logger disabled. Without ``propagate = False``
    any later configuration of the root logger (e.g. a library calling
    ``logging.basicConfig``) makes every record appear twice: once via this
    handler and once via the root logger's handler -- exactly the
    duplicated output described in the question.
    """

    logger = None  # class-level cache; shared by all instances

    def __init__(self):
        if CustomLogger.logger is None:
            logger = logging.getLogger(__name__)
            logger.setLevel(logging.DEBUG)
            stdout_handler = logging.StreamHandler()
            stdout_handler.setLevel(logging.DEBUG)
            logger.addHandler(stdout_handler)
            # Fix: do not forward records to the root logger.
            logger.propagate = False
            CustomLogger.logger = logger

    def getLogger(self):
        """Return the shared, fully configured logger instance."""
        return CustomLogger.logger


logger = CustomLogger().getLogger()
This is my main.py:
#!/usr/bin/env python3
# Entry script: authenticate against an OpenShift cluster and list projects.
# Demonstrates the duplicate-logging symptom: after DynamicClient() runs
# (which apparently ends up touching root-logger configuration -- see the
# "ERROR:root:load cache error" line in the output), records from the
# custom logger are emitted twice.
import sys
from modules.logging.Logging import logger
from kubernetes import client, config
from openshift.dynamic import DynamicClient
from openshift.helper.userpassauth import OCPLoginConfiguration
import warnings
warnings.filterwarnings("ignore")
# NOTE(review): hard-coded credentials and disabled SSL verification --
# acceptable in a reproduction snippet, never in production code.
apihost = 'myhost'
username = 'myuser'
password = 'insecure'
ca_cert = '/path/to/cert'
kubeConfig = OCPLoginConfiguration(ocp_username=username, ocp_password=password)
kubeConfig.host = apihost
kubeConfig.verify_ssl = False  # skips server certificate validation
kubeConfig.ssl_ca_cert = ca_cert
kubeConfig.get_token()  # performs the username/password login
k8s_client = client.ApiClient(kubeConfig)
logger.warning("this is printed once")
dyn_client = DynamicClient(k8s_client)
logger.warning("this is printed twice")
v1_projects = dyn_client.resources.get(api_version='project.openshift.io/v1', kind='Project')
project_list = v1_projects.get()
sys.exit(0)
executing the main.py I get the following output
this is printed once
ERROR:root:load cache error: ResourceList.__init__() got an unexpected keyword argument 'base_resource_lookup'
this is printed twice
WARNING:modules.logging.Logging:this is printed twice
If I do not use my custom logger but a simple configuration as below in main.py then everything is printed once.
import logging
logging.basicConfig(level=logging.DEBUG)
I have found this answer so I also tried removing any handler but the only handler is the one that contains my customization, so I end up with a basic logger.
What am I doing wrong?
Thanks
EDIT:
There is an easier way reproducing the issue.
I still have my custom logger as posted before but my main.py now:
#!/usr/bin/env python3
"""Minimal reproduction: basicConfig() adds a handler to the ROOT logger,
so records from the already-configured module logger start appearing twice,
and keep appearing even after this logger's own handlers are removed,
because the records still propagate up to the root logger."""
import sys
from modules.logging.Logging import logger
import logging

print(logger.handlers)
print("number handlers: " + str(len(logger.handlers)))
logger.warning("this is printed once")
logging.basicConfig(level=logging.DEBUG)  # configures the ROOT logger
logger.warning("this is printed twice")
print("number handlers: " + str(len(logger.handlers)))
# Fix: iterate over a *copy* of the handler list. Removing items from the
# very list being iterated skips every other handler once there is more
# than one (classic modify-while-iterating bug).
for h in list(logger.handlers):
    logger.removeHandler(h)
print("number handlers: " + str(len(logger.handlers)))
logger.warning("still get printed")  # still reaches root via propagation
sys.exit(0)
the output:
[<StreamHandler <stderr> (DEBUG)>]
number handlers: 1
this is printed once
this is printed twice
WARNING:modules.logging.Logging:this is printed twice
number handlers: 1
number handlers: 0
WARNING:modules.logging.Logging:still get printed
The code logging.basicConfig(level=logging.DEBUG) doesn't add another handler but cause everything to be logged. I actually only want the customized logs printed by the streamingHandler. How can I revert what is done by logging.basicConfig(level=logging.DEBUG)?
Please try removing this piece of code from the class CustomLogger:
stdout_handler = logging.StreamHandler()
stdout_handler.setLevel(logging.DEBUG)
logger.addHandler(stdout_handler)
By the way, I was struggling with the same issue. I found the answers below using the search on this website.
https://stackoverflow.com/a/26730545/15637940
https://stackoverflow.com/a/70876179/15637940
and a lot more answered questions...
I solved it that way:
class CustomLogger:
    """Singleton logger with a custom-formatted stream handler.

    The basicConfig(filename='/dev/null', ...) call deliberately routes
    everything handled by the ROOT logger (e.g. third-party libraries that
    log via root) to /dev/null, so only records handled by this logger's
    own StreamHandler reach the terminal.
    """
    logger=None  # class-level cache shared by all instances
    def __init__(self):
        if (CustomLogger.logger==None):
            # Give the root logger a throwaway file handler so library
            # output does not show up on the console.
            logging.basicConfig(filename='/dev/null', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
            logger = logging.getLogger(__name__)
            logger.setLevel(logging.DEBUG)
            fmt = '%(asctime)s | %(message)s'
            stdout_handler = logging.StreamHandler()  # despite the name, StreamHandler defaults to stderr
            stdout_handler.setLevel(logging.DEBUG)
            # NOTE(review): CustomFormatter is not defined in this snippet --
            # presumably a project-local logging.Formatter subclass; verify.
            stdout_handler.setFormatter(CustomFormatter(fmt))
            logger.addHandler(stdout_handler)
            CustomLogger.logger=logger
    def getLogger(self):
        """Return the shared configured logger."""
        return CustomLogger.logger
logger = CustomLogger().getLogger()
It seems that the library I am using logs to the root logger somewhere. According to this answer, logging.basicConfig() installs a StreamHandler on the root logger.
If I use logger = logging.getLogger('root') instead of logger = logging.getLogger(__name__) then everything is printed once. However, in that case everything the library logs on DEBUG-Level is printed to the terminal.
The line logging.basicConfig(filename='/dev/null', filemode='w', format='%(name)s - %(levelname)s - %(message)s') causes that everything logged by the root logger is printed to /dev/null.

Why set logging level for each module won't show logs in my code?

Some simple codes to illustrate my question:
/src
-- t1.py
-- t2.py
-- test.py
The test.py
import logging
import t1
import t2
def main():
    """Call into t1 and t2 without configuring any logging handler."""
    # I need to set logging level for each module, so can't call logging.basicConfig
    # logging.basicConfig(level=logging.DEBUG)
    # NOTE(review): with no handler configured anywhere, INFO records are
    # dropped -- only WARNING and above reach logging's last-resort
    # handler, which is why nothing is printed.
    t1.foo()
    t2.foo2()

if __name__ == "__main__":
    main()
t1.py
import logging
# Module-level logger named after this module ("t1" when imported).
logger = logging.getLogger(__name__)
# Setting the level on the *logger* only filters what it will process;
# without a handler somewhere up the hierarchy nothing is emitted.
logger.setLevel(logging.DEBUG)

def foo():
    """Log a single INFO message through this module's logger."""
    logger.info('foo log message')
t2.py
import logging
# Module-level logger named after this module ("t2" when imported).
logger = logging.getLogger(__name__)
# Logger-level filtering only; a handler is still required for output.
logger.setLevel(logging.DEBUG)

def foo2():
    """Log a single INFO message through this module's logger."""
    logger.info('foo2 log message')
When I run python3 test.py I won't get logs. But if I call logging.basicConfig(level=logging.DEBUG) in test.py I will get logs.
So what did I do wrong ?
--- update ---
What I can add to the answer I got is that the root logger's default level is 'WARNING'. So I did not get any output.
logger.setLevel(logging.DEBUG) sets level of messages processable by this logger.
logging.basicConfig(level=logging.DEBUG) does more than that. It creates a handler for the root logger which prints the logging records to stderr.
An example from the cookbook:
# Cookbook recipe: attach a console handler directly to the root logger so
# that records propagated up from any module logger actually get printed.
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)

Logger to not show INFO from external libaries

I want logger to print INFO messages from all my code but not from 3rd party libraries. This is discussed in multiple places, but the suggested solution does not work for me. Here is my simulation of external library, extlib.py:
#!/usr/bin/env python3
# Simulated third-party library that logs via the module-level
# logging.info() shortcut -- i.e. directly through the ROOT logger,
# which is why it cannot be filtered out by logger name.
from logging import info

def f():
    """Emit one INFO record on the root logger."""
    info("i am extlib f()")
My module:
#!/usr/bin/env python3
import logging
from logging import info
import extlib
# basicConfig attaches a root handler at INFO, so *everything* that
# reaches the root logger -- this module's records and extlib's -- prints.
logging.basicConfig(level=logging.INFO)
info("I am mymodule")
extlib.f()
Output:
INFO:root:I am mymodule
INFO:root:i am extlib f()
My attempt to only enable INFO for local module:
#!/usr/bin/env python3
import logging
from logging import info
import extlib
# This only raises the threshold of the '__main__' logger; it installs no
# handler and never touches the root logger, so with nothing configured
# no INFO record is emitted at all -- hence the empty output.
logging.getLogger(__name__).setLevel(logging.INFO)
info("I am mymodule")
extlib.f()
Output: nothing
Desired output:
INFO:root:I am mymodule
What am I doing wrong?
The problem is that they aren't using the logger class in the external library. If they were then you could filter it out. I'm not certain you can stop them from logging information since they are using the info function call. Here is a workaround though.
Suppressing output of module calling outside library
Update:
here's how you do loggers
import logging
# Canonical "how to set up a logger" recipe: named logger + console
# handler + formatter, then one message per level.
# create logger
logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# 'application' code
logger.debug('debug message')
logger.info('info message')
# NOTE(review): logger.warn() is a deprecated alias of logger.warning().
logger.warn('warn message')
logger.error('error message')
logger.critical('critical message')

selectively setting the console logging level

I am trying to use an FTP server stub during tests. I don't want the console output, but I would like to capture the logging to a file.
I want the FTP server to run in a different process, so I use multiprocessing.
My code as follows sets all logging to level WARNING:
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import pyftpdlib.log as pyftpdliblog
import os
import logging
import multiprocessing as mp
# FTP server stub for tests: one test user with full permissions.
authorizer = DummyAuthorizer()
authorizer.add_user('user', '12345', '.', perm='elradfmwM')
handler = FTPHandler
handler.authorizer = authorizer
# NOTE(review): as reported in the question, this combination ends up
# logging everything at WARNING rather than separating console and file
# output -- see the accepted approach further below.
pyftpdliblog.LEVEL = logging.WARNING
logging.basicConfig(filename='pyftpd.log', level=logging.INFO)
server = FTPServer(('', 2121), handler)

def main():
    """Run the blocking serve_forever loop in a child process so the
    parent (the test) can continue."""
    p = mp.Process(target=server.serve_forever)
    p.start()

if __name__ == '__main__':
    main()
How do I set only the console logging to level WARNING, or even better, completely shutdown without giving up the file logging?
So, after digging inside the code, I found out the following hint:
# (Quoted from pyftpdlib's FTPServer source, for reference.)
# This is a method of FTPServer and it is called before
# server.serve_forever
def _log_start(self):
    # Only auto-configure logging if the user has not attached any handler
    # to the 'pyftpdlib' logger -- this is the hook the answer exploits.
    if not logging.getLogger('pyftpdlib').handlers:
        # If we get to this point it means the user hasn't
        # configured logger. We want to log by default so
        # we configure logging ourselves so that it will
        # print to stderr.
        from pyftpdlib.ioloop import _config_logging
        _config_logging()
So, all I had to do is to define my own appropriate handlers:
# Pre-configure the 'pyftpdlib' logger before serve_forever runs, so the
# library's _log_start() sees an existing handler and skips its own
# stderr configuration -- result: file logging only, no console output.
logger = logging.getLogger('pyftpdlib')
logger.setLevel(logging.INFO)
hdlr = logging.FileHandler('pyftpd.log' )
logger.addHandler(hdlr)
Now, there is file logging, but console logging will not start.
Something like this:
import logging
# Split logging: pyftpdlib records -> file only; everything else -> console.
date_format = "%Y/%m/%d %H:%M:%S"
log_file_path = "my_file.txt"

root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)  # root stays permissive; handlers filter
# own_module_logger = logging.getLogger(__name__)
pyftpdlib_logger = logging.getLogger("pyftpdlib")

# Setup logging to file (Only pyftpdlib)
filehandler = logging.FileHandler(filename = log_file_path)
filehandler.setLevel(logging.DEBUG)
fileformatter = logging.Formatter(fmt = "%(asctime)s - %(levelname)-8s - %(name)s.%(funcName)s - %(message)s",
datefmt = date_format)
filehandler.setFormatter(fileformatter)
pyftpdlib_logger.addHandler(filehandler)
# Key line: stop pyftpdlib records from bubbling up to the root logger
# (and therefore to the console handler attached below).
pyftpdlib_logger.propagate = False

# Setup logging to console (All other)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
consoleformatter = logging.Formatter(fmt = "%(asctime)s - %(levelname)-8s - %(name)s.%(funcName)s - %(message)s",
datefmt = date_format)
console.setFormatter(consoleformatter)
root_logger.addHandler(console)

# Do your loggings
a = logging.getLogger()
a.info('root I')
a.debug('root D')  # below the console handler's INFO threshold -> not shown
b = logging.getLogger("pyftpdlib")
b.info('P I')
b.debug('P D')
logging.shutdown()  # flush and close all handlers
So loggings of pyftpdlib go to file. Everything from your module to console. One of the key thing here is the propagate!

python's logging module is not logging to a log file

I am trying out Python's logging module to log info both to the console and to a log file, and it works fine. I wanted that script to get invoked during machine start-up, and hence invoked my script from rc.local.
And my script gets invoked during boot-up, but the problem is, only console logging is working at that time. Logging to a file is not happening. Any clue on what could be the issue.
Is it anything related to syslog daemon?
import time
import logging
import datetime
# NOTE(review): 'global' at module level is a no-op -- it does NOT make
# names assigned inside initialize() module-global. initialize() itself
# would need the 'global logger' statement; as written, 'logger' is a
# local that vanishes when initialize() returns, so the logger.info()
# call in the __main__ block cannot see the configured logger.
global logger
global LOG_FILE
def initialize():
    """Attach a DEBUG file handler and an INFO console handler to a logger."""
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    time = datetime.datetime.now()  # shadows the imported 'time' module
    # Relative path: resolved against the *current working directory*.
    # When launched from rc.local the cwd is typically '/', so the log
    # file may be created somewhere unexpected -- presumably the reported
    # "file logging not happening" symptom; verify with an absolute path.
    LOG_FILE = "sample.log." + time.strftime("%Y%m%d")
    log_file_handler = logging.FileHandler(LOG_FILE)
    log_file_handler.setLevel(logging.DEBUG)
    log_file_formatter = logging.Formatter('%(message)s')
    log_file_handler.setFormatter(log_file_formatter)
    logger.addHandler(log_file_handler)
    # Variable reuse: the same name now holds the console handler.
    log_file_handler = logging.StreamHandler()
    log_file_handler.setLevel(logging.INFO)
    log_file_formatter = logging.Formatter('%(message)s')
    log_file_handler.setFormatter(log_file_formatter)
    logger.addHandler(log_file_handler)

if __name__ == '__main__':
    initialize()
    logger.info("This should go both in console and log")
    logger.debug("This should go only to Log")
import time
import logging
import datetime
def initialize():
    """Configure the module logger: DEBUG -> file, INFO -> console."""
    # because logging is global module, so you can
    # always get logger by logging.getLogger(__name__)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    time = datetime.datetime.now()
    # if you only use LOG_FILE in function, you don't
    # have to declare it outside.
    # NOTE(review): still a relative path -- when invoked from rc.local the
    # working directory is usually '/', so prefer an absolute path there.
    LOG_FILE = "sample.log." + time.strftime("%Y%m%d")
    log_file_handler = logging.FileHandler(LOG_FILE)
    log_file_handler.setLevel(logging.DEBUG)
    log_file_formatter = logging.Formatter('%(message)s')
    log_file_handler.setFormatter(log_file_formatter)
    logger.addHandler(log_file_handler)
    log_file_handler = logging.StreamHandler()
    log_file_handler.setLevel(logging.INFO)
    log_file_formatter = logging.Formatter('%(message)s')
    log_file_handler.setFormatter(log_file_formatter)
    logger.addHandler(log_file_handler)

if __name__ == '__main__':
    initialize()
    # Re-fetch by name: logging keeps loggers in a global registry, so this
    # is the same object initialize() configured.
    logger = logging.getLogger(__name__)
    logger.info("This should go both in console and log")
    logger.debug("This should go only to Log")

Categories

Resources