If I log with the standard logging module, I can force the timestamps to be in UTC by setting
logging.Formatter.converter = time.gmtime
like this:
import sys
import time
import logging
handler = logging.StreamHandler(sys.stdout)
out_fmt = '[%(asctime)s.%(msecs)03dZ] [%(levelname)s] %(message)s'
dt_fmt = '%Y-%m-%d %H:%M:%S'
logging.Formatter.converter = time.gmtime
formatter = logging.Formatter(out_fmt, dt_fmt)
handler.setFormatter(formatter)
root = logging.getLogger()
root.setLevel(logging.DEBUG)  # 10 is the numeric value of DEBUG
root.addHandler(handler)
logging.log(logging.INFO, 'Start logging')
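Running this prints lines such as the following (the exact timestamp will differ, but the trailing Z now really does mean UTC):
[2024-01-15 12:34:56.789Z] [INFO] Start logging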
Since my application will be using Twisted, I want to use twisted.logger for logging instead. And indeed, it is much simpler:
import sys
from twisted.logger import Logger, textFileLogObserver
log = Logger(observer=textFileLogObserver(sys.stdout, '%Y-%m-%d %H:%M:%S.%fZ'))
log.info('Start logging')
Problem is, the timestamp is in local time - not in UTC time. Is there a way to make the Twisted logger use UTC time?
I ended up overriding formatTime in FileLogObserver:
import sys
from datetime import datetime
from twisted.python import log

def myFLOformatTime(self, when):
    # replacement for FileLogObserver.formatTime: render 'when' in UTC
    timeFormatString = self.timeFormat
    if timeFormatString is None:
        timeFormatString = '[%Y-%m-%d %H:%M:%S.%fZ]'
    return datetime.utcfromtimestamp(when).strftime(timeFormatString)

if __name__ == '__main__':
    # monkey-patch the observer class before logging starts
    log.FileLogObserver.formatTime = myFLOformatTime
    log.startLogging(sys.stdout)
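For reference, the newer twisted.logger API can achieve the same effect without monkey-patching, by giving FileLogObserver a format function that renders the event's log_time in UTC. This is a sketch of that idea, not taken from the answer above:
import sys
from datetime import datetime, timezone
from twisted.logger import Logger, FileLogObserver, formatEvent

def formatUTC(event):
    # render the event timestamp in UTC with millisecond precision
    ts = datetime.fromtimestamp(event["log_time"], tz=timezone.utc)
    return '[%s.%03dZ] %s\n' % (ts.strftime('%Y-%m-%d %H:%M:%S'),
                                ts.microsecond // 1000,
                                formatEvent(event))

log = Logger(observer=FileLogObserver(sys.stdout, formatUTC))
log.info('Start logging')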
I would like an individual .log file for each thread. Unfortunately, after using logging.basicConfig, several different log files are created, but in the end all records land in the most recently declared file.
What do the threads need to do to get independent log files?
import logging
import threading
import time
from datetime import datetime

def test_printing(name):
    logging.basicConfig(
        format="%(asctime)s, %(levelname)-8s | %(filename)-23s:%(lineno)-4s | %(threadName)15s: %(message)s",  # noqa
        datefmt="%Y-%m-%d:%H:%M:%S",
        level=logging.INFO,
        force=True,
        handlers=[
            logging.FileHandler(f"{name}.log"),
            logging.StreamHandler()])
    logging.info(f"Test {name}")
    time.sleep(20)
    logging.info(f"Test {name} after 20s")

def function_thread():
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    thread = threading.Thread(
        target=test_printing,
        kwargs={"name": timestamp}
    )
    thread.start()

for i in range(5):
    time.sleep(1)
    function_thread()
From https://docs.python.org/3/library/logging.html#logger-objects
Note that Loggers should NEVER be instantiated directly, but always through the module-level function logging.getLogger(name).
So you have to create and configure a new logger inside each thread:
logger = logging.getLogger(name)
logger.addHandler(logging.FileHandler(f"{name}.log"))  # Logger objects have no basicConfig
More info at: https://docs.python.org/3/howto/logging.html#logging-from-multiple-modules
Edit: Use the already defined name as the logger identifier, instead of __name__.
Edit:
You cannot use logging.basicConfig here; instead, you need to configure each thread's logger on its own.
Full code provided and tested:
import logging
import threading
import time
from datetime import datetime

def test_printing(name):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        fmt="%(asctime)s, %(levelname)-8s | %(filename)-23s:%(lineno)-4s | %(threadName)15s: %(message)s",
        datefmt="%Y-%m-%d:%H:%M:%S")
    sh = logging.StreamHandler()
    fh = logging.FileHandler(f"{name}.log")
    sh.setFormatter(formatter)
    fh.setFormatter(formatter)
    logger.addHandler(sh)
    logger.addHandler(fh)
    logger.info(f"Test {name}")
    time.sleep(20)
    logger.info(f"Test {name} after 20s")

def function_thread():
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    thread = threading.Thread(
        target=test_printing,
        kwargs={"name": timestamp}
    )
    thread.start()

for i in range(5):
    time.sleep(1)
    function_thread()
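One caveat the answer does not mention: logging.getLogger(name) returns the same logger object every time it is called with the same name, so if test_printing were ever invoked twice with an identical name, the handlers would be attached twice and every record would be written twice. A small guard (a sketch using the names from the code above) avoids that:
if not logger.handlers:
    # attach handlers only the first time this named logger is configured
    logger.addHandler(sh)
    logger.addHandler(fh)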
I'm using a logging object to report the progress of my Python script. Things were working fine until I specified a log file.
Working Code
from datetime import timedelta
import logging
import time

def main():
    start_time = time.monotonic()
    config = get_args()  # get_args() is defined elsewhere in the script
    # Logger configuration.
    msg_format = '[%(asctime)s - %(levelname)s - %(filename)s: %(lineno)d (%(funcName)s)] %(message)s'
    logging.basicConfig(format=msg_format, level=logging.INFO)
    logger = logging.getLogger()
    logger.info(msg='Starting process...')
    # End of process.
    end_time = time.monotonic()
    process_time = str(timedelta(seconds=(end_time - start_time))).split('.')[0]  # Get rid of trailing microseconds.
    logger.info(msg=('End. Entire process took approximately %s' % process_time))

if __name__ == '__main__':
    main()
Not Working Code
from datetime import datetime, timedelta
import logging
import os
import time

def main():
    start_time = time.monotonic()
    config = get_args()  # get_args() is defined elsewhere in the script
    # Logger configuration.
    timestamp = datetime.now().strftime(format='%Y%m%d-%H%M')
    log_filename = '_'.join(['log', timestamp])
    log_file_path = os.path.join(config.log_dir, log_filename)
    msg_format = '[%(asctime)s - %(levelname)s - %(filename)s: %(lineno)d (%(funcName)s)] %(message)s'
    logging.basicConfig(filename=log_file_path, format=msg_format, level=logging.INFO)
    logger = logging.getLogger()
    logger.info(msg='Starting process...')
    # End of process.
    end_time = time.monotonic()
    process_time = str(timedelta(seconds=(end_time - start_time))).split('.')[0]  # Get rid of trailing microseconds.
    logger.info(msg=('End. Entire process took approximately %s' % process_time))

if __name__ == '__main__':
    main()
Would anyone know what the problem might be? I suspect that specifying the log filename in logging.basicConfig may have messed up the handlers, but I'm not 100% certain. Thanks!
The answer was relatively obvious after reading the documentation a bit more carefully: when basicConfig is given a filename, it installs only a FileHandler, so nothing is written to the console anymore. To fix the issue, all I had to do was change:
logging.basicConfig(filename=log_file_path, format=msg_format, level=logging.INFO)
to this:
logging.basicConfig(format=msg_format, level=logging.INFO,
                    handlers=[logging.FileHandler(filename=log_file_path),
                              logging.StreamHandler()])
Hope this helps anyone else running into the same mistake.
This answer helped me: logger configuration to log to file and print to stdout
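As a side note, an alternative that should work just as well (a sketch, not from the answer above; 'process.log' is a placeholder path) is to keep filename in basicConfig and attach a console handler afterwards:
import logging

msg_format = '[%(asctime)s - %(levelname)s - %(filename)s: %(lineno)d (%(funcName)s)] %(message)s'
logging.basicConfig(filename='process.log', format=msg_format, level=logging.INFO)

# add a second, console-bound handler to the root logger
console = logging.StreamHandler()
console.setFormatter(logging.Formatter(msg_format))
logging.getLogger().addHandler(console)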
When using the logging module to create rotating log files, I can tell the handler to delay creating the log file until there is actual data to be logged, using the delay=True argument of the TimedRotatingFileHandler class, like this:
import time
import logging
from logging.handlers import TimedRotatingFileHandler

if __name__ == '__main__':
    handler = TimedRotatingFileHandler('logfile.log', when='midnight', delay=True)
    out_fmt = '[%(asctime)s.%(msecs)03dZ] [%(levelname)s] %(message)s'
    dt_fmt = '%Y-%m-%d %H:%M:%S'
    logging.Formatter.converter = time.gmtime
    formatter = logging.Formatter(out_fmt, dt_fmt)
    handler.setFormatter(formatter)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(handler)
This is useful when new information is only rarely written to the log - for instance, if a whole day passes without anything being logged, you don't want an empty log file to be created for that day.
Is it possible to achieve the same effect when using the Twisted logger (twisted.python.logfile.DailyLogFile)?
You can achieve the desired behavior by overriding the shouldRotate method of the DailyLogFile class.
Something like the following should do the trick:
from twisted.python.logfile import DailyLogFile

class CustomDailyLogFile(DailyLogFile):
    def shouldRotate(self):
        # rotate at the day boundary only if something was actually written
        return self.toDate() > self.lastDate and self.file.tell() > 0
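A usage sketch under my assumptions (the path is a placeholder; CustomDailyLogFile is the class defined above): hand the custom log file to twisted.python.log.startLogging, and rotation is skipped at the day boundary while the current file is still empty:
from twisted.python import log

logFile = CustomDailyLogFile.fromFullPath('logfile.log')
log.startLogging(logFile)  # events now go to the daily file
log.msg('first event of the day')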
I have this small program, taken from here:
from twisted.logger import Logger
log = Logger()

def handleData(data):
    log.debug("Got data: {data!r}.", data=data)

handleData({'a': 20})
This does not print anything to the screen. Why is that?
Nothing is printed because no log observer has been started: a bare Logger() sends its events to the global log publisher, which only buffers them until logging begins. Note also that twisted.logger's Logger has no setLevel method - level filtering is done by observers, not by the logger. You can make the code work like this:
import sys
from twisted.logger import Logger, globalLogBeginner, textFileLogObserver

globalLogBeginner.beginLoggingTo([textFileLogObserver(sys.stdout)])
log = Logger()

def handleData(data):
    log.debug("Got data: {data!r}.", data=data)

handleData({'a': 20})
I figured it out from here: https://github.com/moira-alert/worker/blob/master/moira/logs.py
import sys
from twisted.logger import (Logger, LogLevel, globalLogPublisher,
                            textFileLogObserver, FilteringLogObserver,
                            LogLevelFilterPredicate)

log = Logger()
level = LogLevel.debug
predicate = LogLevelFilterPredicate(defaultLogLevel=level)
observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [predicate])
observer._encoding = "utf-8"  # private attribute, carried over from the moira code
globalLogPublisher.addObserver(observer)
log.info("Start logging with {l}", l=level)

def handleData(data):
    log.debug("Got data: {data!r}.", data=data)

handleData({'a': 20})
Is there any simpler way? It seems overly complicated just to set the log level.
You didn't add an observer to your logger object.
Here is a simple observer that prints the log to stdout:
import sys
from twisted.logger import Logger, eventAsText, FileLogObserver
log = Logger()
log.observer.addObserver(FileLogObserver(sys.stdout, lambda e: eventAsText(e) + "\n"))
someData = 2
log.debug("Got data: {data!r}", data=someData)
I am trying to use an FTP server stub during tests. I don't want the console output, but I would like to capture the logging to a file.
I want the FTP server to run in a different process, so I use multiprocessing.
My code, shown below, sets all logging to level WARNING:
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import pyftpdlib.log as pyftpdliblog
import os
import logging
import multiprocessing as mp

authorizer = DummyAuthorizer()
authorizer.add_user('user', '12345', '.', perm='elradfmwM')
handler = FTPHandler
handler.authorizer = authorizer

pyftpdliblog.LEVEL = logging.WARNING
logging.basicConfig(filename='pyftpd.log', level=logging.INFO)

server = FTPServer(('', 2121), handler)

def main():
    p = mp.Process(target=server.serve_forever)
    p.start()

if __name__ == '__main__':
    main()
How do I set only the console logging to level WARNING or, even better, shut it down completely without giving up the file logging?
So, after digging into the source code, I found the following hint:
# This is a method of FTPServer and it is called before
# server.serve_forever
def _log_start(self):
    if not logging.getLogger('pyftpdlib').handlers:
        # If we get to this point it means the user hasn't
        # configured logger. We want to log by default so
        # we configure logging ourselves so that it will
        # print to stderr.
        from pyftpdlib.ioloop import _config_logging
        _config_logging()
So all I had to do was define my own handlers:
logger = logging.getLogger('pyftpdlib')
logger.setLevel(logging.INFO)
hdlr = logging.FileHandler('pyftpd.log')
logger.addHandler(hdlr)
Now there is file logging, and the console logging never starts.
Something like this:
import logging
date_format = "%Y/%m/%d %H:%M:%S"
log_file_path = "my_file.txt"
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
# own_module_logger = logging.getLogger(__name__)
pyftpdlib_logger = logging.getLogger("pyftpdlib")
# Setup logging to file (Only pyftpdlib)
filehandler = logging.FileHandler(filename = log_file_path)
filehandler.setLevel(logging.DEBUG)
fileformatter = logging.Formatter(fmt = "%(asctime)s - %(levelname)-8s - %(name)s.%(funcName)s - %(message)s",
datefmt = date_format)
filehandler.setFormatter(fileformatter)
pyftpdlib_logger.addHandler(filehandler)
pyftpdlib_logger.propagate = False
# Setup logging to console (All other)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
consoleformatter = logging.Formatter(fmt = "%(asctime)s - %(levelname)-8s - %(name)s.%(funcName)s - %(message)s",
datefmt = date_format)
console.setFormatter(consoleformatter)
root_logger.addHandler(console)
# Do your logging
a = logging.getLogger()
a.info('root I')
a.debug('root D')
b = logging.getLogger("pyftpdlib")
b.info('P I')
b.debug('P D')
logging.shutdown()
So pyftpdlib's records go to the file, and everything from your own module goes to the console. One of the key things here is propagate: setting it to False on the pyftpdlib logger keeps its records from bubbling up to the root logger's console handler.
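One more caveat, since the original question starts the server with multiprocessing: with the 'spawn' start method (the default on Windows and macOS), logging configuration done in the parent is not inherited by the child, so it is safest to configure the pyftpdlib logger inside the child process. A sketch under that assumption (run_server is a hypothetical name):
import logging
import multiprocessing as mp

def run_server():
    # configure inside the child so it also applies with the 'spawn' start method
    logger = logging.getLogger('pyftpdlib')
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.FileHandler('pyftpd.log'))
    logger.propagate = False
    # build the authorizer/handler/FTPServer here, then:
    # server.serve_forever()

if __name__ == '__main__':
    mp.Process(target=run_server).start()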