I wrote this Python 3 code to send me an email when an exception happens, but the SMTPHandler is not working in my module — even though the very same arguments and values work perfectly for SMTPHandler when the same code is copy-pasted from this answer.
Working code:
# Minimal working example: email every logged exception via Gmail.
# NOTE(review): the "#" inside the addresses is an obfuscated "@"
# (Stack Overflow artifact) -- do not copy verbatim.
import logging.handlers

# SMTPHandler emails each record it receives; secure=() makes it call
# starttls() before authenticating.
smtp_handler = logging.handlers.SMTPHandler(mailhost=("smtp.gmail.com", 587),
                                            fromaddr="somemail#gmail.com",
                                            toaddrs="anothermail#gmail.com",
                                            subject=u"Alfred error!",
                                            credentials=("somemail#gmail.com", "asdasdasdasdasd"),
                                            secure=())

logger = logging.getLogger()   # root logger
logger.addHandler(smtp_handler)

try:
    raise Exception()
except Exception as e:
    # logger.exception logs at ERROR level and appends the traceback.
    logger.exception('Unhandled Exception')
All handlers are doing fine and the code from the answer works just with the exception() call.
I can't understand why this is not working.
log_config.py (not working code):
# log_config.py -- module-level logging configuration.
import os
import logging
import logging.handlers

version = "2.0.0"
# Log file lives in <project root>/log/my_app.log, two directories up
# from this module.
LOGFILE_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'log', 'my_app.log')

# Root logger: everything at DEBUG and above, plain console format.
logging.basicConfig(format='%(levelname)s - %(message)s', level=logging.DEBUG)
# Silence chatty third-party libraries below WARNING.
logging.getLogger('telegram').setLevel(logging.WARNING)
logging.getLogger('chardet.charsetprober').setLevel(logging.WARNING)
class TimedOutFilter(logging.Filter):
    """Drop the recurring "Timed out" update-polling noise from the logs.

    filter() returns False (suppress) for records whose message contains
    the timeout text, and True (keep) for everything else.
    """

    def filter(self, record):
        if "Error while getting Updates: Timed out" in record.getMessage():
            return False
        # BUG FIX: the original fell through and returned None (falsy),
        # which silently suppressed *every* other record as well.
        return True
def getLogger(name):
    """
    Return a logger for the file
    :param name: the file name
    :return: a logger
    """
    global version
    logger = logging.getLogger(name)

    # Rotating file handler: start a new log file every midnight.
    fh = logging.handlers.TimedRotatingFileHandler(LOGFILE_PATH, when='midnight')
    formatter = logging.Formatter('%(asctime)s - ({0}) %(name)s - %(levelname)s - %(message)s'.format(version))
    fh.setFormatter(formatter)
    fh.addFilter(TimedOutFilter())
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    # NOTE(review): os.environ.get('SERVER', True) is truthy for any
    # non-empty value of SERVER *and* when SERVER is unset (default
    # True), so this branch almost always runs -- confirm intent.
    if os.environ.get('SERVER', True):
        # Email ERROR-and-above records.  The "#" in the addresses is an
        # obfuscated "@" (Stack Overflow artifact).
        mh = logging.handlers.SMTPHandler(mailhost=("smtp.gmail.com", 587),
                                          fromaddr="somemail#gmail.com",
                                          toaddrs="anothermail#gmail.com",
                                          subject=u"Alfred error!",
                                          credentials=("somemail#gmail.com", "asdasdasdasdasd"),
                                          secure=())
        mh.setFormatter(formatter)
        mh.addFilter(TimedOutFilter())
        mh.setLevel(logging.ERROR)
        logger.addHandler(mh)

    # Console handler: ERROR-and-above only, shorter format.
    ch = logging.StreamHandler()
    formatter = logging.Formatter('{0} %(levelname)s - %(message)s'.format(version))
    ch.setFormatter(formatter)
    ch.addFilter(TimedOutFilter())
    ch.setLevel(logging.ERROR)
    logger.addHandler(ch)

    return logger
if __name__ == '__main__':
    # Smoke test: one record per level of interest.
    logger = getLogger(__name__)
    try:
        raise Exception()
    except Exception as e:
        logger.exception('Unhandled Exception')
    logger.error('an error line')
    logger.debug('a debug line')
The TimedOutFilter was ruining everything because it either returned False or fell through and returned None — it never returned True.
So just doing:
class TimedOutFilter(logging.Filter):
    """Suppress the "Timed out" update-polling messages; keep all others."""

    def filter(self, record):
        # A single boolean expression replaces the if/return pair.
        message = record.getMessage()
        return "Error while getting Updates: Timed out" not in message
fixed the whole thing.
Related
My main program logs to its own log file and the sub-process should have its own log file.
I replaced the logger object inside the multiprocessing process, but the logging data from the sub-process is additionally redirected to the main log file.
How can I prevent this?
The structure looks like this:
import logging
import sys
import os
from pathlib import Path
import multiprocessing
import time
import requests
class ProcessFilter(logging.Filter):
    """Only accept log records from a specific pid."""

    def __init__(self, pid):
        # BUG FIX: initialise the base Filter (sets up its name-matching
        # state); the original skipped super().__init__().
        super().__init__()
        self._pid = pid

    def filter(self, record):
        # record.process is the pid of the process that emitted the record.
        return record.process == self._pid
def create_logger(file):
    """Build this module's logger: DEBUG level, pid filter, file + stdout output."""
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Reject records that originate in any other process.
    logger.addFilter(ProcessFilter(pid=os.getpid()))

    fmt = logging.Formatter(
        '[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S')

    # Same formatter on both sinks, file first (matching the original order).
    for handler in (logging.FileHandler(file), logging.StreamHandler(sys.stdout)):
        handler.setFormatter(fmt)
        logger.addHandler(handler)

    return logger
def subprocess_init():
    """Entry point for the child process: install its own logger, then work."""
    global log
    # Replace the module-level logger so the child writes to its own file.
    log = create_logger(str(Path.home()) + '/logfile_sub.log')
    do_subprocess_stuff()
def do_subprocess_stuff():
    """Emit a numbered "subprocess" log line every 5 seconds, forever."""
    tick = 0
    while True:
        create_log("subprocess", tick)
        time.sleep(5)
        tick += 1
def main_tasks():
    """Emit ten "main" log lines, one every 5 seconds, counting 10 down to 1."""
    for remaining in range(10, 0, -1):
        create_log("main", remaining)
        time.sleep(5)
def create_log(text, num):
    """Write one DEBUG record ("<text> log <num>") through the module-global logger."""
    log.debug("{0} log %s".format(text), num)
if __name__ == '__main__':
    # Parent process logs to ~/logfile.log; the child swaps in its own
    # logger inside subprocess_init.
    file = str(Path.home()) + '/logfile.log'
    log = create_logger(file)
    sub_process = multiprocessing.Process(target=subprocess_init, args=())
    sub_process.daemon = True   # child is killed when the parent exits
    sub_process.start()
    main_tasks()
I am simply translating this answer to fit multiprocessing.
import logging
class ProcessFilter(logging.Filter):
    """Only accept log records from a specific pid."""

    def __init__(self, pid):
        self._pid = pid

    def filter(self, record):
        # Keep only records emitted by the configured process.
        return self._pid == record.process
import logging
import os
def create_logger(file):
    # The root logger ('') is used instead of __name__ so that the
    # handlers attached here receive records propagated from every
    # module's logger, not just this module's.
    log = logging.getLogger('') # why use this logger and not __name__ ?
    log.setLevel(logging.DEBUG)
    log.addFilter(ProcessFilter(pid=os.getpid())) # logger wide filter
    file_handler = logging.FileHandler(file)
    # NOTE(review): this snippet only shows "import logging" and
    # "import os" -- `sys` must also be imported for the next line.
    stream_handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
                                  datefmt='%a, %d %b %Y %H:%M:%S')
    file_handler.setFormatter(formatter)
    stream_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    log.addHandler(stream_handler)
    return log
NB. you can also put the filter on a specific handler
Creating a custom logger for my purpose using python which can be used across different modules just by importing and calling a custom_log method.
This is MyLogger.py script.
import datetime
import logging
import logging.handlers
import os
import colorlog
from pathlib import Path
class MyLogger(logging.Logger):
    # NOTE(review): although this subclasses logging.Logger, __init__
    # never calls super().__init__() and actually configures the *root*
    # logger, so instances are not usable as Logger objects themselves.
    def __init__(self, verbose=1):
        """Configure the root logger with a timestamped rotating file
        handler and a colorized (colorlog) console handler."""
        log_dir_path = Path("../logs")
        file_name_format = '{year:04d}{month:02d}{day:02d}-{hour:02d}{minute:02d}{second:02d}.log'
        file_msg_format = '%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
        console_msg_format = '%(asctime)s %(levelname)-8s: %(message)s'
        logger = logging.getLogger()   # the root logger, not self
        logger.setLevel(logging.DEBUG)
        if (verbose == 1):
            max_bytes = 1024 ** 2      # rotate after 1 MiB
            backup_count = 100
            # File name is the current timestamp, e.g. 20240101-120000.log
            t = datetime.datetime.now()
            file_name = file_name_format.format(year=t.year, month=t.month, day=t.day, hour=t.hour, minute=t.minute,
                                                second=t.second)
            file_name = os.path.join(log_dir_path, file_name)
            file_handler = logging.handlers.RotatingFileHandler(filename=file_name, maxBytes=max_bytes, backupCount=backup_count)
            file_handler.setLevel(logging.DEBUG)
            file_formatter = logging.Formatter(file_msg_format)
            file_handler.setFormatter(file_formatter)
            logger.addHandler(file_handler)
        if (verbose == 1):
            # Console handler with per-level colors via colorlog.
            cformat = '%(log_color)s' + console_msg_format
            colors = {'DEBUG': 'green', 'INFO': 'cyan', 'WARNING': 'bold_yellow', 'ERROR': 'bold_red',
                      'CRITICAL': 'bold_purple'}
            date_format = '%Y-%m-%d %H:%M:%S'
            formatter = colorlog.ColoredFormatter(cformat, date_format, log_colors=colors)
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.DEBUG)
            stream_handler.setFormatter(formatter)
            logger.addHandler(stream_handler)

    def custom_log(self, level, msg):
        # NOTE(review): logging.log() always goes through the root
        # logger, bypassing `self`.  Callers in Test1/Test2 invoke this
        # unbound as MyLogger.custom_log('DEBUG', ...), so `self`
        # receives the level string and `level` receives the message --
        # getattr(logging, <message>) then fails.
        logging.log(getattr(logging,level),msg)
I have 2 other scripts in the same directory, shown below. I just need to initialize MyLogger() in Test1.py at the beginning of the test, and I expect to use custom_log in all the other scripts. I am missing something here — either in how I initialize or in how I import. Any help getting this to work would be appreciated.
Test1.py
from MyLogger import MyLogger

class StartTest():
    def __init__(self):
        # NOTE(review): custom_log is an instance method but is called
        # on the class, so 'DEBUG' lands in `self` and the message in
        # `level` -- the call cannot work as written.
        MyLogger.custom_log('DEBUG','Debug messages are only sent to the logfile.')

if __name__ == '__main__':
    MyLogger()    # configures the root logger as a side effect
    StartTest()
Test2.py
from MyLogger import MyLogger

class Test():
    def __init__(self):
        # NOTE(review): same unbound-call problem as in Test1.py -- the
        # arguments land in the wrong parameters of custom_log.
        MyLogger.custom_log('DEBUG','Debug messages are only sent to the logfile.')

    def TestMethod(self):
        MyLogger.custom_log('INFO','Debug messages are only sent to the logfile.')
You can achieve this by creating a logger with a specific name instead of using the root logger. logging.log() always uses the root logger, so define a named logger rather than creating a new logging channel through the root.
Here is an example that is in line with your needs:
class MyLogger:
    """Namespace holding one shared, pre-configured Logger.

    Call MyLogger.configure() once at startup, then use MyLogger.logger
    anywhere in the application.
    """

    # The shared logger; populated by configure().
    logger: logging.Logger = None

    # BUG FIX: the decorator had been mangled into the comment
    # "#staticmethod"; restore the real @staticmethod decorator.
    @staticmethod
    def configure(verbose=1):
        """Build the named "mylogger" logger (colorized console output,
        DEBUG level) and publish it as MyLogger.logger.

        :param verbose: kept for interface compatibility; unused here.
        """
        # (Unused file-handler locals from the original were removed;
        # this version only configures a console handler.)
        console_msg_format = '%(asctime)s %(levelname)-8s: %(message)s'
        # A named logger (not the root), so root-logger traffic from
        # other libraries stays out of this channel.
        logger = logging.getLogger("mylogger")
        logger.setLevel(logging.DEBUG)
        cformat = '%(log_color)s' + console_msg_format
        colors = {'DEBUG': 'green', 'INFO': 'cyan', 'WARNING': 'bold_yellow', 'ERROR': 'bold_red',
                  'CRITICAL': 'bold_purple'}
        date_format = '%Y-%m-%d %H:%M:%S'
        formatter = colorlog.ColoredFormatter(cformat, date_format, log_colors=colors)
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
        MyLogger.logger = logger
if __name__ == '__main__':
    # One-time setup, then log through the shared class attribute.
    MyLogger.configure()
    MyLogger.logger.error("debug message")
Once the MyLogger.configure is called, then you can use the MyLogger.logger.* any where in your app / scripts.
This can be done without a helper class as well. Create a logger configuration file. Configure your custom logger with a different name rather than root, and from your code always call logging.getLogger("name-of-logger") to create a logging instance.
I have written a below code to write the logs into cloudwatch using watchtower.
import os
import sys
import time
import boto3
import watchtower
import logging.handlers
from scripts.config import app_configurations
def fast_logging():
    """Create the "Fast-Logger" logger backed by an AWS CloudWatch
    (watchtower) handler writing to a per-day stream.

    :return: the configured Logger.
    :raises Exception: re-raises anything that fails during setup.
    """
    try:
        boto3_session = boto3.session.Session()
        LOG_GROUP = "Fast-Logging"
        # BUG FIX: bare `DEBUG` was a NameError -- qualify it.
        log_level = logging.DEBUG
        # Daily stream name, e.g. fast_logging_20240101.log
        stream_name = os.path.join("fast_logging"+ "_" + time.strftime("%Y%m%d") + '.log')
        logger = logging.getLogger("Fast-Logger")
        logger.setLevel(log_level)
        formatter = logging.Formatter('%(name)s - %(levelname)s - %(filename)s - %(module)s: %(funcName)s: '
                                      '%(lineno)d - %(message)s')
        log_handler = watchtower.CloudWatchLogHandler(log_group=LOG_GROUP, boto3_session=boto3_session,
                                                      stream_name=stream_name)
        log_handler.setFormatter(formatter)
        logger.addHandler(log_handler)
        return logger
    except Exception as e:
        raise e

logger = fast_logging()
The above code works for normal Python code, but it is not able to send logs to the CloudWatch stream from FastAPI services.
I found that this code works for me
import os
import time
import boto3
import watchtower
import logging.handlers
def fast_logging():
    """Build and return the "Fast-Logger" logger, shipping records to a
    CloudWatch stream named after today's date."""
    try:
        log_group = "Fast-Logging"
        # Daily stream name, e.g. fast_logging_20240101.log
        stream = os.path.join("fast_logging"+ "_" + time.strftime("%Y%m%d") + '.log')
        cw_logger = logging.getLogger("Fast-Logger")
        cw_logger.setLevel("INFO")
        record_format = logging.Formatter('%(name)s - %(levelname)s - %(filename)s - %(module)s: %(funcName)s: '
                                          '%(lineno)d - %(message)s')
        handler = watchtower.CloudWatchLogHandler(log_group=log_group,
                                                  stream_name=stream)
        handler.setFormatter(record_format)
        cw_logger.addHandler(handler)
        return cw_logger
    except Exception as e:
        raise e

logger = fast_logging()
logger.info("test this")
I am not able to get this one to work — probably due to a lack of understanding of the Python logging module.
Use case: print one variable on all log messages, i.e. "jobID". When multiple instances of this utility run in parallel on the same server, syslog or /var/log/messages can then be parsed live based on this jobID. Here is an attempt with the LoggerAdapter method (error lines commented out):
def startlog(self, log_folder, testname=None):
    """First attempt: tag records with a jobid via LoggerAdapter.

    Without testname: configure the root logger (file + stdout) and
    return it.  With testname: add a dedicated file handler to self.log
    and return that handler.
    """
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    ltime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if testname:
        logfile = "%s/%s_log_%s.log" % (log_folder, testname, ltime)
    else:
        logfile = "%s/log_%s.log" % (log_folder, ltime)
    logger = logging.getLogger()   # root logger
    logger.setLevel(logging.INFO)
    formated = logging.Formatter('%(asctime)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    #What is desired is -
    #formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    if not testname:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        logger.addHandler(fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formated)
        logger.addHandler(ch)
        # Next line errors -
        # logger = logging.LoggerAdapter(logger, {"jobid": self.jobdata.jobid})
        # NOTE(review): a LoggerAdapter only augments calls made through
        # the adapter object itself; records logged by other modules
        # would still lack the %(jobid)s key -- confirm against docs.
        return logger
    else:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        self.log.addHandler(fh)
        # Next line errors -
        # logger = logging.LoggerAdapter(self.log, {"jobid": self.jobdata.jobid})
        return fh
2nd Try with Filters:
def startlog(self, log_folder, t_name=None):
    """Second attempt: inject the jobid via a logger-level Filter."""
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    ltime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if t_name:
        logfile = "%s/%s_log_%s.log" % (log_folder, t_name, ltime)
    else:
        logfile = "%s/log_%s.log" % (log_folder, ltime)
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    # NOTE(review): a filter attached to a *logger* only runs for
    # records logged directly through that logger; records from other
    # library loggers (e.g. paramiko) reach the handlers without a
    # `jobid` attribute -- hence the KeyError during formatting below.
    root.addFilter(ContextFilter(self.jobdata.jobid))
    formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    if not t_name:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        root.addHandler(fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formated)
        root.addHandler(ch)
        return root
    else:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        self.log.addHandler(fh)
        return fh
class ContextFilter(logging.Filter):
    """
    This is a filter which injects contextual information into the log.

    Every record passing through it gains a `jobid` attribute, usable as
    %(jobid)s in a Formatter; the record is always kept (returns True).
    """

    def __init__(self, jobid):
        # BUG FIX: initialise the base Filter so its name-matching state
        # exists; the original skipped super().__init__().
        super().__init__()
        self.jobid = jobid

    def filter(self, record):
        record.jobid = self.jobid
        return True
Issue faced with filters is 'keyerror' from other modules(paramiko - transport.py). Similar to How to properly add custom logging filters in Python modules
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 859, in emit
msg = self.format(record)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 732, in format
return fmt.format(record)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 474, in format
s = self._fmt % record.__dict__
KeyError: 'jobid'
Logged from file transport.py, line 1567
Silly me — I needed to add the filter to the log handler, not the logger. This is working so far:
def startlog(self, log_folder, t_name=None):
    """Set up run logging stamped with this job's id.

    Without t_name: configure the root logger with a file handler and a
    stdout handler and return it.  With t_name: add a dedicated file
    handler to self.log and return that handler.  The jobid filter is
    attached per-handler, so foreign records cannot trigger a KeyError.
    """
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if t_name:
        logfile = "%s/%s_log_%s.log" % (log_folder, t_name, stamp)
    else:
        logfile = "%s/log_%s.log" % (log_folder, stamp)

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Console format carries %(jobid)s; note the file format omits it.
    formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    formatted_file = logging.Formatter('%(asctime)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')

    file_handler = logging.FileHandler(filename=logfile)
    file_handler.setFormatter(formatted_file)
    file_handler.addFilter(ContextFilter(self.jobdata.jobid))

    if t_name:
        self.log.addHandler(file_handler)
        return file_handler

    logger.addHandler(file_handler)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(formated)
    console_handler.addFilter(ContextFilter(self.jobdata.jobid))
    logger.addHandler(console_handler)
    return logger
I am trying to log the actions in a function and I have written the following function to log responses to different files based on the type of response i.e. info,error,debug and warning.
# Root-logger configuration: records that propagate from the named
# loggers below also end up in this file.
logging.basicConfig(filename='indivi_service.log',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' )
def setup_logger(logger_name, log_file, level=logging.DEBUG):
    """Attach a plain FileHandler *and* a TimedRotatingFileHandler to
    the named logger."""
    l = logging.getLogger(logger_name)
    # NOTE(review): Formatter() with no format string falls back to the
    # bare-message default.
    formatter = logging.Formatter()
    fileHandler = logging.FileHandler(log_file)
    fileHandler.setFormatter(formatter)
    # NOTE(review): two handlers on the same logger -- and the calls
    # below pass logger_name == log_file, so both write to the same
    # path.  Every record therefore appears twice: this is the
    # duplicate-line symptom described in the question.
    handler = TimedRotatingFileHandler(logger_name,
                                       when="d",
                                       interval=1,
                                       backupCount=100)
    l.setLevel(level)
    l.addHandler(fileHandler)
    l.addHandler(handler)
# One logger per severity; because logger_name and log_file are the
# same string, both handlers inside setup_logger target the same file.
setup_logger('debug', r'debug')
setup_logger('error', r'error')
setup_logger('warning', r'warning')
setup_logger('info', r'info')

# Module-level handles used by the request handler below.
debug = logging.getLogger('debug')
error = logging.getLogger('error')
warning = logging.getLogger('warning')
info = logging.getLogger('info')
class Info(APIHandler):
    """Request handler: queries the client for an item and logs the result
    at each severity through the per-level loggers."""

    # NOTE(review): "#gen.coroutine" looks like a mangled "@gen.coroutine"
    # (Tornado) decorator; left as-is since `gen` is not shown imported.
    #gen.coroutine
    def post(self):
        req = json.loads(self.request.body)
        resp, content = client(item_id=req['item_id'])
        debug.debug(content)
        # BUG FIX: the message was unquoted (a SyntaxError) in the original.
        info.info('hello world')
        warning.warn('warning message')
        error.error('error message')
The problem that I am facing is that a response is printed twice each time I call a function.
for example:
info.log
hello world
hello world
Can anyone tell me why this is happening? It is the same with all the log files.
Thanks
try:
import logging
import logging.handlers
# Root-logger configuration; by default, records from the named loggers
# below also propagate here.
logging.basicConfig(filename='indivi_service.log',
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' )
def setup_logger(logger_name, log_file, level=logging.DEBUG):
    """Give the named logger a single daily-rotating file handler that
    writes timestamped records to <log_file>.log (100 backups kept)."""
    target = logging.getLogger(logger_name)
    target.setLevel(level)
    rotating = logging.handlers.TimedRotatingFileHandler(
        str(log_file) + '.log', when="d", interval=1, backupCount=100)
    rotating.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    target.addHandler(rotating)
# Four severity-named loggers, each with its own rotating file
# (debug.log, error.log, warning.log, info.log).
setup_logger('debug', r'debug')
setup_logger('error', r'error')
setup_logger('warning', r'warning')
setup_logger('info', r'info')

debug = logging.getLogger('debug')
error = logging.getLogger('error')
warning = logging.getLogger('warning')
info = logging.getLogger('info')

if __name__ == "__main__":
    # Each line lands once in its own per-logger file (records also
    # propagate to the root logger configured above).
    info.info('hello world')
    error.info('hello world')