log setup with formatting and logrotate - python

In the program below I am trying to set up logging with my required formatting using basicConfig, and to use RotatingFileHandler to rotate the logs.
But I am getting this error:
Traceback (most recent call last):
File "C:\Python27\lib\logging\handlers.py", line 77, in emit
self.doRollover()
File "C:\Python27\lib\logging\handlers.py", line 142, in doRollover
os.rename(self.baseFilename, dfn)
WindowsError: [Error 32] The process cannot access the file because it is being used by another process
Logged from file log_rotate.py, line 53
I could not figure out where the problem is; I am new to Python. Can someone please point me in the right direction?
import os
import logging
import time
import string
from ctypes import windll
from logging.handlers import RotatingFileHandler

LOG_FILE = 'C:\\temp\\debug.log'

def get_drives():
    drives = []
    bitmask = windll.kernel32.GetLogicalDrives()
    for letter in string.ascii_uppercase:
        if bitmask & 1:
            drives.append(letter)
        bitmask >>= 1
    return drives

def create_temp_dir():
    drives = get_drives()
    temp_dir = drives[0] + ':\\temp\\'
    if not os.path.exists(temp_dir):
        print(" creating temp for logs and etc ", temp_dir)
        os.makedirs(temp_dir)
    return temp_dir

def make_log_setup():
    global LOG_FILE
    temp_dir = create_temp_dir()
    log_file = temp_dir + 'debug.log'
    date_strftime_format = "%d-%b-%y %H:%M:%S"
    message_format = '%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s'
    logging.basicConfig(filename=log_file, format=message_format,
                        datefmt=date_strftime_format, level=logging.DEBUG)

def create_rotating_log(path):
    """
    Creates a rotating log
    """
    logger = logging.getLogger("Rotating Log")
    logger.setLevel(logging.DEBUG)
    # add a rotating handler
    handler = RotatingFileHandler(path, maxBytes=20, backupCount=5)
    logger.addHandler(handler)
    for i in range(6):
        logger.info("This is test log line %s" % i)
        time.sleep(1.5)

if __name__ == "__main__":
    #log_file = "test.log"
    make_log_setup()
    create_rotating_log(LOG_FILE)
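A likely cause: make_log_setup() calls basicConfig(filename=...) on the same C:\temp\debug.log, so the root logger's FileHandler keeps the file open, and when RotatingFileHandler.doRollover() later calls os.rename() on Windows, the second open handle makes the rename fail with Error 32. A minimal sketch (assuming you want the same format and rotation settings) that lets a single handler own the file:

import logging
from logging.handlers import RotatingFileHandler

def make_rotating_log_setup(path):
    # Hypothetical replacement for make_log_setup() + create_rotating_log():
    # skip basicConfig(filename=...) so no second handler holds the file open,
    # and put the formatting on the rotating handler instead.
    handler = RotatingFileHandler(path, maxBytes=20, backupCount=5)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s',
        datefmt="%d-%b-%y %H:%M:%S"))
    logger = logging.getLogger("Rotating Log")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger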


multiprocessing subprocess log to separate file

My main program logs to its own log file and the sub-process should have its own log file.
I replaced the logger object inside the multiprocessing process, but the logging data from the sub-process is additionally redirected to the main log file.
How can I prevent this?
The structure looks like this:
import logging
import sys
import os
from pathlib import Path
import multiprocessing
import time
import requests

class ProcessFilter(logging.Filter):
    """Only accept log records from a specific pid."""

    def __init__(self, pid):
        self._pid = pid

    def filter(self, record):
        return record.process == self._pid

def create_logger(file):
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    log.addFilter(ProcessFilter(pid=os.getpid()))
    file_handler = logging.FileHandler(file)
    stream_handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
                                  datefmt='%a, %d %b %Y %H:%M:%S')
    file_handler.setFormatter(formatter)
    stream_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    log.addHandler(stream_handler)
    return log

def subprocess_init():
    global log
    sub_log_file = str(Path.home()) + '/logfile_sub.log'
    log = create_logger(sub_log_file)
    do_subprocess_stuff()

def do_subprocess_stuff():
    count = 0
    while True:
        create_log("subprocess", count)
        time.sleep(5)
        count += 1

def main_tasks():
    num = 10
    while num > 0:
        create_log("main", num)
        time.sleep(5)
        num -= 1

def create_log(text, num):
    log.debug(text + " log %s", num)

if __name__ == '__main__':
    file = str(Path.home()) + '/logfile.log'
    log = create_logger(file)
    sub_process = multiprocessing.Process(target=subprocess_init, args=())
    sub_process.daemon = True
    sub_process.start()
    main_tasks()
I am simply translating this answer to fit multiprocessing.
import logging

class ProcessFilter(logging.Filter):
    """Only accept log records from a specific pid."""

    def __init__(self, pid):
        self._pid = pid

    def filter(self, record):
        return record.process == self._pid

import logging
import os
import sys

def create_logger(file):
    log = logging.getLogger('')  # why use this logger and not __name__ ?
    log.setLevel(logging.DEBUG)
    log.addFilter(ProcessFilter(pid=os.getpid()))  # logger-wide filter
    file_handler = logging.FileHandler(file)
    stream_handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
                                  datefmt='%a, %d %b %Y %H:%M:%S')
    file_handler.setFormatter(formatter)
    stream_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    log.addHandler(stream_handler)
    return log
N.B. you can also put the filter on a specific handler:
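For instance, a minimal sketch (reusing the ProcessFilter class above) with the filter attached to the file handler only, so the pid check guards that handler rather than every handler on the logger:

import logging
import os
import sys

file_handler = logging.FileHandler('logfile.log')
file_handler.addFilter(ProcessFilter(pid=os.getpid()))  # handler-wide filter

stream_handler = logging.StreamHandler(sys.stdout)      # left unfiltered

log = logging.getLogger('')
log.addHandler(file_handler)
log.addHandler(stream_handler)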

Python Fast API logging with Watchtower

I have written the code below to write logs to CloudWatch using Watchtower.
import os
import sys
import time
import boto3
import watchtower
import logging.handlers
from scripts.config import app_configurations

def fast_logging():
    try:
        boto3_session = boto3.session.Session()
        LOG_GROUP = "Fast-Logging"
        log_level = logging.DEBUG
        stream_name = os.path.join("fast_logging" + "_" + time.strftime("%Y%m%d") + '.log')
        logger = logging.getLogger("Fast-Logger")
        logger.setLevel(log_level)
        formatter = logging.Formatter('%(name)s - %(levelname)s - %(filename)s - %(module)s: %(funcName)s: '
                                      '%(lineno)d - %(message)s')
        log_handler = watchtower.CloudWatchLogHandler(log_group=LOG_GROUP, boto3_session=boto3_session,
                                                      stream_name=stream_name)
        log_handler.setFormatter(formatter)
        logger.addHandler(log_handler)
        return logger
    except Exception as e:
        raise e

logger = fast_logging()
The above code works for normal Python code, but it is not able to dump logs to the CloudWatch stream for logs coming from FastAPI services.
I found that this code works for me
import os
import time
import boto3
import watchtower
import logging.handlers

def fast_logging():
    try:
        LOG_GROUP = "Fast-Logging"
        log_level = "INFO"
        stream_name = os.path.join("fast_logging" + "_" + time.strftime("%Y%m%d") + '.log')
        logger = logging.getLogger("Fast-Logger")
        logger.setLevel(log_level)
        formatter = logging.Formatter('%(name)s - %(levelname)s - %(filename)s - %(module)s: %(funcName)s: '
                                      '%(lineno)d - %(message)s')
        log_handler = watchtower.CloudWatchLogHandler(log_group=LOG_GROUP,
                                                      stream_name=stream_name)
        log_handler.setFormatter(formatter)
        logger.addHandler(log_handler)
        return logger
    except Exception as e:
        raise e

logger = fast_logging()
logger.info("test this")

Right way to insert a variable in python logging format

I am not able to get this one to work, probably due to a lack of understanding of the Python logging module.
Use case: print one variable, a jobID, on all log messages. When multiple instances of this utility run in parallel on the same server, syslog or ../log/messages can be parsed live based on this jobID. Here is the attempt with the LoggerAdapter method (error lines commented):
def startlog(self, log_folder, testname=None):
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    ltime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if testname:
        logfile = "%s/%s_log_%s.log" % (log_folder, testname, ltime)
    else:
        logfile = "%s/log_%s.log" % (log_folder, ltime)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formated = logging.Formatter('%(asctime)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    # What is desired is -
    # formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    if not testname:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        logger.addHandler(fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formated)
        logger.addHandler(ch)
        # Next line errors -
        # logger = logging.LoggerAdapter(logger, {"jobid": self.jobdata.jobid})
        return logger
    else:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        self.log.addHandler(fh)
        # Next line errors -
        # logger = logging.LoggerAdapter(self.log, {"jobid": self.jobdata.jobid})
        return fh
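For reference, a minimal self-contained sketch of the LoggerAdapter approach that does work, with a made-up jobid value; the keys of the dict passed to LoggerAdapter are what %(jobid)s in the format string resolves against:

import logging
import sys

formatter = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)

logger = logging.getLogger("job")
logger.setLevel(logging.INFO)
logger.addHandler(ch)

# The adapter injects its dict into every record logged through it.
adapter = logging.LoggerAdapter(logger, {"jobid": "job-42"})
adapter.info("this record carries the jobid")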
2nd Try with Filters:
def startlog(self, log_folder, t_name=None):
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    ltime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if t_name:
        logfile = "%s/%s_log_%s.log" % (log_folder, t_name, ltime)
    else:
        logfile = "%s/log_%s.log" % (log_folder, ltime)
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addFilter(ContextFilter(self.jobdata.jobid))
    formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    if not t_name:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        root.addHandler(fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formated)
        root.addHandler(ch)
        return root
    else:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formated)
        self.log.addHandler(fh)
        return fh

class ContextFilter(logging.Filter):
    """
    This is a filter which injects contextual information into the log.
    """
    def __init__(self, jobid):
        self.jobid = jobid

    def filter(self, record):
        record.jobid = self.jobid
        return True
The issue faced with filters is a KeyError coming from other modules (paramiko's transport.py), similar to "How to properly add custom logging filters in Python modules":
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 859, in emit
msg = self.format(record)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 732, in format
return fmt.format(record)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 474, in format
s = self._fmt % record.__dict__
KeyError: 'jobid'
Logged from file transport.py, line 1567
Dumb me, I need to add the filter to the log handler. This is working so far:
def startlog(self, log_folder, t_name=None):
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
    ltime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    if t_name:
        logfile = "%s/%s_log_%s.log" % (log_folder, t_name, ltime)
    else:
        logfile = "%s/log_%s.log" % (log_folder, ltime)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formated = logging.Formatter('%(asctime)s - %(jobid)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    formatted_file = logging.Formatter('%(asctime)s - %(levelname)s - %(module)10s - %(funcName)10s - %(message)s')
    if not t_name:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formatted_file)
        fh.addFilter(ContextFilter(self.jobdata.jobid))
        logger.addHandler(fh)
        ch = logging.StreamHandler(sys.stdout)
        ch.setFormatter(formated)
        ch.addFilter(ContextFilter(self.jobdata.jobid))
        logger.addHandler(ch)
        return logger
    else:
        fh = logging.FileHandler(filename=logfile)
        fh.setFormatter(formatted_file)
        fh.addFilter(ContextFilter(self.jobdata.jobid))
        self.log.addHandler(fh)
        return fh

How to rebuild the log file in Python?

Program A (Python):
LOG_PATH = fdoc_log + "/store_plus.log"
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename=LOG_PATH, filemode = 'w', level=logging.DEBUG, format=FORMAT)
Program B (bash):
mv store_plus.log store_plus.log.bk
Program A runs in the background and does not stop. When Program B moves store_plus.log aside, Program A can no longer write to the log. If I want Program A to recreate store_plus.log, how do I solve it?
Thank you.
PS: this way does not work:
f = open(LOG_PATH, "a")
f.close()
An example taken from pymotw-logging, with all credit to Doug Hellmann:
import glob
import logging
import logging.handlers

LOG_FILENAME = '/tmp/logging_rotatingfile_example.out'

# Set up a specific logger with our desired output level
my_logger = logging.getLogger('MyLogger')
my_logger.setLevel(logging.DEBUG)

# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=20, backupCount=5)
my_logger.addHandler(handler)

# Log some messages
for i in range(20):
    my_logger.debug('i = %d' % i)

# See what files are created
logfiles = glob.glob('%s*' % LOG_FILENAME)
for filename in logfiles:
    print(filename)
This way works, using WatchedFileHandler, which checks the file before each emit and reopens it if it has been moved or deleted:
import logging
import logging.handlers

logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)
ch = logging.handlers.WatchedFileHandler('a_log')
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
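A quick sketch of why this helps, continuing the snippet above and assuming a POSIX system like the one running Program B: WatchedFileHandler stats the path before each emit and reopens it when the device/inode changes, so the handler follows the rotation.

import os

logger.debug('goes to a_log')
os.rename('a_log', 'a_log.bk')  # stands in for Program B's mv
logger.debug('a_log is recreated and this line lands in the new file')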

getting odd error when calling python script within another python script

I am getting an IOError when calling a Python script (script 2) from within another Python script (script 1).
Script 2 runs fine when called standalone; however, if I call it from within script 1, I get the following error:
C:\>C:\Python32\python.exe R:\Scripts\BatchAging.py
Traceback (most recent call last):
File "R:\Scripts\DeleteAgingFiles.py", line 59, in <module>
hdlr = logging.FileHandler(log)
File "C:\Python32\lib\logging\__init__.py", line 965, in __init__
StreamHandler.__init__(self, self._open())
File "C:\Python32\lib\logging\__init__.py", line 984, in _open
stream = open(self.baseFilename, self.mode)
IOError: [Errno 22] Invalid argument: 'C:\\ C:\\cleanup.log'
Script 1 (Called by auto scheduler)
# AGING CLEANUP SCRIPT
# BUILT & TESTED WITH PYTHON 3.2
import os, errno, sys, time, logging
from datetime import datetime
from subprocess import call

st = time.time()
#
# CONFIG STATIC VARS
# Remember to escape backslash characters with an additional backslash.
#
pythonBin = 'C:\\Python32\\python.exe'       # LOCATION OF PYTHON BIN
script = 'R:\\Scripts\\DeleteAgingFiles.py'  # LOCATION OF AGING FILE CLEANUP SCRIPT
dirs = ['C:\\backup']                        # DIRECTORY TO PRUNE
batchLog = 'C:\\batchLog.log'
log = 'C:\\cleanup.log'                      # LOCATION OF THE LOG FILE. (THIS WILL BE AUTO GENERATED)
maxAgeInDays = 14                            # MAX AGE OF FILES\DIRS IN DAYS
mtime = True                                 # USE MTIME INSTEAD OF CTIME
# ##################################################
#
# DO NOT MODIFY ANYTHING BELOW THIS LINE.
#
# ##################################################
logger = logging.getLogger('batchCleanup')
hdlr = logging.FileHandler(batchLog)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
logger.info("[STARTING BATCH CLEANUP] [SCRIPT=%s] [DIRS=%s] [MAXAGE=%s] [MTIME = %s]" % (sys.argv[0], dirs, maxAgeInDays, str(mtime)))
if mtime == True:
    mtswitch = '-m'
else:
    mtswitch = ''
for dir in dirs:
    print([pythonBin, script, '-d ' + dir, '-l ' + log, '-a ' + str(maxAgeInDays), mtswitch])
    try:
        call([pythonBin, script, '-d ' + dir, '-l ' + log, '-a ' + str(maxAgeInDays), mtswitch])
    except:
        logger.error("[BATCH] Exception while processing directory: %s ]" % (dir))
        logger.error("[BATCH] Unexpected error: %s" % sys.exc_info()[1])
rt = time.time() - st
logger.info("[BATCH CLEANUP COMPLETE] [TOTAL RUN TIME: %s second(s)" % rt)
Script 2 (called by script 1)
# AGING FILE CLEANUP SCRIPT
# BUILT & TESTED WITH PYTHON 3.2
import os, errno, sys, argparse, time, logging
from datetime import datetime
from shutil import rmtree

st = time.time()
#
# EXAMPLE USAGE:
#
# This script can use either dynamic vars (input args) or static vars.
# To change this behavior, change the commenting below.
#
# Dynamic vars:
# C:\Python32\python.exe R:\Scripts\DeleteAgingFiles.py -d C:\backup -l C:\aging.log -a 14 -m
#
# Static vars:
# C:\Python32\python.exe R:\Scripts\DeleteAgingFiles.py
#
#
# INPUT ARGUMENT PROCESSING
#
parser = argparse.ArgumentParser(description='Prune aging files from directory.')
parser.add_argument('-d', '--dir', dest='dir', help='Full path to folder to be pruned', required=True)
parser.add_argument('-l', '--log', dest='log', help='Full path to log file', required=True)
parser.add_argument('-a', '--age', dest='age', type=int, help='Maximum age of files', required=True)
parser.add_argument('-m', '--mtime', dest='mtime', action='store_true', default=False, help="Use mtime instead of ctime")
args = parser.parse_args()
dir = args.dir
log = args.log
maxAgeInDays = args.age
mtime = args.mtime
print(log)
#
# CONFIG STATIC VARS
# Remember to escape backslash characters with an additional backslash.
#
# dir = 'C:\\backup'      # DIRECTORY TO PRUNE
# log = 'C:\\cleanup.log' # LOCATION OF THE LOG FILE. (THIS WILL BE AUTO GENERATED)
# maxAgeInDays = 14       # MAX AGE OF FILES\DIRS IN DAYS
# mtime = False           # USE MTIME INSTEAD OF CTIME
# ##################################################
#
# DO NOT MODIFY ANYTHING BELOW THIS LINE.
#
# ##################################################
logger = logging.getLogger('cleanup')
hdlr = logging.FileHandler(log)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
logger.info("[STARTING CLEANUP] [SCRIPT=%s] [DIR=%s] [MAXAGE=%s] [MTIME = %s]" % (sys.argv[0], dir, maxAgeInDays, str(mtime)))
os.chdir(dir)
files = os.listdir(dir)
for file in files:
    if file == '.' or file == '..':
        continue
    path = dir + os.sep + file
    if mtime == True:
        ts = datetime.fromtimestamp(os.stat(path).st_mtime)
    else:
        ts = datetime.fromtimestamp(os.stat(path).st_ctime)
    if os.path.isdir(path):
        objType = 'DIRECTORY'
    else:
        objType = 'FILE'
    age = datetime.now() - ts
    if age.days > maxAgeInDays:
        try:
            if os.path.isdir(path):
                rmtree(path)
            else:
                os.remove(path)
        except OSError as exc:
            if exc.errno == errno.EACCES:
                logger.warning("[PERMISSION DENIED] [%s] [%s] [AGE: %s day(s)]" % (objType, path, age.days))
            else:
                logger.error("Exception while processing: %s [%s] [AGE: %s day(s)]" % (path, objType, age.days))
                logger.error("Unexpected error: %s" % sys.exc_info()[1])
        else:
            logger.info("[DELETED %s] [%s] [AGE: %s day(s)]" % (objType, path, age.days))
    else:
        logger.info("[IGNORED %s] [%s] [AGE: %s day(s)]" % (objType, path, age.days))
rt = time.time() - st
logger.info("[CLEANUP COMPLETE] [TOTAL RUN TIME: %s second(s)" % rt)
Cleaner is to import the script and run its main method:
import DeleteAgingFiles
DeleteAgingFiles.main()
Adding a main method to your script:
def main():
    ...  # the main code goes here

if __name__ == "__main__":
    main()
The best solution to this problem, I would think, is not to use call to run the other Python script. Instead, import it as a module and call its functions directly.
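A hypothetical sketch of what that could look like, assuming DeleteAgingFiles grows a main() that takes its settings as parameters instead of reading sys.argv:

import DeleteAgingFiles

for dir in dirs:
    # No subprocess and no argv quoting to get wrong; exceptions propagate
    # directly into the batch script's try/except.
    DeleteAgingFiles.main(dir=dir, log=log, age=maxAgeInDays, mtime=mtime)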
