I'd like to change the log directory depending on the time (specifically the hour) at which a new log record is created.
For example, say the message aaa is saved to a log file in the directory d:\log\191105\09\log.log at 09:59 while the program is running.
As time passes, a new log message bbb should be saved to a log file in a different directory, d:\log\191105\10\log.log, at 10:00, without terminating the program.
My log manager code is as follows:
import logging
import datetime
import psutil
import os, os.path

class LogManager:
    today = datetime.datetime.now()
    process_name = psutil.Process().name()
    process_id = str(psutil.Process().pid)

    # date/directory config
    log_dir = "D:/LOG/" + today.strftime("%Y%m%d") + "/" + today.strftime("%H") + "/"
    log_filename = process_name + '_[' + process_id + ']_' + today.strftime("%Y-%m-%d_%H") + '.log'

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    log = logging.getLogger('mypython')
    log.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s||%(levelname)s||%(message)s')
    fileHandler = logging.FileHandler(log_dir + log_filename)
    fileHandler.setFormatter(formatter)
    log.addHandler(fileHandler)

    def update(self):
        self.today = datetime.datetime.now()
        old_log_dir = self.log_dir
        self.log_dir = "D:/LOG/" + self.today.strftime("%Y%m%d") + "/" + self.today.strftime("%H") + "/"
        if old_log_dir == self.log_dir:
            return
        self.log_filename = self.process_name + '_[' + self.process_id + ']_' + self.today.strftime("%Y-%m-%d_%H") + '.log'
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        self.log = logging.getLogger('mypython')
        # here, I want to update my file handler
        for hdlr in self.log.handlers[:]:
            if isinstance(hdlr, logging.FileHandler):
                self.log.removeHandler(hdlr)
        self.fileHandler = logging.FileHandler(self.log_dir + self.log_filename)
        self.fileHandler.setFormatter(self.formatter)
        self.log.addHandler(self.fileHandler)

    def i(self, ex):
        self.update()
        self.log.info(ex)

    def w(self, ex):
        self.update()
        self.log.warning(ex)
and I call these functions like this:
import LogManager as lm
logManager = lm.LogManager()
logManager.i('message')
It works well, but it seems that it does not update its fileHandler after the hour passes.
I tried to find out whether the logger has an updateHandler method... but it doesn't.
What should I do?
You can do this much more easily by using what is already in the logging library. Specifically, there is a TimedRotatingFileHandler for which you only need to change how it names the files it creates. I've created a minimal working demo for you which changes the folder that logs get saved into every second.
Edit: changed to roll over on every full hour instead of every second, as per the comment below.
import os
import logging
from datetime import datetime
from time import sleep, strftime
from logging.handlers import TimedRotatingFileHandler

def namer(default_name):
    head, tail = os.path.split(default_name)
    folder_name = 'logs' + strftime('%H%M%S')
    folder = os.path.join(head, folder_name)
    try:
        os.mkdir(folder)
    except FileExistsError:
        # folder already exists
        pass
    return os.path.join(folder, tail)

logger = logging.getLogger()
handler = TimedRotatingFileHandler('base.log', when='H')
handler.namer = namer
# set the rollover time to the next full hour
handler.rolloverAt = datetime.now().replace(microsecond=0, second=0, minute=0).timestamp() + 60*60
logger.addHandler(handler)

logger.warning('test1')
sleep(2)
logger.warning('test2')
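If you want the rotated files to land in the hourly folder layout from the question (D:/LOG/<YYYYMMDD>/<HH>/), the same namer hook can build that path instead. A minimal sketch, with the layout assumed from the question; note that the file being rotated holds the previous hour's records, hence the one-hour offset:

import os
from datetime import datetime, timedelta

def hourly_namer(default_name):
    # default_name is the rotated file's default path, e.g. 'base.log.2019-11-05_09';
    # keep the file name but redirect it into D:/LOG/<YYYYMMDD>/<HH>/
    head, tail = os.path.split(default_name)
    prev_hour = datetime.now() - timedelta(hours=1)
    folder = os.path.join('D:/LOG', prev_hour.strftime('%Y%m%d'), prev_hour.strftime('%H'))
    os.makedirs(folder, exist_ok=True)
    return os.path.join(folder, tail)

handler.namer = hourly_namer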
I created a function that checks whether a path is a directory.
The path depends on a field of a class named "wafer_num".
The function returns False, because the path it gets is:
/usr/local/insight/results/images/toolsDB/lauto_ptest_s2022-08-09/w<class 'int'>
instead of /usr/local/insight/results/images/toolsDB/lauto_ptest_s2022-08-09/w followed by an int number.
The whole path is constant up to the int number that is attached to the 'w' character in the path.
For example, if the wafer_num field of the class is 2, then the path will be:
/usr/local/insight/results/images/toolsDB/lauto_ptest_s2022-08-09/w2
I'm initializing the class from another file. The initialization is working well, because I have many other functions in my code that depend on this field.
I'm attaching the function that is giving me trouble and the class too. Many thanks:
# This is the other file that call a constructive function in my code:
import NR_success_check_BBs as check_BBs
# list of implemented recipes:
sanity = 'sanity_' + time_now + '_Ver_5.1'
R2M_delayer = 'R2M_E2E_delayer_NR_Ver_5.1'
R2M_cross_section = 'R2M_E2E_NR_Ver_5.1'
# ***********************************************************************************************
# ***********************************************************************************************
# the function set_recipe_name will create an object that contain all the data of that recipe
check_BBs.set_recipe_name(R2M_delayer)
# this function contain all the necessary functions to check the object.
check_BBs.success_check()
# EOF
# -------------------------------------------------------
# This is the code file [with the constructive function attached]:
import datetime
import logging
import os.path
from pathlib import Path
import time
time_now = str(datetime.datetime.now()).split()[0]
log_file_name_to_write = 'test.log'
log_file_name_to_read = 'NR_' + time_now + '.log'
logging.basicConfig(level=logging.INFO, filename=log_file_name_to_write, filemode='a',format='%(asctime)s - %(levelname)s - %(message)s')
R2M_RecipeRun_path = '/usr/local/disk2/unix_nt/R2M/RecipeRun/'
R2M_dir = '/usr/local/disk2/unix_nt/R2M'
metro_callback_format = 'Metro_Reveal_'
class recipe:
    name = ''
    script_name = ''
    wafer_num = int
    images = 0
    metrology_images = 0
    images_output_dir_path = '/usr/local/insight/results/images/toolsDB/lauto_ptest_s/' + time_now + 'w' + str(recipe.wafer_num)

def set_recipe_name(name):
    recipe.name = name
    logging.info("Analyzing the recipe: " + recipe.name)
    if recipe.name == 'sanity_' + time_now + '_Ver_5.1':
        recipe.script_name = 'Sanity_CS.py'
        recipe.wafer_num = 1
    elif recipe.name == 'R2M_E2E_NR_Ver_5.1':
        recipe.script_name = 'R2M.py'
        recipe.wafer_num = 2
    elif recipe.name == 'R2M_E2E_delayer_NR_Ver_5.1':
        recipe.script_name = 'R2M_delayer.py'
        recipe.wafer_num = 3
# ***********************************************************************************************
# ***********************************************************************************************
# This is the function that is giving me trouble:
def is_results_images_directory_exist():
    images_directory_path = Path('/usr/local/insight/results/images/toolsDB/lauto_ptest_s' + time_now + '/w' + wafer_num)
    print(images_directory_path)
    is_directory = os.path.isdir(images_directory_path)
    print(is_directory)
    if not is_directory:
        logging.error("There is no images directory for the current recipe at results")
        return False
    return True
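For what it's worth, the w<class 'int'> in the printed path is exactly what str() produces for the class attribute wafer_num while it still holds its default value, the type int, and the function above also references wafer_num without the recipe. prefix. A minimal sketch of the likely fix, reusing the names from the snippets above and building the path only after set_recipe_name has assigned a real number:

def is_results_images_directory_exist():
    # build the path lazily, after recipe.wafer_num has been set to an int
    images_directory_path = ('/usr/local/insight/results/images/toolsDB/lauto_ptest_s'
                             + time_now + '/w' + str(recipe.wafer_num))
    print(images_directory_path)
    if not os.path.isdir(images_directory_path):
        logging.error("There is no images directory for the current recipe at results")
        return False
    return True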
So I have an application that generally does a good job at collecting information and moving stuff from one AWS S3 bucket to another and then processing it, but it doesn't do a good job when people name their files with a prefix string.
Currently, I look for files with glob:
dump_files = glob.glob('docker-support*.zip')
This only matches file names that use docker-support as the main identifier, which is what my logic is built around.
However, I need it to account for times when people name files something like
super_Secret123-Production-whatever-docker-support*.zip
Basically, I would like the function to rename the file using that variable dump_files.
Should I just set the variable to something like this:
dump_files = glob.glob('*docker-support*.zip')
or
dump_files = glob.glob('/^(.*?)\docker-support*.zip')
The main thing is that I am going to want to pick the file up, rename it, and strip off the part of the file name that comes before the part needed for processing, docker-support*.zip, as the application needs to look for files in S3 named in just that format.
Code that handles this:
#!/usr/bin/env python3
# main execution loop for dump analysis tool
# Author: Bryce Ryan, Mirantis Inc.
#
# checks for new files in dump_originals, when found, runs run-parts against that file
# v1.1
# pause.main.loop check
# improved error handling
# escape file name to run-parts to avoid metacharacters
#
#
import os
import tempfile
import time
import zipfile
import logging
import shutil
import glob
from datetime import date
import sys
from os import path
logging.basicConfig(filename='/dump/logs/analyzer_logs.txt', level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%Y-%m-%dT%H:%M:%S%z' )
ROOT_DIR = os.path.abspath('..')
logging.debug("ROOT_DIR: {}".format(ROOT_DIR))
DUMP_DIR = os.path.join(ROOT_DIR, 'dump_originals')
logging.debug("DUMP_DIR: {}".format(DUMP_DIR))
WORK_DIR = os.path.join(ROOT_DIR, 'work_dir')
logging.debug("WORK_DIR: {}".format(WORK_DIR))
# can we actually create a file? just because we have perms, or think we do, doesn't mean there are
# enough inodes or capacity to do basic stuff.
with open(os.path.join(DUMP_DIR, "testfile"), 'w'):
    pass
logging.info("Beginning event loop for lodestone. Looking for new files in {}".format(DUMP_DIR))
print("Beginning event loop for lodestone.")
sys.stdout.flush()
os.chdir(DUMP_DIR)
logging.basicConfig(filename="analyzer.logs", level=logging.DEBUG)
while True:
    # here at the top of the loop, check to see if we should wait for a bit
    # typically, because of testing or maintenance
    # if the magic, undocumented file exists, wait for a bit and check again
    # do this forever
    while path.exists("/dump/pause.main.loop"):
        print("Pausing main loop for 60s, waiting on /dump/pause.main.loop")
        time.sleep(60)

    dump_files = glob.glob('docker-support*.zip')

    try:
        if dump_files[0] != '':
            logging.debug("files found")
            print("================== BEGIN PROCESSING NEW FILE ========= ")
            print("File found:", dump_files[0])
            print(" ")
            logging.info("Processing new file: " + dump_files[0])
            sys.stdout.flush()
            support_dump_file = dump_files[0]

            # check that it's an actual zip; if not, ignore it
            if not zipfile.is_zipfile(support_dump_file):
                print("File: " + str(support_dump_file))
                print("Inbound file is not recognized as a zip file.\n\n")
                logging.info("Inbound file not recognized as a zip file.")
                # now move it out of the way so we don't see it again;
                # ok if exists on destination and we ignore the error
                shutil.move(support_dump_file, "../dump_complete/")
                # no further processing, so back to the top
                sys.stdout.flush()
                continue

            temp_dir = tempfile.mkdtemp(prefix='dump.', dir=WORK_DIR)
            os.chmod(temp_dir, 0o777)
            logging.info("temp_dir is: " + temp_dir)

            # cmd = ROOT_DIR + "/utilities/run-parts --exit-on-error --arg=analyze --arg=" + DUMP_DIR + " --arg=" + support_dump_file + " --arg=" + temp_dir + " " + ROOT_DIR + "/analysis"
            cmd = ROOT_DIR + "/utilities/run-parts --arg=analyze --arg=" + DUMP_DIR + " --arg=\'" + support_dump_file + "\' --arg=" + temp_dir + " " + ROOT_DIR + "/analysis"
            print(cmd)
            logging.info("Will execute: " + cmd)
            sys.stdout.flush()

            try:
                retcode = os.system(cmd)
                tempdir = temp_dir
                if retcode == 1:
                    print("Removing temporary work_dir")
                    logging.debug("Removing temporary work_dir %s", tempdir)
                    shutil.rmtree(tempdir, ignore_errors=True)
                    sys.stdout.flush()
            finally:
                print("Finally block for cmd. . .")
                print("Removing temporary work_dir")
                logging.debug("Removing work_dir " + tempdir)
                print(tempdir)
                sys.stdout.flush()
                # shutil.rmtree(tempdir, ignore_errors=True)
                os.system('/bin/rm -rf ' + tempdir)
                sys.stdout.flush()
    except:
        pass

    # pause for a moment; save some processor cycles
    sys.stdout.flush()
    time.sleep(1)
Right now I do not have the function that will rename these files in there.
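One possible shape for that rename step (a sketch, not the application's actual code; the helper name is illustrative) is to glob with the wider pattern and strip everything before the docker-support marker:

import glob
import os

def normalize_dump_names():
    # match files with any user-added prefix and rename them
    # down to the plain docker-support*.zip form
    for old_name in glob.glob('*docker-support*.zip'):
        new_name = old_name[old_name.find('docker-support'):]
        if new_name != old_name and not os.path.exists(new_name):
            os.rename(old_name, new_name)

# then the existing lookup works unchanged:
normalize_dump_names()
dump_files = glob.glob('docker-support*.zip')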
I have a problem with Python input.
I'm creating a Python sneakers bot. I have a CLI set up that, when it opens, shows you the .txt files that are in the directory and then an input asking you to choose which ones you want to use to start your task. [1]
I implemented watchdog handlers that watch my directory to see if files are added or modified; when files get modified, the watchdog script refreshes the CLI, but the input [1] the user was asked for is still active. I need to stop that input after the screen gets cleared. How can I make this possible?
Here is my code:
def proxieschoice():
    import findfiles
    findfiles.my_observer.start()
    proxiesfile = 0
    proxynamelist = {}
    print('------------------------------')
    for file in glob.glob("*.txt"):
        proxiesfile = proxiesfile + 1
        with open(file) as f:
            count = sum(1 for _ in f)
        proxynamelist[proxiesfile] = file
        print(f"[{Fore.BLUE}{proxiesfile}{Style.RESET_ALL}] {file} [{count} proxies]")
    print('------------------------------')
    try:
        prox = int(input(f"{Fore.BLUE}>> {Style.RESET_ALL} Which proxies you want to use? "))
    except ValueError:
        print('Invalid Input')
        proxieschoice()
Here is findfiles.py:
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import converse

patterns = ["*"]
ignore_patterns = None
ignore_directories = False
case_sensitive = True
my_event_handler = PatternMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)

def on_created(event):
    converse.cleanscreen()
    converse.proxieschoice()

def on_deleted(event):
    converse.cleanscreen()
    converse.proxieschoice()

def on_modified(event):
    converse.cleanscreen()
    converse.proxieschoice()

def on_moved(event):
    converse.cleanscreen()
    converse.proxieschoice()

my_event_handler.on_created = on_created
my_event_handler.on_deleted = on_deleted
my_event_handler.on_modified = on_modified
my_event_handler.on_moved = on_moved

path = "."
go_recursively = True
my_observer = Observer()
my_observer.schedule(my_event_handler, path, recursive=go_recursively)
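There is no public API for cancelling a pending input() from another thread, but one approach that can work is to have the watchdog callbacks interrupt the main thread with a signal instead of re-entering the CLI themselves; the blocked input() then raises KeyboardInterrupt, which the prompt loop catches and uses to re-draw. A sketch, assuming a POSIX system (os.kill with SIGINT does not behave this way on Windows):

# findfiles.py (sketch): interrupt the main thread instead of calling proxieschoice()
import os
import signal

def on_modified(event):
    converse.cleanscreen()
    os.kill(os.getpid(), signal.SIGINT)   # interrupts the blocked input()

# converse.py (sketch): catch the interrupt and prompt again
def proxieschoice():
    while True:
        try:
            return int(input(">> Which proxies do you want to use? "))
        except KeyboardInterrupt:
            continue   # the watchdog refreshed the screen; re-print the file list and re-ask
        except ValueError:
            print('Invalid Input')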
I am working on saving JSON logs using Python. Below is the code:
import datetime
import json

log_file = 'app_log.json'

log_json = dict()
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"
log_json['Created'] = datetime.datetime.utcnow().isoformat()

with open(log_file, "a") as f:
    json.dump(log_json, f)  # , ensure_ascii=False)
    f.write("\n")
The code above generates the log file, but I have noticed that the file size keeps increasing, and in the future I might face disk-space issues. I was wondering if there is any pre-built rotating file handler available for JSON in which we can set a fixed size, let's say 100 MB, and upon reaching this size it will delete and recreate the file.
I have previously used from logging.handlers import RotatingFileHandler to do this for .log files, but I also want to do this for .json files. Please help. Thanks.
Python does not care about the log file's name.
You can use the rotating handler which you used for the .log file for the .json file as well.
See the sample example below:
# logging_example.py

import logging
import logging.handlers
import os
import time

logfile = os.path.join("/tmp", "demo_logging.json")

logger = logging.getLogger(__name__)

fh = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=1000, backupCount=5)  # noqa:E501
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)

while 1:
    time.sleep(1)
    logger.info("Long string to increase the file size")
You can also look at logrotate if you are working in a Unix environment. It is a great, simple tool with good documentation that does exactly what you need.
You can implement structured logging with RotatingFileHandler
import json
import logging
import logging.handlers
from datetime import datetime

class StructuredMessage:
    def __init__(self, message, /, **kwargs):
        self.message = message
        self.kwargs = kwargs

    def __str__(self):
        return '%s >>> %s' % (self.message, json.dumps(self.kwargs))

_ = StructuredMessage  # optional, to improve readability

log_json = {}
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"
log_json['Created'] = datetime.utcnow().isoformat()

LOG_FILENAME = 'logging_rotatingfile_example.out'

# Set up a specific logger with our desired output level
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=20, backupCount=5)
bf = logging.Formatter('%(message)s')
handler.setFormatter(bf)
logger.addHandler(handler)

logger.info(_('INFO', **log_json))
Note: check here for more info about structured-logging-python
You can also use json-logging-python with a RotatingFileHandler:
import logging
import logging.handlers
import json
import traceback
from datetime import datetime
import json_logging

json_logging.ENABLE_JSON_LOGGING = True

def extra(**kw):
    '''Add the required nested props layer'''
    return {'extra': {'props': kw}}

class CustomJSONLog(logging.Formatter):
    """
    Customized logger
    """
    def get_exc_fields(self, record):
        if record.exc_info:
            exc_info = self.format_exception(record.exc_info)
        else:
            exc_info = record.exc_text
        return {'python.exc_info': exc_info}

    @classmethod
    def format_exception(cls, exc_info):
        return ''.join(traceback.format_exception(*exc_info)) if exc_info else ''

    def format(self, record):
        json_log_object = {"#timestamp": datetime.utcnow().isoformat(),
                           "level": record.levelname,
                           "message": record.getMessage(),
                           "caller": record.filename + '::' + record.funcName
                           }
        json_log_object['data'] = {
            "python.logger_name": record.name,
            "python.module": record.module,
            "python.funcName": record.funcName,
            "python.filename": record.filename,
            "python.lineno": record.lineno,
            "python.thread": record.threadName,
            "python.pid": record.process
        }
        if hasattr(record, 'props'):
            json_log_object['data'].update(record.props)
        if record.exc_info or record.exc_text:
            json_log_object['data'].update(self.get_exc_fields(record))
        return json.dumps(json_log_object)

json_logging.init_non_web(custom_formatter=CustomJSONLog, enable_json=True)

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

LOG_FILENAME = 'logging_rotating_json_example.out'
handler = logging.handlers.RotatingFileHandler(
    LOG_FILENAME, maxBytes=20, backupCount=5)
logger.addHandler(handler)

log_json = {}
log_json["Data"] = {}
log_json['Data']['Key1'] = "value1"
log_json['Data']['alert'] = False
log_json['Data']['Key2'] = "N/A"

logger.info('Starting')
logger.debug('Working', extra={"props": log_json})
Note: check here for more info about json-logging-python
You can try this just before you write to / append to the file. It checks whether the file has reached the maximum number of lines and, if so, removes one line from the beginning of the file before you append to the end as usual.
filename = 'file.txt'
maxLines = 100

with open(filename) as f:
    count = len(f.readlines())
if count > maxLines:
    with open(filename, 'r') as fin:
        data = fin.read().splitlines(True)
    with open(filename, 'w') as fout:
        fout.writelines(data[1:])
I'm setting up a readout system that takes data from a number of instruments and needs to log the data to a log file. This system will be running for weeks at a time, and so each day should have a log file. Since these instruments are being manipulated over this time, they may also have log files associated with their status.
With this, I have a directory in which all of the logs are stored, for example 'C:/logs'. Since there will be multiple log files associated with each day, I'd like to automate the creation of a new subdirectory in the logs folder each day, so the structure of the files is something like 'C:/logs/20190814' for August 14, 'C:/logs/20190815' for the 15th, and so on. Then, in each daily directory I would have a number of log files such as 'data.log', 'instrument1.log', 'instrument2.log', etc.
Ideally, these would roll over at midnight each day.
I have been using the Python logging module to attempt to create these log files. I have been able to implement the TimedRotatingFileHandler, but the problems with this are that
(1) I want to change the directory that the log files are in based on the day, but leave their titles the same (e.g. 'C:/logs/20190814/data.log', 'C:/logs/20190815/data.log')
(2) the TimedRotatingFileHandler saves the files not with a '%Y%m%d.log' extension, but rather '.log.%Y%m%d', which is inconvenient to work with. I'd like to create a new directory each day and start writing a new log in the new day's directory.
Using the framework from another StackOverflow question that's similar to, but not exactly, what I needed, I was able to get the behavior I wanted. Here's the custom class, which subclasses logging's TimedRotatingFileHandler:
import codecs
import logging.handlers
import os
import time
from datetime import datetime

class MyTimedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler):
    def __init__(self, log_title, whenTo="midnight", intervals=1):
        self.when = whenTo.upper()
        self.inter = intervals
        self.log_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "logs"))
        if not os.path.isdir(self.log_file_path):
            os.mkdir(self.log_file_path)
        if self.when == "S":
            self.extStyle = "%Y%m%d%H%M%S"
        if self.when == "M":
            self.extStyle = "%Y%m%d%H%M"
        if self.when == "H":
            self.extStyle = "%Y%m%d%H"
        if self.when == "MIDNIGHT" or self.when == "D":
            self.extStyle = "%Y%m%d"
        self.dir_log = os.path.abspath(os.path.join(self.log_file_path, datetime.now().strftime(self.extStyle)))
        if not os.path.isdir(self.dir_log):
            os.mkdir(self.dir_log)
        self.title = log_title
        filename = os.path.join(self.dir_log, self.title)
        logging.handlers.TimedRotatingFileHandler.__init__(self, filename, when=whenTo, interval=self.inter, backupCount=0, encoding=None)
        self._header = ""
        self._log = None
        self._counter = 0

    def doRollover(self):
        """
        TimedRotatingFileHandler remix - rotates logs on a timed basis, moving
        the log into a new dated directory while keeping the same file name
        """
        self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        timeTuple = time.localtime(t)
        self.new_dir = os.path.abspath(os.path.join(self.log_file_path, datetime.now().strftime(self.extStyle)))
        if not os.path.isdir(self.new_dir):
            os.mkdir(self.new_dir)
        self.baseFilename = os.path.abspath(os.path.join(self.new_dir, self.title))
        if self.encoding:
            self.stream = codecs.open(self.baseFilename, "w", self.encoding)
        else:
            self.stream = open(self.baseFilename, "w")
        self.rolloverAt = self.rolloverAt + self.interval
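A minimal usage sketch for the class above (the logger name and log title are illustrative):

import logging

logger = logging.getLogger("daily")
logger.setLevel(logging.INFO)

# rolls over at midnight; each day's records land in logs/<YYYYMMDD>/data.log
handler = MyTimedRotatingFileHandler("data.log", whenTo="midnight", intervals=1)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)

logger.info("written to today's dated directory")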
Here is an example:
import logging
import time
from logging.handlers import TimedRotatingFileHandler

#----------------------------------------------------------------------
def create_timed_rotating_log(path):
    """"""
    logger = logging.getLogger("Rotating Log")
    logger.setLevel(logging.INFO)

    handler = TimedRotatingFileHandler(path,
                                       when="m",
                                       interval=1,
                                       backupCount=5)
    logger.addHandler(handler)

    for i in range(6):
        logger.info("This is a test!")
        time.sleep(75)

#----------------------------------------------------------------------
if __name__ == "__main__":
    log_file = "timed_test.log"
    create_timed_rotating_log(log_file)
This example will rotate the log every minute, with a backup count of 5. A more realistic rotation would probably be on the hour, so you would set the interval to 60 or the when to "h". When this code is run, it too will create 6 files, but instead of appending integers to the log file name, it will append a timestamp using the strftime format %Y-%m-%d_%H-%M-%S.