Write line with timestamp+message to file - python

I want to create a logfile that adds a new line to a text file log.txt every time an error occurs. I'm pretty new to Python, so maybe I'm missing something... but every time an error occurs, log.txt is overwritten and only the current error message is displayed, although the error message is different every time (due to the timestamp) and I added a \n.
Thats my code so far:
import os
import sys
import time
import datetime

try:
    path = sys.argv[1]
    ts = time.time()
    sttime = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d_%H:%M:%S - ')
    # some more things but nothing else of interest for here
except:
    error = "ERROR! No file 'bla' found!"
    log = 'log.txt'
    logfile = file(log, "w")
    logfile.write(sttime + error + '\n')
    logfile.close()
    sys.exit(0)
Maybe you can help me out here. Do I need a loop somewhere? I tried to create an empty string (error = "") and add the error message to log.txt with += each time an error occurs, but that didn't work at all :-/
Thank you!

Open the file in append mode, as 'w' mode will truncate the file each time, i.e.
logfile = open(log, "a")
And you should use with:
with open(log, 'a') as logfile:
    logfile.write(sttime + error + '\n')
No need to close the file, this will happen automatically.
Note that if the exception is raised at path = sys.argv[1], the timestamp might not be set when you try to log. It would be better to get the timestamp in the logging code.
Also, you should not use a bare except clause, but at least catch the exception and report it.
from datetime import datetime

except Exception, exc:
    sttime = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
    error = "ERROR! {}".format(exc)
    log = 'log.txt'
    with open(log, 'a') as logfile:
        logfile.write(sttime + error + '\n')
    raise
    # sys.exit(0)
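On Python 3 the same handler only needs the except Exception as exc: syntax and the built-in open() (the file() builtin is gone). A minimal sketch, assuming the same log.txt in the working directory:

from datetime import datetime
import sys

try:
    path = sys.argv[1]
    # ... the rest of the work ...
except Exception as exc:
    sttime = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
    with open('log.txt', 'a') as logfile:   # 'a' appends instead of truncating
        logfile.write(sttime + "ERROR! {}\n".format(exc))
    raise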

When you do file(log, 'w'), the file log becomes empty. If you want to add something, you should use 'a' instead of 'w':
open(log, "a")

import sys
import datetime

class Logger(object):
    def __init__(self, n):
        self.n = n                    # maximum number of messages to log
        self.count = 0
        self.log = open('log.txt', 'a')
    def write(self, message):
        self.count += 1
        if self.count < self.n:
            stamp = datetime.datetime.now().strftime('%Y%m%d_%H:%M:%S')
            self.log.write("%s %s" % (stamp, message))
            self.log.flush()

sys.stdout = Logger(1000)             # e.g. cap the log at 1000 writes
stamp is a time string formatted the way you want.
Now the regular print function will write to the file.
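One hedged caveat: print issues a separate write() call for the trailing newline, so each print produces two timestamped writes unless you filter them, and you may want to restore the real stream when you are done. For example:

print("something went wrong")    # goes to log.txt via Logger.write(), with a timestamp
sys.stdout = sys.__stdout__      # restore normal console output when done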

Related

Python Try Except not Executing

I have written a script that is supposed to automate taking a data frame with pandas, interacting with it, putting the result on another part of the network as a reference, and then interacting with SmartSheets. It uses a very basic API and most of the functions and logic work well; the only issue is the try/except block at the end that is supposed to be scheduled out.
There is no error thrown; it just sits at a blank terminal. This is odd, because if I stack the functions I wrote one after another, they work with no issue.
This try/except block is there so the script can write out errors on the machine, and it will ultimately be put into a scheduling function later in the script.
This script is on a VM that runs Ubuntu 18.04. It is written in Python 2.
I have researched online, and I cannot find a situation where a try/except block neither errors out nor executes. I made sure my indentation in the IDE (Atom) is correct (4 spaces). I can line up the 4 functions themselves and they execute fine with no errors.
import os
import sys
import datetime
import tempfile
import smartsheet
import glob
import warnings
import platform
import pandas as pd
import math
from apscheduler.schedulers.blocking import BlockingScheduler

#variables
warnings.simplefilter(action='ignore', category=FutureWarning)
now = datetime.datetime.now()
PATH = "/mnt/cifs/rxlog.csv"
csvpath = "/home/ajskrilla/csvtest.csv"
End = '.csv'
today = now.strftime("%Y-%m-%d %H:%M")
path1 = "/mnt/cifs1"+"/Reports"+ now.strftime("%Y-%m-%d")+"/log Import"
path_glob = "/mnt/cifs1"+"/Reports"+ now.strftime("%Y-%m-%d")+"/log Import"+now.strftime("%Y-%m-%d")
Files_to_compare = glob.glob('/mnt/cifs1'+"/Reports"+ now.strftime("%Y-%m-%d")+'/log Import'+now.strftime("%Y-%m-%d")+'*.csv')
Fpath = path1 + now.strftime("%Y-%m-%d %H:%M") + End
SSName = 'Call Sheet/NDC ' + now.strftime("%Y-%m-%d %H:%M") + End
list_of_files = Files_to_compare
sched = BlockingScheduler()

#start of process
def Import_csv():
    data_file = pd.read_csv(PATH, error_bad_lines=False, sep="|", parse_dates=True, low_memory=False, quotechar=None, quoting=3)
    data_file.to_csv(csvpath)

def Clean_CSV():
    file_path_directory = "/mnt/cifs1/"+"Reports" + now.strftime("%Y-%m-%d")
    if not os.path.exists(file_path_directory):
        os.makedirs(file_path_directory)
    fields=['RXNBR', 'RX STOREID', 'FILLDATE', 'PATNAMELAST', 'PATNAMEFIRST', 'NH NBR', 'RX HOLD STATUS', 'RX HOLD REASON']
    df = pd.read_csv(csvpath, skipinitialspace=True, usecols=fields, low_memory=False)
    df.columns = ['RXNBR','RX_STOREID', 'FILLDATE', 'PATNAMELAST', 'PATNAMEFIRST', 'NH_NBR', 'RX_HOLD_STATUS', 'RX_HOLD_REASON']
    nf = df[df.NH_NBR == 0][df.RX_HOLD_STATUS != 'Online Queued']
    with tempfile.NamedTemporaryFile(delete=False) as temp:
        nf.to_csv(Fpath, index=False)

def Compare_files():
    if platform.system() == 'Linux':
        if len(list_of_files) > 2:
            latest_file = min(list_of_files, key=os.stat)
            first_file = max(list_of_files, key=os.stat)
            one_file= pd.read_csv(first_file)
            two_file= pd.read_csv(latest_file)
            out = one_file.append(two_file)
            out.drop_duplicates('RXNBR', inplace=True)
            with tempfile.NamedTemporaryFile(delete=False) as temp:
                out.to_csv(Fpath, index=False)
            for file in list_of_files:
                if file != latest_file:
                    for files in list_of_files:
                        os.remove(files)
        else:
            pass
    #delete the old file

def SS_import():
    ss_client = smartsheet.Smartsheet("BANNERID#")
    ss_client.errors_as_exceptions(True)
    imported_sheet = ss_client.Workspaces.import_csv_sheet(
        # need to change this based upon workspace ID
        xxxxxxxxxxxxxxxx, #WS ID
        Fpath,
        SSName,
        header_row_index=0
    )

def SS_delete():
    ss_client = smartsheet.Smartsheet("BANNNERID#")
    action = ss_client.Sheets.list_sheets(include_all=True)
    for single_sheet in action.data:
        Sheetid= single_sheet.id
        ss_client.Sheets.delete_sheet(
            single_sheet.id)

######################################################################################################################################
#this is where the issue arises, during this function
#full fnct with error handling
def NDC_import():
    try:
        Import_csv()
    except Exception as E:
        Import_error_file = open('/mnt/Error Files/IE Error' + now.strftime("%Y-%m-%d %H:%M") + '.txt', 'w+')
        for line in Import_error_file:
            line.write(E)
            line.close()
            sys.exit()
    try:
        Clean_CSV()
    except Exception as E:
        Clean_CSV_error = open('/mnt/Error Files/CC Error' + now.strftime("%Y-%m-%d %H:%M") + '.txt', 'w+')
        for line in Clean_CSV_error:
            line.write(E)
            line.close()
            sys.exit()
    try:
        Compare_files()
    except Exception as E:
        Compare_files_error = open('/mnt/Error Files/CF Error' + now.strftime("%Y-%m-%d %H:%M") + '.txt', 'w+')
        for line in Compare_files_error:
            line.write(E)
            line.close()
            sys.exit()
    try:
        SS_import()
    except Exception as E:
        SS_import_error = open('/mnt/Error Files/SSI Error' + now.strftime("%Y-%m-%d %H:%M") + '.txt', 'w+')
        for line in SS_import_error:
            line.write(E)
            line.close()
            sys.exit()

NDC_import()
When the script is run, the NDC_import() function won't execute at all. If the 4 functions Import_csv(), Clean_CSV(), Compare_files(), and SS_import() are run just by themselves, they execute normally.
Why can't it run in that try/except block I wrote? It does not even throw an error, and the terminal stays blank.
I think the primary issue revolves around your exception handling. In particular, you're opening the files in w+ mode and then iterating over the freshly opened (and therefore empty) file -- not exactly a syntax error, but functionally broken. It's also worth noting that if there are no errors, you should not expect to see any printed output.
I would suggest taking a different direction with error handling entirely and considering the fabulous logging library.
An example of how you could use it would be
# ... other imports
import logging

# ... your function definitions

def NDC_Import():
    try:
        Import_csv()
    except Exception as E:
        logging.exception('error during Import_csv()')
        return
    # and so on ...
That logging.exception() line will print out the whole traceback to whatever logger is configured, and you can configure it to log to a file.
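As a rough sketch of that file configuration (the path and format below are placeholders, not anything from the original script):

import logging

# One-time setup near the top of the script; logging.exception() and
# friends will then append to this file, traceback included.
logging.basicConfig(
    filename='/tmp/ndc_import.log',   # hypothetical log path
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s',
)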
You're opening the file in w+ mode. Example:
SS_import_error = open('/mnt/Error Files/SSI Error' + now.strftime("%Y-%m-%d %H:%M") + '.txt', 'w+')
w+ means it will create the file if it doesn't exist, or truncate it if it does. That means that SS_import_error will always point to an empty file after this line. Then you immediately try to loop over the lines of that file:
for line in SS_import_error:
    line.write(E)
    line.close()
    sys.exit()
But attempting to loop over the lines of an empty file will end immediately, without ever executing the code within the block. The end result is that the except block runs, but it doesn't actually do anything (other than potentially wipe a file).
I don't know exactly what you intended the for loops to do, so I can't suggest a fix.
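If the intent was simply to record the error message before exiting, a hedged sketch of one of those handlers (reusing the file naming from the question) could look like this:

try:
    SS_import()
except Exception as E:
    # Write the error text out, then stop the script.
    err_path = '/mnt/Error Files/SSI Error' + now.strftime("%Y-%m-%d %H:%M") + '.txt'
    with open(err_path, 'w') as err_file:
        err_file.write(str(E))
    sys.exit()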

Weird loop in a CSV file parser

I have a Python script that reads a CSV file every x seconds.
What I do is:
Open the file, read the info as CSV, and loop over every entry.
This is done in this Python file:
import csv
import time
import datetime

CSV_PLAN = "./XoceKochPlan.csv"
chargePlanFile = open(CSV_PLAN, 'rt')

def loopMe():
    try:
        for eachRow in reader:
            print (eachRow)
    except Exception, ex:
        print ("Error processFileing the Thread" + str(ex))

print ("opening file " + str(CSV_PLAN))
now = datetime.datetime.utcnow().strftime("%a %b %d %H %M %S %Z %Y")
print ("Now " + str(now))
reader = csv.reader(chargePlanFile)
loopMe()
So far, so good: the output is as expected.
But if I do:
loopMe()
time.sleep(10)
loopMe()
then the file is only printed once!
The question is Why?
What am I missing? What is getting internally consumed, or is the reader just empty after the first loop?
In Python the file I/O handler has an internal pointer. After reading the file, it will be at the end of the CSV file. Ensure you call the chargePlanFile.close() method and reopen the file before calling loopMe() again, or use chargePlanFile.seek(0) to reset the position of the internal pointer.
When you start the second loop, your reader is already at the last line. You should rewind the file and reassign the reader, and you should do it inside your loopMe function, at the beginning.
def loopMe():
    chargePlanFile.seek(0)               # rewind the underlying file handle first
    reader = csv.reader(chargePlanFile)
    try:
        for eachRow in reader:
            print (eachRow)
    except Exception, ex:
        print ("Error processFileing the Thread" + str(ex))
If you want to keep the same code, just add chargePlanFile.seek(0) as the first line of loopMe (the csv.reader object itself has no seek method; you rewind the underlying file object).
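With the file handle rewound at the start of loopMe(), the experiment from the question then behaves as expected (a sketch reusing the question's names):

loopMe()          # prints every row
time.sleep(10)
loopMe()          # prints every row again, because chargePlanFile was rewound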

user created log files

I am getting a TypeError: object of type 'file' has no len().
I have traced the issue down to the path established upon execution.
What am I missing to correct this error, found within the savePath declaration or its usage in temp = os.path.join(savePath, files)?
def printTime(time):
    savePath = "C:\Users\Nicholas\Documents"
    files = open("LogInLog.txt", "a")
    temp = os.path.join(savePath, files)
    files.write("A LogIn occured.")
    files.write(time)
    print files.read
    files.close

main()
The whole program is below for reference:
from time import strftime
import os.path

def main():
    getTime()

def getTime():
    time = strftime("%Y-%m-%d %I:%M:%S")
    printTime(time)

def printTime(time):
    savePath = "C:\Users\Nicholas\Documents"
    files = open("LogInLog.txt", "a")
    temp = os.path.join(savePath, files)
    files.write("A LogIn occured.")
    files.write(time)
    print files.read
    files.close

main()
Here's a working version:
from time import strftime
import os.path

def main():
    getTime()

def getTime():
    time = strftime("%Y-%m-%d %I:%M:%S")
    printTime(time)

def printTime(time):
    savePath = "C:\Users\Nicholas\Documents"
    logFile = "LogInLog.txt"
    files = open(os.path.join(savePath, logFile), "a+")
    openPosition = files.tell()
    files.write("A LogIn occured.")
    files.write(time)
    files.seek(openPosition)
    print(files.read())
    files.close()

if __name__ == '__main__':
    main()
There were a few problems with the code snippet posted in the question:
Two import statements were concatenated together. Each should be on a separate line.
The os.path.join function doesn't work on an open filehandle.
The read() and close() methods were missing parens.
If the intent is to read what is written in append mode, it's necessary to get the current file position via tell() and seek() to that position after writing to the file.
While it's legal to call main() without any conditional check, it's usually best to make sure the module is being called as a script as opposed to being imported.

display an error message when file is empty - proper way?

Hi, I'm slowly trying to learn the correct way to write Python code. Suppose I have a text file which I want to check is empty. What I want is for the program to terminate immediately and for the console window to display an error message if it is indeed empty. What I've done so far is written below. Please teach me the proper way to handle this case:
import os

def main():
    f1name = 'f1.txt'
    f1Cont = open(f1name,'r')
    if not f1Cont:
        print '%s is an empty file' %f1name
        os.system ('pause')
    #other code

if __name__ == '__main__':
    main()
There is no need to open() the file, just use os.stat().
>>> #create an empty file
>>> f=open('testfile','w')
>>> f.close()
>>> #open the empty file in read mode to prove that it doesn't raise IOError
>>> f=open('testfile','r')
>>> f.close()
>>> #get the size of the file
>>> import os
>>> import stat
>>> os.stat('testfile')[stat.ST_SIZE]
0L
>>>
The pythonic way to do this is:
try:
    f = open(f1name, 'r')
except IOError as e:
    # you can print the error here, e.g.
    print(str(e))
Maybe a duplicate of this.
From the original answer:
import os

if os.stat(f1name).st_size == 0:
    print 'File is empty!'
If the file open succeeds, the value of f1Cont will be a file object and will not be False (even if the file is empty). One way you can check whether the file is empty (after a successful open) is:
if f1Cont.readlines():
    print 'File is not empty'
else:
    print 'File is empty'
Assuming you are going to read the file if it has data in it, I'd recommend opening it in append-update mode and seeing if the file position is zero. If so, there's no data in the file. Otherwise, we can read it.
with open("filename", "a+") as f:
if f.tell():
f.seek(0)
for line in f: # read the file
print line.rstrip()
else:
print "no data in file"
One can create a custom exception and handle it using a try/except block, as below:
class ContentNotFoundError(Exception):
    pass

with open('your_filename', 'r') as f:
    try:
        content = f.read()
        if not content:
            raise ContentNotFoundError()
    except ContentNotFoundError:
        print("the file you are trying to open has no contents in it")
    else:
        print("content found")
        print(content)
This code will print the content of the given file if there is any; otherwise it will print the message
the file you are trying to open has no contents in it

Python: Handle Missing Files from a Sequence

I have a program that's basically as follows:
for l in range(0,100):
    file = open("C:/Twitter/json/user_" + str(l) + ".json", "r")
    #do some stuff
    file.close()
I am trying to figure out a way to handle the exception that will be thrown if, say, file 20 is missing, and tell it to continue. I attempted to use continue with a try statement; however, it kept complaining that I wasn't putting it in the loop properly. Any advice would be appreciated.
Basically I tried:
try:
    for:
except:
    continue
Thanks,
Something like:
import json

for l in xrange(100):
    try:
        with open('C:/Twitter/json/user_%d.json' % l, 'r') as f:
            data = json.load(f)
            #do stuff with obj
    except IOError:
        pass
Edit: fixed the code.
You could check for the file's existence before opening it:
import os.path
os.path.exists(file_path)
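For instance, folded into the loop from the question (a sketch; the try/except approach above is usually preferable, since a file can still disappear between the check and the open):

import os.path

for l in range(0, 100):
    file_path = "C:/Twitter/json/user_" + str(l) + ".json"
    if not os.path.exists(file_path):
        continue          # skip missing files in the sequence
    with open(file_path, "r") as f:
        pass              # do some stuff with f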

Categories

Resources