Python retry to open the file - python

What is the best practice to try to open the file and retry after n seconds?
Currently, I do:
import os
from os import path
import shutil

# Directory being watched and the directory files are moved into.
dir_path = path.abspath(path.join("foo", "bar"))
destination_path = path.abspath(path.join("foo", "dest_dir"))

# Original bugs fixed here:
#   * iterating dir_path walked the *characters* of the path string, not the
#     directory entries — os.listdir is what was intended;
#   * shutil.move was given the open file object instead of a path;
#   * `return False` is illegal at module level;
#   * the opened file handle was leaked when move() raised.
if path.isdir(dir_path):
    for entry in os.listdir(dir_path):
        file_path = path.join(dir_path, entry)
        try:
            # Opening succeeds only once the writer has released the file.
            with open(file_path, 'r'):
                pass
            # Move by path, not by file object.
            shutil.move(file_path, destination_path)
        except IOError:
            # Original returned on first failure; stop trying, same effect.
            break
So, at the moment I do not handle the exception. I am thinking about creating an extra function to open the file and calling that function again on exception, with time.sleep(n) between attempts.
However, I am sure there must be something else ...
I do not use
with open(f, 'rb') as file_opened:
do whatever
EDIT:
One process creates the file and I want the Python process to move the file once I am sure the file writing / creation is completed. So, I have added shutil.move in the above code to show the whole situation.
EDIT:
Please find below the code I have developed to solve the problem. I ended with writing own custom solution to handle it:
import os
from os import path
import psutil
from retry import retry
import shutil
from subprocess import check_output,Popen, PIPE
import glob
import time
class FileHandler:
    """Waits until a source file is no longer held open by its writer,
    then moves it into a ``test`` subdirectory of the source directory.

    The original version retried by recursing and *discarding* the
    recursive call's return value, so after any retry the caller saw
    None instead of True; it also never created the destination
    directory and used bare ``except``.  All retries are loops now.
    """

    def __init__(self, fn_source, dir_source):
        # fn_source: file name only; dir_source: directory containing it.
        self.file_source_name = fn_source
        self.file_source_path = path.join(dir_source, self.file_source_name)
        self.dir_dest_path = path.join(dir_source, "test")
        self.file_dest_path = path.join(self.dir_dest_path, self.file_source_name)

    def check_file(self):
        """Block until the source file exists and can be renamed.

        Renaming a file onto its own name fails on Windows while another
        process holds it open, so success means the writer is done.
        Always returns True (it loops until then).
        """
        while True:
            if os.path.exists(self.file_source_path):
                try:
                    os.rename(self.file_source_path, self.file_source_path)
                    print("file renamed")
                    return True
                except OSError:
                    print("can not rename the file..retrying")
                    time.sleep(1)
            else:
                print("source file does not exist...retrying")
                time.sleep(5)

    def check_destination(self):
        """Ensure the destination slot is free, removing a stale copy.

        Returns True when the move can proceed, False if the source file
        has vanished (the original fell through and returned None here).
        """
        while True:
            if not os.path.exists(self.file_source_path):
                return False
            if not os.path.exists(self.file_dest_path):
                return True
            try:
                print(self.file_dest_path, self.file_source_name)
                os.remove(self.file_dest_path)
                return True
            except OSError:
                print("can not remove the file..retrying")
                time.sleep(5)

    def move_file(self):
        """Move the source file into place, retrying until it succeeds."""
        while True:
            if self.check_destination():
                # The original assumed the "test" directory already existed
                # and crashed otherwise; create it on first use.
                os.makedirs(self.dir_dest_path, exist_ok=True)
                print(self.file_source_path)
                shutil.move(self.file_source_path, self.file_dest_path)
                print("moved", str(self.file_source_path))
                return True
            print("can not move the file..retrying")
            time.sleep(1)

    def file_ops(self):
        """Wait for the source file, move it, and return True."""
        self.check_file()  # blocks until the file exists and is free
        self.move_file()
        return True
def each_file_ops(fn, dir_source):
    """Run the full wait-and-move sequence for one file in dir_source."""
    return FileHandler(fn, dir_source).file_ops()
def main(dir_source):
    """Poll dir_source for *.txt files and move each one found.

    The original retried an empty directory by calling itself
    recursively, which eventually exhausts the interpreter stack, and
    used a list comprehension purely for its side effects; both are
    plain loops now.
    """
    while True:
        dir_files = glob.glob(path.join(dir_source, '*.txt'))
        if dir_files:
            for f in dir_files:
                each_file_ops(f, dir_source)
            return
        print("source dir is empty")
        time.sleep(1)


if __name__ == '__main__':
    main(path.join(""))

You can use the retry module for this kind of retrying. It makes the code look much cleaner. pip install retry should install the module.
from retry import retry  # third-party: pip install retry
import shutil

# The site's formatting mangled the decorator into "#retry(...)" — it must be
# "@retry(...)" for the retry behavior described below to apply at all.
@retry((FileNotFoundError, IOError), delay=1, backoff=2, max_delay=10, tries=100)
def attempt_to_move_file(fname, dest_path):
    """Move fname to dest_path, retried with exponential backoff on failure."""
    shutil.move(fname, dest_path)
With the above code, when attempt_to_move_file is invoked it would be retried (upto a max of 100 tries) whenever we hit FileNotFoundError or IOError and the retry happens with a sleep 1, 2, 4, 8, 10, 10, 10 ... seconds between attempts

You do not need to open a file in order to move it. In fact, once you have opened a file it is in an open state, so you cannot move it. Think of it like this: when you are playing a video in a player and you try to delete it, or cut and paste it, the system won't allow you.
import shutil

# Source file, looked up relative to the current working directory.
file_name = 'trytry.csv'
# copyfile leaves the original in place; move then renames it away, so the
# net effect is one copy at each destination and no source file left behind.
shutil.copyfile(file_name, '/Users/umeshkaushik/PycharmProjects/newtry.csv')
shutil.move(file_name, '/Users/umeshkaushik/PycharmProjects/newtry1.csv')
Above code runs fine. Just make sure you are providing correct input and output path.

Related

My pdf is blank when trying to extracting report from MS access using python

When I am running below python code, its generating PDF file with blank pages.
Although I am expecting it to generate PDF file as it's saved on MS Access report.
Can someone please let me know what I'm missing.
Below is my code
import win32com.client as win
import os
import time

# Directory the script was launched from (used to locate Database.accdb)
# and the script's own resolved path.
s_current_working_directory = os.getcwd()
script = os.path.realpath(__file__)
def file_open(file_name):
    """Report whether file_name was held open by another process.

    Returns False immediately if the file does not exist or is free;
    otherwise waits (polling every 2 s) until it is released and returns
    True.  On Windows, renaming a file onto its own name fails while
    another process has it open, which is the lock probe used here.

    Fixes over the original: the recursive retry discarded its own
    result and then unconditionally returned True even after the file
    had closed; the trailing ``raise NameError`` was unreachable; the
    bare ``except`` is narrowed to OSError.
    """
    if not os.path.exists(file_name):
        return False
    was_open = False
    while True:
        try:
            os.rename(file_name, file_name)
            return was_open
        except OSError:
            was_open = True
            print("File Open "+file_name)
            time.sleep(2)
# Drive MS Access over COM: open the database, open the report, export it
# as PDF (format code 3), then shut Access down.
access = win.Dispatch("Access.Application")
access.visible = 1
db = access.OpenCurrentDatabase(os.path.join(s_current_working_directory, 'Database.accdb'))
access.DoCmd.OpenReport('ReportName', 1)
# NOTE(review): `filename` is never defined anywhere in this script —
# presumably it should be the PDF export path; confirm before running.
if os.path.isfile(filename):
    os.remove(filename)
access.DoCmd.OutputTo(3, 'ReportName', r'PDF Format (*.pdf)', r'export_path')
# Original bug: `access.DoCmd.CloseDatabase` only fetched the COM method
# object without invoking it — it must be called.
access.DoCmd.CloseDatabase()
access.Quit()
access = None

How to check if a file's window is closed/open Python?

How can I check when a file window is closed? I tried this:
import time
import os

# Seconds between retries; the original referenced SLEEP_TIME without ever
# defining it.
SLEEP_TIME = 1

path = input()
# The original called os.startfile(request), but `request` is undefined;
# the user-supplied path is what should be launched.
os.startfile(path)

# Loop *around* the try instead of inside the except: the original read
# file_path.closed in the branch where open() had just raised, so file_path
# was never assigned there.  Opening succeeds once the viewer releases the
# file (Windows keeps it locked while it is open in an application).
while True:
    try:
        file_path = open(path, 'rb')
        break
    except OSError:
        time.sleep(SLEEP_TIME)
So the file starts before I start checking if the file is closed. Then, I open the file again, and because its open already, an exception should pop up, where I'm looping until the file is closed. I don't know whether the indication I made is right or not.
Please help!!! I'm desperate!!!
You can't use file_path in the except: block, because the variable isn't assigned if open() raises an exception.
You should put the loop around the try/except, not inside the except.
# Keep attempting the open; the loop ends with `file_path` bound to the
# successfully opened handle.
file_path = None
while file_path is None:
    try:
        file_path = open(path, 'rb')
    except:
        time.sleep(SLEEP_TIME)
It will open the file only once, and if the file is already open it will use the already opened file.
import time
import os

path = 'bescom-17-apr.pdf'
# One open attempt: report the outcome instead of letting IOError propagate.
try:
    file = open(path, "r")
except IOError:
    print("file not found")
else:
    with file:
        print("opened file")

I need to prevent a python script from running twice

I need to prevent a python script from running more than once. So far I have:
import fcntl
def lockFile(lockfile):
    """Try to take a non-blocking exclusive flock on lockfile.

    Returns True on success, False if another holder already has it.

    The original stored the file object in a local that was discarded on
    return; when it was garbage-collected the OS released the lock
    immediately, which is exactly why sys.exit was never triggered for a
    second run.  The object is kept referenced on the function itself so
    the lock lives for the life of the process.
    """
    fp = open(lockfile, 'w')
    try:
        fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        fp.close()  # failed to lock; don't leak the descriptor
        return False
    lockFile._fp = fp  # hold the descriptor (and therefore the lock) open
    return True
import sys  # sys.exit is used below but was never imported in the original

# Abort this run if another instance already holds the lock.
if not lockFile("myfile.lock"):
    sys.exit(0)
Problem is the sys.exit() never gets called even if the file is there. Maybe this is a platform dependent way of doing things? I just need to write a lockfile, check for its existence and if it's not there or stale, create a new one. Ideas?
Writing a file will create a new file if none exists; you could instead try to read the file first: if there is none, an error is raised and a file is written; if there is a file, the program exits.
import sys  # required for sys.exit, missing from the original snippet

try:
    with open('lockfile.txt', 'r') as f:
        lock = f.readline().strip().split()
        # Guard against an empty lockfile: lock[0] on an empty list raised
        # IndexError in the original.
        if lock and lock[0] == 'locked':
            print('exiting')
            sys.exit(0)
except FileNotFoundError:
    # No lockfile yet: claim it for this run.
    with open('lockfile.txt', 'w') as f:
        f.write('locked')
    print('file written')

if __name__ == '__main__':
    pass
If you need something more sophisticated, you could look up the atexit module
You can check if you file exists by using os.path.exists (docs here). If it does, then you can use the sys.exit you mentioned before. If you need something more than sys.exit, try the atexit module #Reblochon suggested. The script will then assume the file is ready to lock and the method will report its success back to the user via a boolean.
import os
import sys
import fcntl
FILE_NAME = 'myfile.lock'

def lockFile(lockfile):
    """Attempt an exclusive non-blocking flock; report success as a bool.

    Original bug: fp.close() ran unconditionally, and closing the
    descriptor releases a flock — so the "lock" vanished the moment the
    function returned.  On success the file object is now kept
    referenced; it is only closed when locking failed.
    """
    fp = open(lockfile, 'w')  # create a new one
    try:
        fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
        success = True  # the file has been locked
    except IOError:
        success = False  # another process holds the lock
        fp.close()  # safe to close: we never acquired it
    else:
        lockFile._fp = fp  # keep the descriptor (and the lock) alive
    return success
# NOTE(review): this existence check is racy (time-of-check/time-of-use) and
# a stale lock file left by a crashed run will block every future start; the
# flock taken inside lockFile already provides the real mutual exclusion.
if os.path.exists(FILE_NAME): # exit the script if it exists
    sys.exit(0)
print('success', lockFile(FILE_NAME))

Exception for Python ftplib in my program?

I wrote this program to draw data from a text file on a website's directory (of which is edited by the user on the site) but it seems to crash. A lot.
from sys import argv
import ftplib
import serial
from time import sleep

# "0" is never used below; repeat is never set False, so the loop runs forever.
one = "0"
repeat = True
# NOTE(review): "COM3" is a Windows port name and serial.Serial raises if the
# port is absent or busy — one of the crash sources the question describes.
ser = serial.Serial("COM3", 9600)
while repeat == True:
    path = 'public_html/'
    filename = 'fileone.txt'
    # A fresh FTP session every iteration; any network failure here raises an
    # uncaught exception and kills the script (there is no try/except).
    ftp = ftplib.FTP("*omitted*")
    ftp.login("*omitted*", "*omitted*")
    ftp.cwd(path)
    # NOTE(review): the file object created inline for retrbinary is never
    # explicitly closed.
    ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
    ftp.quit()
    # Re-read the file just downloaded and forward its contents to the
    # serial port; `txt` is also never closed.
    txt = open(filename)
    openup = txt.read()
    # NOTE(review): on Python 3, ser.write expects bytes, not str — confirm
    # which Python version this targets.
    ser.write(openup)
    print(openup)
Does anyone know any kind of way to stop it from crashing? I was thinking of using an exception but I'm no Python expert. The program does what it's meant to do, by the way, and the address and login have been omitted for obvious reasons. Also if possible I ask for an exception to stop the program from crashing when it disconnects from the serial port.
Thanks in advance!
Two things:
You might want to put all the ftplib related code in a try-except block like so:
# Wrap the ftplib calls so a network failure is reported instead of
# crashing the script.
try:
    # code related to ftplib goes here
    pass
except Exception as e:  # "except Exception, e" is Python-2-only syntax
    # print was a statement in Python 2; call it so this runs on 2 and 3
    print(str(e))
You seem to be opening the file but not closing it when you're done. This might also cause errors later. The best way to do this would be:
# The context manager guarantees the file is closed when the block exits,
# even if read() raises.
with open(filename, 'r') as txt:
    openup = txt.read()
This way the file will be closed automatically once you're outside the 'with' block.

Timeout error, getting images from urls - Python

I am trying to save jpegs to a file from a list of urls. This code times out frequently and randomly. It has saved up to 113 jpegs, there are many more than that, and sometimes only saves 10 before timing out. Is there a way to put a wait in so the timeout doesn't occur? I have tried sleep in the commented section with no luck. Thanks for the feedback!
Heres the timeout error message:
import urllib.request
import urllib
import codecs
from urllib import request
import time
import csv
class File:
    # Minimal holder object so the parsed CSV rows can live on an attribute.
    def __init__(self, data):
        # data: a placeholder string at construction; replaced below with the
        # list of CSV rows read from file.csv.
        self.data = data
file = File("1")
with open("file.csv", encoding = "utf8") as f1:
    file.data = list(csv.reader(f1, skipinitialspace = True))
# Skip the header row.
for i in file.data[1:]:
    # Column 27 holds the image URL; rows without one are skipped.
    if len(i[27]) != 0:
        #i[14] creates a unique jpeg file name in the dir
        # NOTE(review): this open() is unnecessary — urlretrieve writes the
        # file itself — and it targets a *different* path ('C:\\aPath...' vs
        # the relative 'aPath...' used below), leaving an empty stray file.
        image = open('C:\\aPath'+i[14]+'.JPG', 'wb')
        path = 'aPath' + i[14] + '.JPG'
        #time.sleep(2) Tried sleep here, didn't work
        #i[27] is a working jpeg url
        urllib.request.urlretrieve(i[27], path)
        image.close()
print('done!')
There's no way to prevent the exception. You need to catch the exception and retry.
...
for i in file.data[1:]:
    # Truthiness check replaces the question's len(i[27]) != 0.
    if not i[27]:
        continue
    path = 'aPath' + i[14] + '.JPG'
    while True: # retry loop
        try:
            urllib.request.urlretrieve(i[27], path)
            break # On success, stop retry.
        except TimeoutError:
            # NOTE(review): urlretrieve usually surfaces network timeouts as
            # urllib.error.URLError wrapping socket.timeout; confirm that
            # TimeoutError actually matches on the target Python version.
            print('timeout, retry in 1 second.')
            time.sleep(1)
BTW, you don't need to open file if you use urllib.request.urlretrieve.

Categories

Resources