I'm trying to download a very large file in Colab to my Google Drive. Sometimes the connection cuts out, and it requires me to restart. Is there a way I can resume the download from where I left off?
My code looks like this:
from requests import get
import sys
def download(url, file_name):
    # open in binary mode
    with open(file_name, "wb") as f:
        print("Downloading %s" % file_name)
        response = get(url, stream=True)
        total_length = response.headers.get('content-length')
        if total_length is None:  # no content length header
            f.write(response.content)
        else:
            dl = 0
            total_length = int(total_length)
            for data in response.iter_content(chunk_size=4096):
                dl += len(data)
                f.write(data)
                done = int(50 * dl / total_length)
                sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50-done)))
                sys.stdout.flush()
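If the server supports HTTP Range requests, one way to pick up where a dropped connection left off is to check how many bytes of the file already exist on Drive and ask only for the remainder. Below is a minimal sketch along the lines of the code above; the function name download_resumable and the resume logic are my own additions, not something requests provides out of the box.
import os
import sys
from requests import get

def download_resumable(url, file_name, chunk_size=4096):
    # Bytes already written by a previous, interrupted run.
    existing = os.path.getsize(file_name) if os.path.exists(file_name) else 0
    headers = {'Range': f'bytes={existing}-'} if existing else {}

    response = get(url, headers=headers, stream=True)
    # 206 means the server honored the Range header; 200 means it re-sent the whole file.
    mode = 'ab' if response.status_code == 206 else 'wb'
    already = existing if mode == 'ab' else 0

    total_length = response.headers.get('content-length')
    with open(file_name, mode) as f:
        print("Downloading %s" % file_name)
        if total_length is None:  # no content length header
            f.write(response.content)
            return
        total_length = int(total_length) + already
        dl = already
        for data in response.iter_content(chunk_size=chunk_size):
            dl += len(data)
            f.write(data)
            done = int(50 * dl / total_length)
            sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
            sys.stdout.flush()
Calling the function again after a disconnect simply appends the missing bytes to the same file, so nothing already saved to Drive is re-downloaded.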
I have to write a Python program that performs the following tasks:
1- Download the MovieLens dataset from the URL ‘http://files.grouplens.org/datasets/movielens/ml-25m.zip’
2- Download the MovieLens checksum from the URL ‘http://files.grouplens.org/datasets/movielens/ml-25m.zip.md5’
3- Check whether the checksum of the archive corresponds to the downloaded one
4- In case of a positive check, print the names of the files contained in the downloaded archive
This is what I wrote up to now:
from zipfile import ZipFile
from urllib import request
import hashlib

def md5(fname):
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()

url_datasets = 'http://files.grouplens.org/datasets/movielens/ml-25m.zip'
datasets = 'datasets.zip'
url_checksum = 'http://files.grouplens.org/datasets/movielens/ml-25m.zip.md5'
checksum = 'datasets.zip.md5'

request.urlretrieve(url_datasets, datasets)
request.urlretrieve(url_checksum, checksum)

with ZipFile(datasets, 'r') as zipObj:
    listOfiles = zipObj.namelist()
    for elem in listOfiles:
        print(elem)
So what I'm missing is a way to compare the checksum I computed with the one I downloaded. Maybe I can create a function "printFiles" that checks the checksum and, if it matches, prints the list of files.
Is there anything else I can improve?
Your code isn't actually making any of the requests.
from zipfile import ZipFile
import hashlib
import requests

def md5(fname):
    hash_md5 = hashlib.md5()
    hash_md5.update( open(fname,'rb').read() )
    return hash_md5.hexdigest()

url_datasets = 'http://files.grouplens.org/datasets/movielens/ml-25m.zip'
datasets = 'datasets.zip'
url_checksum = 'http://files.grouplens.org/datasets/movielens/ml-25m.zip.md5'
checksum = 'datasets.zip.md5'

ds = requests.get( url_datasets, allow_redirects=True)
cs = requests.get( url_checksum, allow_redirects=True)

open( datasets, 'wb').write( ds.content )

ds_md5 = md5(datasets)
cs_md5 = cs.content.decode('utf-8').split()[0]

print( ds_md5 )
print( cs_md5 )

if ds_md5 == cs_md5:
    print( "MATCH" )
    with ZipFile(datasets, 'r') as zipObj:
        listOfiles = zipObj.namelist()
        for elem in listOfiles:
            print(elem)
else:
    print( "Checksum fail" )
I have been making a download manager app with tkinter and requests, and I realized that sometimes, if the user is downloading multiple files at the same time, it fails to keep up and all the downloads end without any error. I also tried urllib3 and the standard urllib; the only difference with urllib was that it raised an error but still failed. I want to make my program work so that if the download ends prematurely it does the following:
Firstly, check if the file size is less than it is supposed to be.
If it is, then get the size of that file and make a Range header like so: {"Range": f"bytes={current_size}-{file_size}"}
Store the rest of the file in a temp file. After it is downloaded, get the data from both of the files and write it to one (merge the files together).
I used a while loop and a temp counter, but the problem is that when requests can't keep up and reaches the while loop, it makes millions of temp files, each 197 bytes in size, and it doesn't work. I also tried just using an if statement hoping that would fix it; the difference was that it didn't create millions of files, but it still didn't work. Finally, I tried writing a separate mock program that just fetched the rest of the file and merged it with the half-downloaded file, and it worked, but for some reason when I try it in my program it doesn't. Keep in mind that I don't want to create a thread for each temp file, because it can easily be written in the same thread as the one that is downloading the file. How can I do this? My code (be aware that this function is running in a separate thread):
currently_downloading = np.array([], dtype='S')
current_temp = 0

def download_files():
    global files_downloading, times_clicked, currently_downloading, packed, last_temp, current_temp
    try:
        abort = False
        win = None
        available_num = 0
        downloaded = 0
        url = str(url_entry.get())
        try:
            headers = requests.head(url, headers={'accept-encoding': ''}).headers
        except ValueError:
            raise InvalidURL()
        try:
            file_size = float(headers['Content-Length'])
        except TypeError:
            raise NotDownloadable()
        name = ""
        formatname = ""
        if num.get() == 1:
            name = url.split("/")[-1].split(".")[0]
        else:
            if name_entry.get().strip() != "":
                for char in str(name_entry.get()):
                    if char in banned_chars:
                        print("Usage of banned characters")
                        raise BannedCharsUsage()
                else:
                    name = str(name_entry.get())
            else:
                raise EmptyName()
        if var.get() == 1:
            formatname = '.' + headers['Content-Type'].split('/')[1]
        else:
            if str(format_entry.get())[0] == '.' and len(format_entry.get()) >= 3:
                formatname = str(format_entry.get())
            else:
                raise InvalidFormat()
        fullname = str(name) + formatname
        path = (str(output_entry.get()) + "/").replace(r" \ ".strip(), "/")
        if chum.get() == 1:
            conn = sqlite3.connect("DEF_PATH.db")
            c = conn.cursor()
            c.execute("SELECT * FROM DIRECTORY_LIST WHERE SELECTED_DEF = 1")
            crnt_default_path = np.array(c.fetchone())
            path = str(crnt_default_path[0] + "/").replace(r" \ ".strip(), "/")
            conn.commit()
            conn.close()
        else:
            pass
        all_files_dir = np.array([], dtype='S')
        for file in os.listdir(path):
            all_files_dir = np.append(all_files_dir, path + file)
        all_files_dir = np.concatenate((all_files_dir, currently_downloading))
        while path + fullname in all_files_dir:
            for element in currently_downloading:
                if element not in all_files_dir:
                    all_files_dir = np.append(all_files_dir, element)
            available_num += 1
            if num.get() == 1:
                name = url.split("/")[-1].split(".")[0] + f" ({available_num})"
            else:
                name = str(name_entry.get()) + f" ({available_num})"
            fullname = name + formatname
            if path + fullname not in all_files_dir:
                currently_downloading = np.append(currently_downloading, path + fullname)
                available_num = 0
                break
        else:
            currently_downloading = np.append(currently_downloading, path + fullname)

        def cancel_dl():
            nonlocal abort
            abort = True

        start_time = time.time()
        try:
            r = requests.get(url, allow_redirects=False, stream=True)
            start = last_print = time.time()
            with open(path + fullname, 'wb') as fp:
                for chunk in r.iter_content(chunk_size=4096):
                    if abort:
                        raise AbortException()
                    downloaded += fp.write(chunk)
                    if downloaded > 1000000:
                        lbl_crnt_size.config(text=f"Downloaded: {round(downloaded / 1000000, 2)} MB")
                    else:
                        lbl_crnt_size.config(text=f"Downloaded: {round(downloaded / 1000, 2)} KB")
                    pct_done = int(downloaded / file_size * 100)
                    lbl_percent.config(text=f"{round(pct_done, 2)} %")
                    download_prg["value"] = pct_done
                    now = time.time()
                    if now - last_print >= 1:
                        speed_sec = round(downloaded / (now - start))
                        if speed_sec > 1000000:
                            lbl_speed.config(text=f"{round(speed_sec / 1000000, 3)} MB/s")
                        else:
                            lbl_speed.config(text=f"{round(speed_sec / 1000, 3)} KB/s")
                        last_print = time.time()
            while os.stat(path + fullname).st_size < file_size:
                current_temp += 1
                rng = {"Range": f"bytes={os.stat(path + fullname).st_size}-{file_size}"}
                r = requests.get(url, allow_redirects=False, stream=True, headers=rng)
                start = last_print = time.time()
                with open(f"temp/Temp-{current_temp}{formatname}", 'wb') as fp:
                    for chunk in r.iter_content(chunk_size=4096):
                        if abort:
                            raise AbortException()
                        downloaded += fp.write(chunk)
                        if downloaded > 1000000:
                            lbl_crnt_size.config(text=f"Downloaded: {round(downloaded / 1000000, 2)} MB")
                        else:
                            lbl_crnt_size.config(text=f"Downloaded: {round(downloaded / 1000, 2)} KB")
                        pct_done = int(downloaded / file_size * 100)
                        lbl_percent.config(text=f"{round(pct_done, 2)} %")
                        download_prg["value"] = pct_done
                        now = time.time()
                        if now - last_print >= 1:
                            speed_sec = round(downloaded / (now - start))
                            if speed_sec > 1000000:
                                lbl_speed.config(text=f"{round(speed_sec / 1000000, 3)} MB/s")
                            else:
                                lbl_speed.config(text=f"{round(speed_sec / 1000, 3)} KB/s")
                            last_print = time.time()
                with open(f"temp/Temp-{current_temp}{formatname}", 'rb') as fp:
                    temp_binary = fp.read()
                with open(path + fullname, 'rb') as fp:
                    main_binary = fp.read()
                with open(path + fullname, 'wb') as fp:
                    fp.write(main_binary + temp_binary)
        except AbortException:
            if os.path.exists(path + fullname):
                os.remove(path + fullname)
There is no built-in function to do that, so you will have to do it manually.
The first thing you need to do is keep a record of how many chunks/buffers you have written to the file.
Before the download function, declare some variable, say x = 0 (to count how much data has been written to the file).
Then, inside the download function, check whether x == 0.
If true, download normally;
else, resume the download using a Range header.
Read the following examples for the Range header: source
If the web server supports range requests, then you can add the Range header to your request:
Range: bytes=StartPos-StopPos
You will receive the part between StartPos and StopPos. If you don't know the StopPos, just use:
Range: bytes=StartPos-
So your code would be:
def resume_download(fileurl, resume_byte_position):
    resume_header = {'Range': 'bytes=%d-' % resume_byte_position}
    return requests.get(fileurl, headers=resume_header, stream=True, verify=False, allow_redirects=True)
Another example:
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch11s06.html
Also update the variable x after writing each chunk (x = x + chunk_size).
And at the end of your download part, add an if statement to check whether the size of the downloaded file is the same as the size of the file on the server (which you can get from response.headers.get('Content-Length')). If the file size is not the same, call your download function again.
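Putting those pieces together, a rough sketch of the resume flow could look like this. It assumes the server honors Range requests and exposes Content-Length; the function name download_with_resume is just for illustration. Appending with mode 'ab' avoids the temp-file merge step from the question entirely.
import os
import requests

def download_with_resume(url, file_name, chunk_size=4096):
    # Size reported by the server; may be absent on some servers.
    head = requests.head(url, allow_redirects=True)
    file_size = int(head.headers.get('Content-Length', 0))

    while True:
        # "x" in the explanation above == bytes already on disk.
        current_size = os.path.getsize(file_name) if os.path.exists(file_name) else 0
        if file_size and current_size >= file_size:
            break  # download complete
        headers = {'Range': f'bytes={current_size}-'} if current_size else {}
        try:
            r = requests.get(url, headers=headers, stream=True)
            with open(file_name, 'ab') as fp:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    fp.write(chunk)
        except requests.exceptions.RequestException:
            continue  # connection dropped; loop again and resume from the new size
        if not file_size:
            break  # no Content-Length to compare against, so stop after one pass
Because the data is appended directly to the target file, there is nothing to merge afterwards; each retry just continues from whatever os.path.getsize reports.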
I am trying to split a large file into 50 MB chunks and save them in separate files. After running some read/write operations, some of my chunks were smaller than 50 MB (43 MB, 17 MB and so on). I also wrote the same code in Java, and it has the same problem. What is wrong? My code follows below.
By the way, what can we do to speed up this code so it splits into chunks faster?
try:
    f = open(self.__filename, 'rb')
except (OSError, IOError), e:
    raise FileSplitterException, str(e)

bname = (os.path.split(self.__filename))[1]
fsize = os.path.getsize(self.__filename)
self.__chunksize = int(float(fsize)/float(self.__numchunks))

chunksz = self.__chunksize
total_bytes = 0

for x in range(self.__numchunks):
    chunkfilename = bname + '-' + str(x+1) + self.__postfix
    if x == self.__numchunks - 1:
        chunksz = fsize - total_bytes
    try:
        print 'Writing file',chunkfilename
        data = f.read(chunksz)
        total_bytes += len(data)
        chunkf = file(chunkfilename, 'wb')
        chunkf.write(data)
        chunkf.close()
    except (OSError, IOError), e:
        print e
        continue
    except EOFError, e:
        print e
        break
The code in the question seems to be focused on producing a set number of chunks rather than files of 50 MB in size.
This code produces 50 MB files.
import os

try:
    f = open('big.txt', 'rb')
except (OSError, IOError), e:
    raise FileSplitterException, str(e)

bname = (os.path.split('big.txt'))[1]

chunksz = 50 * 1000 * 1000 # metric MB - use 1024 * 1024 for binary MB (MiB)

counter = 0

while True:
    chunkfilename = bname + '-' + str(counter+1) + '.foo'
    try:
        print 'Writing file',chunkfilename
        data = f.read(chunksz)
        if not data:
            # We have reached the end of the file, end the script.
            break
        chunkf = file(chunkfilename, 'wb')
        chunkf.write(data)
        chunkf.close()
    except (OSError, IOError), e:
        print e
        continue
    except EOFError, e:
        print e
        break
    counter += 1
Some aspects of the code are considered poor style in modern python - for example not using a context manager to open files - but I haven't changed these in case the OP is on an old python like 2.5.
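For comparison, here is roughly what the same 50 MB splitter might look like on Python 3 with context managers. This is a sketch only; it keeps the same placeholder names big.txt and .foo from the answer above.
import os

chunksz = 50 * 1000 * 1000  # metric MB - use 1024 * 1024 for binary MB (MiB)
bname = os.path.split('big.txt')[1]

with open('big.txt', 'rb') as f:
    counter = 0
    while True:
        data = f.read(chunksz)
        if not data:
            # Reached the end of the input file.
            break
        counter += 1
        chunkfilename = '{}-{}.foo'.format(bname, counter)
        print('Writing file', chunkfilename)
        with open(chunkfilename, 'wb') as chunkf:
            chunkf.write(data)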
Your question is unclear because you haven't included a Minimal, Complete, and Verifiable example—so I don't know exactly what's wrong with your code. However after creating / simulating my guess as to the missing parts, I was able to come up with something that does exactly what you want, I think.
import os

class FileSplitterException(Exception): pass

class FileSplitter(object):

    def __init__(self, filename, chunksize):
        if not os.path.isfile(filename):
            raise FileSplitterException(
                "File: {!r} does not exist".format(filename))
        self._filename = filename
        self._postfix = 'chunk'
        self._chunksize = chunksize

    def split(self):
        bname = os.path.splitext(self._filename)[0]
        fsize = os.path.getsize(self._filename)
        chunks, partial = divmod(fsize, self._chunksize)
        if partial:
            chunks += 1
        with open(self._filename, 'rb') as infile:
            for i in range(chunks):
                chunk_filename = os.path.join('{}-{}.{}'.format(
                    bname, i, self._postfix))
                with open(chunk_filename, 'wb') as outfile:
                    data = infile.read(self._chunksize)
                    if data:
                        outfile.write(data)
                    else:
                        raise FileSplitterException('unexpected EOF encountered')

if __name__ == '__main__':
    import glob

    filename = 'big_file.txt'
    chunksize = 1 * 1024 * 1024  # 1 Mb
    print('splitting {} into {:,} sized chunks'.format(filename, chunksize))
    fs = FileSplitter(filename, chunksize)
    fs.split()
    print('chunk files written:')
    bname = os.path.splitext(filename)[0]
    for chunkname in sorted(glob.glob(bname + '-*.' + fs._postfix)):
        fsize = os.path.getsize(chunkname)
        print(' {}: size: {:,}'.format(chunkname, fsize))
import dropbox
client = dropbox.client.DropboxClient('<token>')
f = open('/ssd-scratch/abhishekb/try/1.mat', 'rb')
response = client.put_file('/data/1.mat', f)
I want to upload a big file to Dropbox. How can I check the progress? [Docs]
EDIT:
The uploader offset stays the same somehow. What am I doing wrong?
import os,pdb,dropbox

size=1194304
client = dropbox.client.DropboxClient(token)
path='D:/bci_code/datasets/1.mat'

tot_size = os.path.getsize(path)
bigFile = open(path, 'rb')
uploader = client.get_chunked_uploader(bigFile, size)

print "uploading: ", tot_size
while uploader.offset < tot_size:
    try:
        upload = uploader.upload_chunked()
        print uploader.offset
    except rest.ErrorResponse, e:
        print("something went wrong")
EDIT 2:
size=1194304
tot_size = os.path.getsize(path)
bigFile = open(path, 'rb')
uploader = client.get_chunked_uploader(bigFile, tot_size)

print "uploading: ", tot_size
while uploader.offset < tot_size:
    try:
        upload = uploader.upload_chunked(chunk_size=size)
        print uploader.offset
    except rest.ErrorResponse, e:
        print("something went wrong")
upload_chunked, as the documentation notes:
Uploads data from this ChunkedUploader's file_obj in chunks, until an
error occurs. Throws an exception when an error occurs, and can be
called again to resume the upload.
So yes, it uploads the entire file (unless an error occurs) before returning.
If you want to upload a chunk at a time on your own, you should use upload_chunk and commit_chunked_upload.
Here's some working code that shows you how to upload a single chunk at a time and print progress in between chunks:
from io import BytesIO
import os
from dropbox.client import DropboxClient

client = DropboxClient(ACCESS_TOKEN)
path = 'test.data'
chunk_size = 1024*1024 # 1MB

total_size = os.path.getsize(path)
upload_id = None
offset = 0
with open(path, 'rb') as f:
    while offset < total_size:
        offset, upload_id = client.upload_chunk(
            BytesIO(f.read(chunk_size)),
            offset=offset, upload_id=upload_id)
        print('Uploaded so far: {} bytes'.format(offset))

# Note the "auto/" on the next line, which is needed because
# this method doesn't attach the root by itself.
client.commit_chunked_upload('auto/test.data', upload_id)
print('Upload complete.')
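If you want the progress as a percentage instead of raw bytes, the print inside the while loop above could become something like the following (a small variation of the snippet, not part of the Dropbox API itself):
percent = 100.0 * offset / total_size
print('Uploaded so far: {:.1f}%'.format(percent))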
I am coding a download function in Python. The file size is >1 GB. The server is Linux and the HTTP server is Karrigell. The client is a browser, Firefox or IE. I have run into big trouble.
At first, I used sys.stdout to send the file content.
file = open(path, 'rb')
size = os.path.getsize(path)
RESPONSE['Pragma'] = 'public'
RESPONSE['Expires'] = '0'
RESPONSE['Cache-Control'] = 'must-revalidate, pre-check=0'
RESPONSE['Content-Disposition'] = 'attachment; filename="' + os.path.basename(path) + '"'
RESPONSE['Content-type'] = "application/octet-stream"
RESPONSE['Content-Transfer-Encoding'] = 'binary'
RESPONSE['Content-length'] = str(os.path.getsize(path))
sys.stdout.flush()
chunk_size = 10000
handle = open(path, "rb")
while True:
    buffer = handle.read(chunk_size)
    if buffer:
        STDOUT(buffer)
    else:
        break
sys.stdout.flush()
The problem is that the server runs out of memory! I know: stdout writes the content to memory first, and then the memory is sent to the socket.
So I modified the function to send the content to the socket directly. I use the py-sendfile module. http://code.google.com/p/py-sendfile/
file = open(path, 'rb')
size = os.path.getsize(path)
sock = REQUEST_HANDLER.sock
sock.sendall("""HTTP/1.1 200 OK\r\nPragma: no-cache\r\nExpires: 0\r\nCache-Control: no-cache, no-store\r\nContent-Disposition: attachment; filename="%s"\r\nContent-Type: application/octet-stream\r\nContent-Length: %u\r\nContent-Range: bytes 0-4096/%u\r\nLocation: "%s"\r\n\r\n""" % (os.path.basename(path), size, size, os.path.basename(path)))
offset = 0
nbytes = 4096
while 1:
    try:
        sent = sendfile.sendfile(sock.fileno(), file.fileno(), offset, nbytes)
    except OSError, err:
        if err.errno in (errno.EAGAIN, errno.EBUSY): # retry
            continue
        raise
    else:
        if sent == 0:
            break # done
        offset += sent
This time, the server memory is OK, but the browser dies! The browser's memory rises quickly and is not freed until the socket has accepted the whole file content.
I don't know how to deal with these problems. I think the second idea is right: send the content to the socket directly. But why can't the browser free memory while accepting the data?
You should try to download the file in chunks. This is an example that works for me, using urllib2:
import os
import urllib2
import math
def downloadChunks(url):
    """Helper to download large files
        the only arg is a url
        this file will go to a temp directory
        the file will also be downloaded
        in chunks and print out how much remains
    """

    baseFile = os.path.basename(url)

    #move the file to a more uniq path
    os.umask(0002)
    temp_path = "/tmp/"
    try:
        file = os.path.join(temp_path,baseFile)

        req = urllib2.urlopen(url)
        total_size = int(req.info().getheader('Content-Length').strip())
        downloaded = 0
        CHUNK = 256 * 10240
        with open(file, 'wb') as fp:
            while True:
                chunk = req.read(CHUNK)
                downloaded += len(chunk)
                # use float() so the percentage isn't truncated to 0 by integer division
                print math.floor( (downloaded / float(total_size)) * 100 )
                if not chunk: break
                fp.write(chunk)
    except urllib2.HTTPError, e:
        print "HTTP Error:",e.code , url
        return False
    except urllib2.URLError, e:
        print "URL Error:",e.reason , url
        return False

    return file
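A call might then look like this (the URL is just a placeholder); the function returns the local path on success and False on failure:
local_path = downloadChunks('http://example.com/files/large-archive.zip')
if local_path:
    print('Downloaded to {}'.format(local_path))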