How do I manage RAM usage in Python? - python

I tried creating a script that scrapes the latest uploaded files on a public picture-sharing website, uses ImageAI to recognize what is in each picture, and then moves the picture into a folder named after ImageAI's best guess.
The script works, but it is really slow, and after about a minute it consumes 5 GB of RAM, which makes my PC unstable.
I am new to Python and programming; this is only my second project, so I have only tried a few things suggested in Stack Overflow posts.
import requests
from bs4 import BeautifulSoup
import re
import urllib3
import urllib
from urllib3 import ProxyManager, make_headers
from imageai.Prediction import ImagePrediction
import os
import shutil, os

execution_path = os.getcwd()

from PIL import Image

proxy = urllib3.ProxyManager('proxy')

def make_soup(url):
    http = urllib3.PoolManager()
    r = http.request("GET", url)
    return BeautifulSoup(r.data, 'lxml')

ip = 'proxy'
proxies = {
    'http': ip,
    'https': ip,
}

default = "space"
filetype = []
i = 1
soup = make_soup("link")

for img in soup.find_all('img', {'src': re.compile('_tn.jpg')}):
    os.chdir(default)
    temp = img.get('src').replace("_tn", "")
    print(temp)
    nametemp = img.get('alt').replace("png", "jpg")
    print(nametemp)
    if str(nametemp) == 'None':
        filename = str(i)
        i = i + 1
    else:
        filename = nametemp
    imagefile = open(filename, "wb")
    imagefile.write(requests.get(temp).content)
    imagefile.close()
    print(filename + " mentve")
    if ".png" in filename:
        try:
            im = Image.open(filename + ".png")
            filetype = "png"
            print("this is a png")
            im.close()
        except IOError:
            print("png error")
    if ".jpg" in filename:
        try:
            im = Image.open(filename)
            print("this is a jpg")
            filetype = "jpg"
            im.close()
            prediction = ImagePrediction()
            prediction.setModelTypeAsResNet()
            prediction.setModelPath(os.path.join(execution_path, "resnet50_weights_tf_dim_ordering_tf_kernels.h5"))
            prediction.loadModel()
            predictions, probabilities = prediction.predictImage(os.path.join(execution_path, filename), result_count=1)
            for eachPrediction, eachProbability in zip(predictions, probabilities):
                print(eachPrediction, " : ", eachProbability)
            dir = str(predictions[0])
            if not os.path.exists(dir):
                os.makedirs(dir)
            os.chdir(dir)
            print(dir)
            if not os.path.isfile(filename):
                os.chdir(default)
                shutil.move(filename, dir)
            else:
                print("file already exists")
        except IOError:
            print("jpg error")
    if ".jpeg" in filename:
        try:
            im = Image.open(filename)
            print("this is a jpeg")
            filetype = "jpeg"
            im.close()
        except IOError:
            print("jpeg error")
    print(filetype)
Since I am seeking advice on what could be wrong with my code, I have provided it in full above. Sorry if that breaks any rules; it seemed like the logical step here.
I would love some advice on reading material, or some idea of what I should research to better understand how Python handles RAM usage.
Anything is appreciated, but keep in mind this is only my second project so far.
Thank you.
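One pattern in the code above that often explains this kind of memory growth, offered only as a hedged sketch and not a verified diagnosis: a new ImagePrediction object is built and loadModel() is called inside the download loop, once per image, so the ResNet weights are loaded again and again. Loading the model a single time before the loop and reusing it would look roughly like this (the weights file name, execution_path and the predictImage call are taken from the question; the classify helper is my own illustrative name):

import os
from imageai.Prediction import ImagePrediction

execution_path = os.getcwd()

# heavy step: build and load the model exactly once, before any scraping
predictor = ImagePrediction()
predictor.setModelTypeAsResNet()
predictor.setModelPath(os.path.join(
    execution_path, "resnet50_weights_tf_dim_ordering_tf_kernels.h5"))
predictor.loadModel()

def classify(image_path):
    """Return the single best label for one image, reusing the loaded model."""
    predictions, probabilities = predictor.predictImage(image_path, result_count=1)
    return predictions[0]

# inside the existing download loop, the per-image model setup would then be
# replaced by a single call such as:
# best_guess = classify(os.path.join(execution_path, filename))

For the reading-material part of the question, the standard-library tracemalloc and gc module documentation are reasonable starting points for seeing where Python allocates memory.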

Related

Python 2.7 download images

I'm using Python 2.7 and PyCharm is my editor. What I'm trying to do is have Python go to a site, download an image from that site, and save it to my directory. Currently I have no errors, but I don't think it is downloading, because the file is not showing up in my directory.
import random
import urllib2

def download_web_image(url):
    name = random.randrange(1, 1000)
    full_name = str(name) + ".jpg"
    urllib2.Request(url, full_name)

download_web_image("www.example.com/page1/picture.jpg")
This will do the trick. The rest can stay the same, just edit your function to include the two lines I have added.
def download_web_image(url):
    name = random.randrange(1, 1000)
    full_name = str(name) + ".jpg"
    request = urllib2.Request(url)
    img = urllib2.urlopen(request).read()
    with open(full_name, 'wb') as f:  # binary mode so the image bytes are not mangled
        f.write(img)
Edit 1:
Exact code as requested in comments.
import urllib2

def download_web_image(url):
    request = urllib2.Request(url)
    img = urllib2.urlopen(request).read()
    with open('test.jpg', 'wb') as f:  # binary mode so the JPEG bytes are not mangled
        f.write(img)

download_web_image("http://upload.wikimedia.org/wikipedia/commons/8/8c/JPEG_example_JPG_RIP_025.jpg")
You are simply creating a Request but you are not downloading the image. Try the following instead:
urllib.urlretrieve(url, os.path.join(os.getcwd(), full_name)) # download and save image
Or try the requests library:
import requests

image = requests.get("http://www.example.com/page1/picture.jpg")  # requests needs the scheme in the URL
with open('picture.jpg', 'wb') as f:
    f.write(image.content)

Using Python and the Pillow Library and CGI for file upload

I've been having a hard time importing Image and cgi together. Basically, I want to upload a GIF image and then display the output, which is a thumbnail of the image. I'm getting a bunch of errors, and I'm unable to use from PIL import Image and cgi at the same time. Here is the code below. Your help is highly appreciated. I did hours and hours of research and can't figure this out. I'm getting this error: End of script output before headers: save_file.py
#!C:\Anaconda3\python.exe
from PIL import Image
import cgi, os
import cgitb; cgitb.enable()  # cgitb enabled for bug tracking purposes

try:  # Windows needs stdio set for binary mode.
    import msvcrt
    msvcrt.setmode(0, os.O_BINARY)  # stdin  = 0
    msvcrt.setmode(1, os.O_BINARY)  # stdout = 1
except ImportError:
    pass

form = cgi.FieldStorage()
# A nested FieldStorage instance holds the file
fileitem = form['file']
# Test if the file was uploaded
if fileitem.filename:
    # strip leading path from file name to avoid directory traversal attacks
    fn = os.path.basename(fileitem.filename)
    ext = fn[-3:]
    condi = 'gif'
    if ext == condi:
        open('tmpmedia/' + fn, 'wb').write(fileitem.file.read())
        message = 'The file "' + fn + '" was uploaded successfully'
        selectfunction = "Please select a function from below"
    else:
        message = 'You can only upload a gif image. No file was uploaded'

size = (128, 128)
saved = "thumb.jpg"
infile = ('C:\\xampp\\htdocs\\aztec\\tmpmedia\\gif' + fn)
try:
    im = Image.open(infile)
except:
    print("Unable to load image")
im.thumbnail(size)
im.save(saved)

print("Content-Type: text/html\n")
print("<html>")
print("<body>")
print(message)
print("</body>")
print("</html>")
I figured it out. There must be a try statement at the end, after the HTML is printed, and then I can use from PIL import Image.
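As a hedged illustration of that arrangement (not the poster's exact fix): the Content-Type header is printed before anything else, and the upload and thumbnail work are wrapped in a try block, so an exception can no longer abort the script before the headers go out. Paths and file names below are placeholders taken loosely from the question:

#!C:\Anaconda3\python.exe
from PIL import Image
import cgi, os
import cgitb; cgitb.enable()

# Emit the CGI header before anything else, so a later failure cannot
# produce "End of script output before headers".
print("Content-Type: text/html\n")

form = cgi.FieldStorage()
message = "No file was uploaded"
try:
    fileitem = form['file']
    if fileitem.filename:
        fn = os.path.basename(fileitem.filename)  # avoid directory traversal
        open('tmpmedia/' + fn, 'wb').write(fileitem.file.read())
        im = Image.open('tmpmedia/' + fn)
        im.thumbnail((128, 128))
        im.save("thumb.jpg")
        message = 'The file "' + fn + '" was uploaded and a thumbnail was created'
except Exception as err:
    message = "Unable to process the image: %s" % err

print("<html><body>" + message + "</body></html>")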

Python not saving files to a different folder than where the python file is on Ubuntu 11.10

So I am pulling jpgs from a URL. I am able to save the image files as long as they are being saved to the same folder the Python file is in. As soon as I attempt to change the folder (seen here as the outpath), the image files do not get created. I imagine it has something to do with my outpath, but it seems to be fine when I print it and watch it in the console.
Ubuntu 11.10 OS, by the way. I'm a newbie with both Linux and Python, so it could easily be either. :)
If I were to print the sequence taken from the CSV file, it would look like: [['Champ1', 'Subname1', 'imgurl1'], ['Champ2', 'subname2', 'imgurl2'], ['Champ3', 'subname3', 'imgurl3']...]
(It was scraped from a website)
import csv
from urlparse import urlsplit
from urllib2 import urlopen, build_opener
from urllib import urlretrieve
import webbrowser
import os
import sys

reader = csv.reader(open('champdata.csv', "rb"), delimiter=",", skipinitialspace=True)
champInfo = []
for champs in reader:
    champInfo.append(champs)

size = len(champInfo)

def GetImages(x, out_folder="/home/sean/Home/workspace/CP/images"):
    b = 1
    size = len(champInfo)
    print size
    while b < size:
        temp_imgurls = x.pop(b)
        filename = os.path.basename(temp_imgurls[2])
        print filename
        outpath = os.path.join(out_folder, filename)
        print outpath
        u = urlopen(temp_imgurls[2])
        localFile = open(outpath, 'wb')
        localFile.write(u.read())
        localFile.close()
        b += 1

GetImages(champInfo)
I understand it's quite crude, but it does work, as long as I'm not attempting to change the save path.
Try providing the complete image path everywhere
E:/../home/sean/Home/workspace/CD/images
def GetImages(x):
    b = 1
    size = len(champInfo)
    print size
    while b < size:
        temp_imgurls = x.pop(b)
        filename = temp_imgurls[2]
        u = urlopen(temp_imgurls[2])
        localFile = open(filename, 'wb')
        localFile.write(u.read())
        localFile.close()
        b += 1  # advance to the next row
And this code will save files in the same directory the script is in.
Updated Answer:
I think the answer to your problem is just to add a check for the output directory's existence, and create it if needed, i.e. add:
if not os.path.exists(out_folder):
    os.makedirs(out_folder)
to your existing code.
More generally, you could try something more like this:
import csv
from urllib2 import urlopen
import os
import sys

default_outfolder = "/home/sean/Home/workspace/CD/images"

# simple arg passing without error checking
out_folder = sys.argv[1] if len(sys.argv) == 2 else default_outfolder

if not os.path.exists(out_folder):
    os.makedirs(out_folder)  # creates out_folder, including any required parent ones
else:
    if not os.path.isdir(out_folder):
        raise RuntimeError('output path must be a directory')

reader = csv.reader(open('champdata.csv', "rb"), delimiter=",", skipinitialspace=True)
for champs in reader:
    img_url = champs[2]
    filename = os.path.basename(img_url)
    outpath = os.path.join(out_folder, filename)
    print 'Downloading %s to %s' % (img_url, outpath)
    with open(outpath, 'wb') as f:
        u = urlopen(img_url)
        f.write(u.read())
The above code works for champdata.csv of the form stuff,more_stuff,http://www.somesite.com.au/path/to/image.png
but will need to be adapted if I have not understood the actual format of your incoming data.

How to save an image locally using Python whose URL address I already know?

I know the URL of an image on Internet.
e.g. http://www.digimouth.com/news/media/2011/09/google-logo.jpg, which contains the logo of Google.
Now, how can I download this image using Python without actually opening the URL in a browser and saving the file manually?
Python 2
Here is a more straightforward way if all you want to do is save it as a file:
import urllib
urllib.urlretrieve("http://www.digimouth.com/news/media/2011/09/google-logo.jpg", "local-filename.jpg")
The second argument is the local path where the file should be saved.
Python 3
As SergO suggested, the code below should work with Python 3.
import urllib.request
urllib.request.urlretrieve("http://www.digimouth.com/news/media/2011/09/google-logo.jpg", "local-filename.jpg")
import urllib
resource = urllib.urlopen("http://www.digimouth.com/news/media/2011/09/google-logo.jpg")
output = open("file01.jpg","wb")
output.write(resource.read())
output.close()
file01.jpg will contain your image.
I wrote a script that does just this, and it is available on my GitHub for your use.
I utilized BeautifulSoup to allow me to parse any website for images. If you will be doing much web scraping (or intend to use my tool), I suggest you sudo pip install BeautifulSoup. Information on BeautifulSoup is available here.
For convenience here is my code:
from bs4 import BeautifulSoup
from urllib2 import urlopen
import urllib

# use this image scraper from the location that
# you want to save scraped images to

def make_soup(url):
    html = urlopen(url).read()
    return BeautifulSoup(html)

def get_images(url):
    soup = make_soup(url)
    # this makes a list of bs4 element tags
    images = [img for img in soup.findAll('img')]
    print (str(len(images)) + " images found.")
    print 'Downloading images to current working directory.'
    # compile our unicode list of image links
    image_links = [each.get('src') for each in images]
    for each in image_links:
        filename = each.split('/')[-1]
        urllib.urlretrieve(each, filename)
    return image_links

# a standard call looks like this
# get_images('http://www.wookmark.com')
This can be done with requests. Load the page and dump the binary content to a file.
import os
import requests
url = 'https://apod.nasa.gov/apod/image/1701/potw1636aN159_HST_2048.jpg'
page = requests.get(url)
f_ext = os.path.splitext(url)[-1]
f_name = 'img{}'.format(f_ext)
with open(f_name, 'wb') as f:
    f.write(page.content)
Python 3
urllib.request — Extensible library for opening URLs
from urllib.error import HTTPError
from urllib.request import urlretrieve
try:
    urlretrieve(image_url, image_local_path)
except FileNotFoundError as err:
    print(err)  # something wrong with local path
except HTTPError as err:
    print(err)  # something wrong with url
I made a script expanding on Yup.'s script. I fixed some things: it now bypasses 403: Forbidden problems, it won't crash when an image fails to be retrieved, it tries to avoid corrupted previews, it gets the right absolute URLs, it gives out more information, and it can be run with an argument from the command line.
# getem.py
# python2 script to download all images in a given url
# use: python getem.py http://url.where.images.are

from bs4 import BeautifulSoup
import urllib2
import shutil
import requests
from urlparse import urljoin
import sys
import time

def make_soup(url):
    req = urllib2.Request(url, headers={'User-Agent': "Magic Browser"})
    html = urllib2.urlopen(req)
    return BeautifulSoup(html, 'html.parser')

def get_images(url):
    soup = make_soup(url)
    images = [img for img in soup.findAll('img')]
    print (str(len(images)) + " images found.")
    print 'Downloading images to current working directory.'
    image_links = [each.get('src') for each in images]
    for each in image_links:
        try:
            filename = each.strip().split('/')[-1].strip()
            src = urljoin(url, each)
            print 'Getting: ' + filename
            response = requests.get(src, stream=True)
            # delay to avoid corrupted previews
            time.sleep(1)
            with open(filename, 'wb') as out_file:
                shutil.copyfileobj(response.raw, out_file)
        except:
            print '  An error occurred. Continuing.'
    print 'Done.'

if __name__ == '__main__':
    url = sys.argv[1]
    get_images(url)
A solution which works with Python 2 and Python 3:
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2

url = "http://www.digimouth.com/news/media/2011/09/google-logo.jpg"
urlretrieve(url, "local-filename.jpg")
or, if the additional requirement of requests is acceptable and it is an http(s) URL:
def load_requests(source_url, sink_path):
    """
    Load a file from an URL (e.g. http).

    Parameters
    ----------
    source_url : str
        Where to load the file from.
    sink_path : str
        Where the loaded file is stored.
    """
    import requests
    r = requests.get(source_url, stream=True)
    if r.status_code == 200:
        with open(sink_path, 'wb') as f:
            for chunk in r:
                f.write(chunk)
Using the requests library
import requests
import shutil, os

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}

currentDir = os.getcwd()
path = os.path.join(currentDir, 'Images')  # saving images to Images folder

def ImageDl(url):
    attempts = 0
    while attempts < 5:  # retry 5 times
        try:
            filename = url.split('/')[-1]
            r = requests.get(url, headers=headers, stream=True, timeout=5)
            if r.status_code == 200:
                with open(os.path.join(path, filename), 'wb') as f:
                    r.raw.decode_content = True
                    shutil.copyfileobj(r.raw, f)
                print(filename)
                break
        except Exception as e:
            attempts += 1
            print(e)

ImageDl(url)
Use a simple python wget module to download the link. Usage below:
import wget
wget.download('http://www.digimouth.com/news/media/2011/09/google-logo.jpg')
This is a very short answer.
import urllib
urllib.urlretrieve("http://photogallery.sandesh.com/Picture.aspx?AlubumId=422040", "Abc.jpg")
Version for Python 3
I adjusted the code of @madprops for Python 3.
# getem.py
# python3 script to download all images in a given url
# use: python getem.py http://url.where.images.are

from bs4 import BeautifulSoup
import urllib.request
import shutil
import requests
from urllib.parse import urljoin
import sys
import time

def make_soup(url):
    req = urllib.request.Request(url, headers={'User-Agent': "Magic Browser"})
    html = urllib.request.urlopen(req)
    return BeautifulSoup(html, 'html.parser')

def get_images(url):
    soup = make_soup(url)
    images = [img for img in soup.findAll('img')]
    print(str(len(images)) + " images found.")
    print('Downloading images to current working directory.')
    image_links = [each.get('src') for each in images]
    for each in image_links:
        try:
            filename = each.strip().split('/')[-1].strip()
            src = urljoin(url, each)
            print('Getting: ' + filename)
            response = requests.get(src, stream=True)
            # delay to avoid corrupted previews
            time.sleep(1)
            with open(filename, 'wb') as out_file:
                shutil.copyfileobj(response.raw, out_file)
        except:
            print('  An error occurred. Continuing.')
    print('Done.')

if __name__ == '__main__':
    get_images('http://www.wookmark.com')
Late answer, but for python>=3.6 you can use dload, i.e.:
import dload
dload.save("http://www.digimouth.com/news/media/2011/09/google-logo.jpg")
if you need the image as bytes, use:
img_bytes = dload.bytes("http://www.digimouth.com/news/media/2011/09/google-logo.jpg")
install using pip3 install dload
Something fresh for Python 3 using requests. Comments are in the code; it's a ready-to-use function.
import requests
from os import path

def get_image(image_url):
    """
    Get image based on url.
    :return: Image name if everything OK, False otherwise
    """
    image_name = path.split(image_url)[1]
    try:
        image = requests.get(image_url)
    except OSError:  # A little too wide, but works OK, no additional imports needed. Catches all connection problems
        return False
    if image.status_code == 200:  # we could have retrieved an error page
        base_dir = path.join(path.dirname(path.realpath(__file__)), "images")  # Use your own path or "" to use the current working directory. Folder must exist.
        with open(path.join(base_dir, image_name), "wb") as f:
            f.write(image.content)
        return image_name

get_image("https://apod.nasddfda.gov/apod/image/2003/S106_Mishra_1947.jpg")
This is the easiest method to download images.
import requests
from slugify import slugify
img_url = 'https://apod.nasa.gov/apod/image/1701/potw1636aN159_HST_2048.jpg'
img = requests.get(img_url).content
img_file = open(slugify(img_url) + '.' + str(img_url).split('.')[-1], 'wb')
img_file.write(img)
img_file.close()
If you don't already have the url for the image, you could scrape it with gazpacho:
from gazpacho import Soup
base_url = "http://books.toscrape.com"
soup = Soup.get(base_url)
links = [img.attrs["src"] for img in soup.find("img")]
And then download the asset with urllib as mentioned:
from pathlib import Path
from urllib.request import urlretrieve as download
directory = "images"
Path(directory).mkdir(exist_ok=True)
link = links[0]
name = link.split("/")[-1]
download(f"{base_url}/{link}", f"{directory}/{name}")
# import the required libraries from Python
import os, pathlib, urllib.request

# Using pathlib, specify where the image is to be saved
downloads_path = str(pathlib.Path.home() / "Downloads")

# Form a full image path by joining the path to the
# image's new name
picture_path = os.path.join(downloads_path, "new-image.png")
# "/home/User/Downloads/new-image.png"

# Using "urlretrieve()" from urllib.request, save the image
urllib.request.urlretrieve("//example.com/image.png", picture_path)

# urlretrieve() takes in 2 arguments
# 1. The URL of the image to be downloaded
# 2. The image's new name after download. By default, the image is saved
#    inside your current working directory
OK, so this is my rudimentary attempt, and probably total overkill.
Update it if needed, as it doesn't handle any timeouts, but I got this working for fun.
Code listed here: https://github.com/JayRizzo/JayRizzoTools/blob/master/pyImageDownloader.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created Syst: MAC OSX High Sierra 21.5.0 (17G65)
# Created Plat: Python 3.9.5 ('v3.9.5:0a7dcbdb13', 'May 3 2021 13:17:02')
# Created By  : Jeromie Kirchoff
# Created Date: Thu Jun 15 23:31:01 2022 CDT
# Last ModDate: Thu Jun 16 01:41:01 2022 CDT
# =============================================================================
# NOTE: Doesn't work on SVG images at this time.
# I will look into this further: https://stackoverflow.com/a/6599172/1896134
# =============================================================================
import requests                               # to get image from the web
import shutil                                 # to save it locally
import os                                     # needed
from os.path import exists as filepathexist   # check if file paths exist
from os.path import join                      # joins path for different os
from os.path import expanduser                # expands current home
from pyuser_agent import UA                   # generates random UserAgent

class ImageDownloader(object):
    """URL ImageDownloader.
    Input : Full Image URL
    Output: Image saved to your ~/Pictures/JayRizzoDL folder.
    """

    def __init__(self, URL: str):
        self.url = URL
        self.headers = {"User-Agent": UA().random}
        self.currentHome = expanduser('~')
        self.desktop = join(self.currentHome + "/Desktop/")
        self.download = join(self.currentHome + "/Downloads/")
        self.pictures = join(self.currentHome + "/Pictures/JayRizzoDL/")
        self.outfile = ""
        self.filename = ""
        self.response = ""
        self.rawstream = ""
        self.createdfilepath = ""
        self.imgFileName = ""
        # Check if the JayRizzoDL exists in the pictures folder.
        # If it doesn't exist, create it.
        if not filepathexist(self.pictures):
            os.mkdir(self.pictures)
        self.main()

    def getFileNameFromURL(self, URL: str):
        """Parse the URL for the name after the last forward slash."""
        NewFileName = self.url.strip().split('/')[-1].strip()
        return NewFileName

    def getResponse(self, URL: str):
        """Try streaming the URL for the raw data."""
        self.response = requests.get(self.url, headers=self.headers, stream=True)
        return self.response

    def gocreateFile(self, name: str, response):
        """Try creating the file with the raw data in a custom folder."""
        self.outfile = join(self.pictures, name)
        with open(self.outfile, 'wb') as outFilePath:
            shutil.copyfileobj(response.raw, outFilePath)
        return self.outfile

    def main(self):
        """Combine everything and use in for loops."""
        self.filename = self.getFileNameFromURL(self.url)
        self.rawstream = self.getResponse(self.url)
        self.createdfilepath = self.gocreateFile(self.filename, self.rawstream)
        print(f"File was created: {self.createdfilepath}")
        return

if __name__ == '__main__':
    # Example when calling the file directly.
    ImageDownloader("https://stackoverflow.design/assets/img/logos/so/logo-stackoverflow.png")
Download an image file, avoiding all possible errors:
import requests
import validators
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError

def is_downloadable(url):
    valid = validators.url(url)
    if valid == False:
        return False
    req = Request(url)
    try:
        response = urlopen(req)
    except HTTPError as e:
        return False
    except URLError as e:
        return False
    else:
        return True

for i in range(len(File_data)):  # File_data contains the list of addresses for the image files
    url = File_data[i][1]
    try:
        if (is_downloadable(url)):
            try:
                r = requests.get(url, allow_redirects=True)
                if url.find('/'):
                    fname = url.rsplit('/', 1)[1]
                    fname = pth + File_data[i][0] + "$" + fname  # destination to save the image file
                    open(fname, 'wb').write(r.content)
            except Exception as e:
                print(e)
    except Exception as e:
        print(e)

Download whole directories in Python SimpleHTTPServer

I really like how I can easily share files on a network using the SimpleHTTPServer, but I wish there was an option like "download entire directory". Is there an easy (one liner) way to implement this?
Thanks
I made that modification for you. I don't know if there are better ways to do it, but:
Just save the file (Ex.: ThreadedHTTPServer.py) and access as:
$ python -m /path/to/ThreadedHTTPServer PORT
The modification also works in a threaded way, so you won't have problems with downloading and navigating at the same time. The code isn't organized, but:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
import threading
import SimpleHTTPServer
import sys, os, zipfile

PORT = int(sys.argv[1])

def send_head(self):
    """Common code for GET and HEAD commands.

    This sends the response code and MIME headers.

    Return value is either a file object (which has to be copied
    to the outputfile by the caller unless the command was HEAD,
    and must be closed by the caller under all circumstances), or
    None, in which case the caller has nothing further to do.
    """
    path = self.translate_path(self.path)
    f = None
    if self.path.endswith('?download'):
        tmp_file = "tmp.zip"
        self.path = self.path.replace("?download", "")
        zip = zipfile.ZipFile(tmp_file, 'w')
        for root, dirs, files in os.walk(path):
            for file in files:
                if os.path.join(root, file) != os.path.join(root, tmp_file):
                    zip.write(os.path.join(root, file))
        zip.close()
        path = self.translate_path(tmp_file)
    elif os.path.isdir(path):
        if not self.path.endswith('/'):
            # redirect browser - doing basically what apache does
            self.send_response(301)
            self.send_header("Location", self.path + "/")
            self.end_headers()
            return None
        else:
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
    ctype = self.guess_type(path)
    try:
        # Always read in binary mode. Opening files in text mode may cause
        # newline translations, making the actual size of the content
        # transmitted *less* than the content-length!
        f = open(path, 'rb')
    except IOError:
        self.send_error(404, "File not found")
        return None
    self.send_response(200)
    self.send_header("Content-type", ctype)
    fs = os.fstat(f.fileno())
    self.send_header("Content-Length", str(fs[6]))
    self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
    self.end_headers()
    return f

def list_directory(self, path):
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    import cgi, urllib
    """Helper to produce a directory listing (absent index.html).

    Return value is either a file object, or None (indicating an
    error). In either case, the headers are sent, making the
    interface the same as for send_head().
    """
    try:
        list = os.listdir(path)
    except os.error:
        self.send_error(404, "No permission to list directory")
        return None
    list.sort(key=lambda a: a.lower())
    f = StringIO()
    displaypath = cgi.escape(urllib.unquote(self.path))
    f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
    f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
    f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
    f.write("<a href='%s'>%s</a>\n" % (self.path + "?download", 'Download Directory Tree as Zip'))
    f.write("<hr>\n<ul>\n")
    for name in list:
        fullname = os.path.join(path, name)
        displayname = linkname = name
        # Append / for directories or # for symbolic links
        if os.path.isdir(fullname):
            displayname = name + "/"
            linkname = name + "/"
        if os.path.islink(fullname):
            displayname = name + "#"
            # Note: a link to a directory displays with # and links with /
        f.write('<li><a href="%s">%s</a>\n'
                % (urllib.quote(linkname), cgi.escape(displayname)))
    f.write("</ul>\n<hr>\n</body>\n</html>\n")
    length = f.tell()
    f.seek(0)
    self.send_response(200)
    encoding = sys.getfilesystemencoding()
    self.send_header("Content-type", "text/html; charset=%s" % encoding)
    self.send_header("Content-Length", str(length))
    self.end_headers()
    return f

Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
Handler.send_head = send_head
Handler.list_directory = list_directory

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""

if __name__ == '__main__':
    server = ThreadedHTTPServer(('0.0.0.0', PORT), Handler)
    print 'Starting server, use <Ctrl-C> to stop'
    server.serve_forever()
Look at the sources, e.g. online here. Right now, if you call the server with a URL that's a directory, its index.html file is served, or, missing that, the list_directory method is called. Presumably, you want instead to make a zip file with the directory's contents (recursively, I imagine), and serve that? Obviously there's no way to do it with a one-line change, since you want to replace what are now lines 68-80 (in method send_head) plus the whole of method list_directory, lines 98-137 -- that's already at least a change to over 50 lines;-).
If you're OK with a change of several dozen lines, not one, and the semantics I've described are what you want, you could of course build the required zipfile as a cStringIO.StringIO object with the ZipFile class, and populate it with an os.walk on the directory in question (assuming you want, recursively, to get all subdirectories as well). But it's most definitely not going to be a one-liner;-).
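For illustration only, here is a hedged sketch of just that zip-building piece, under the answer's own assumptions (Python 2, cStringIO plus zipfile, recursing with os.walk); wiring the resulting buffer into send_head and the response headers is the larger change described above, and the function name is my own:

import os
import zipfile
from cStringIO import StringIO

def zip_directory_to_buffer(top):
    """Pack the directory `top` (recursively) into an in-memory zip archive."""
    buf = StringIO()
    zf = zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED)
    for root, dirs, files in os.walk(top):
        for name in files:
            full = os.path.join(root, name)
            # store paths relative to `top` so the archive unpacks cleanly
            zf.write(full, os.path.relpath(full, top))
    zf.close()
    buf.seek(0)
    return buf  # a file-like object that send_head could hand back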
There is no one-liner which would do it. Also, what do you mean by "download whole dir": as tar or zip?
Anyway you can follow these steps
Derive a class from SimpleHTTPRequestHandler or may be just copy its code
Change list_directory method to return a link to "download whole folder"
Change copyfile method so that for your links you zip whole dir and return it
You may cache zip so that you do not zip folder every time, instead see if any file is modified or not
Would be a fun exercise to do :)
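As a hedged, untested outline of those steps against Python 3's http.server (the modern home of SimpleHTTPRequestHandler), overriding do_GET rather than copyfile, with class and variable names of my own choosing; the listing link and the caching idea from the steps above are left out:

import io
import os
import zipfile
from http.server import HTTPServer, SimpleHTTPRequestHandler

class ZipDirHandler(SimpleHTTPRequestHandler):
    """Serve files normally, but return a zip of the directory for '?download' URLs."""

    def do_GET(self):
        if self.path.endswith('?download'):
            self.path = self.path[:-len('?download')]
            dirpath = self.translate_path(self.path)
            buf = io.BytesIO()
            with zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED) as zf:
                for root, dirs, files in os.walk(dirpath):
                    for name in files:
                        full = os.path.join(root, name)
                        # store paths relative to the requested directory
                        zf.write(full, os.path.relpath(full, dirpath))
            data = buf.getvalue()
            self.send_response(200)
            self.send_header("Content-Type", "application/zip")
            self.send_header("Content-Length", str(len(data)))
            self.end_headers()
            self.wfile.write(data)
        else:
            super().do_GET()

if __name__ == '__main__':
    HTTPServer(('0.0.0.0', 8000), ZipDirHandler).serve_forever()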
There is no simple way.
An alternative is to use the python script below to download the whole folder recursively. This works well for Python 3. Change the URL as needed.
import os
from pathlib import Path
from urllib.parse import urlparse, urljoin

import requests
from bs4 import BeautifulSoup

def get_links(content):
    soup = BeautifulSoup(content)
    for a in soup.findAll('a'):
        yield a.get('href')

def download(url):
    path = urlparse(url).path.lstrip('/')
    print(path)
    r = requests.get(url)
    if r.status_code != 200:
        raise Exception('status code is {} for {}'.format(r.status_code, url))
    content = r.text
    if path.endswith('/'):
        Path(path.rstrip('/')).mkdir(parents=True, exist_ok=True)
        for link in get_links(content):
            if not link.startswith('.'):  # skip hidden files such as .DS_Store
                download(urljoin(url, link))
    else:
        with open(path, 'w') as f:
            f.write(content)

if __name__ == '__main__':
    # the trailing / indicates a folder
    url = 'http://ed470d37.ngrok.io/a/bc/'
    download(url)
I like @mononoke's solution, but there are several problems in it. They are:
it writes files in text mode
sometimes href and the link text are different, especially for non-ASCII paths
it does not download large files block-wise
I tried to fix these problems:
import os
import math
from pathlib import Path
from urllib.parse import urlparse, urljoin

import requests
from bs4 import BeautifulSoup

def get_links(content):
    soup = BeautifulSoup(content)
    for a in soup.findAll('a'):
        yield a.get('href'), a.get_text()

def download(url, path=None, overwrite=False):
    if path is None:
        path = urlparse(url).path.lstrip('/')
    if url.endswith('/'):
        r = requests.get(url)
        if r.status_code != 200:
            raise Exception('status code is {} for {}'.format(r.status_code, url))
        content = r.text
        Path(path.rstrip('/')).mkdir(parents=True, exist_ok=True)
        for link, name in get_links(content):
            if not link.startswith('.'):  # skip hidden files such as .DS_Store
                download(urljoin(url, link), os.path.join(path, name))
    else:
        if os.path.isfile(path):
            print("#existing", path)
            if not overwrite:
                return
        chunk_size = 1024 * 1024
        r = requests.get(url, stream=True)
        content_size = int(r.headers['content-length'])
        total = math.ceil(content_size / chunk_size)
        print("#", path)
        with open(path, 'wb') as f:
            c = 0
            st = 100
            for chunk in r.iter_content(chunk_size=chunk_size):
                c += 1
                if chunk:
                    f.write(chunk)
                ap = int(c * st / total) - int((c - 1) * st / total)
                if ap > 0:
                    print("#" * ap, end="")
            print("\r ", " " * int(c * st / total), "\r", end="")

if __name__ == '__main__':
    # the trailing / indicates a folder
    url = 'http://ed470d37.ngrok.io/a/bc/'
    download(url, "/data/bc")
