Unable to open file object created in Django - python

I'm developing an application that runs on an Apache server with the Django framework. My current script works fine when it runs on the local desktop (without Django). The script downloads all the images from a website to a folder on the desktop. However, when I run the script on the server, Django creates a file object that apparently has something in it (it should be Google's logo), but I can't open the file. I also create an HTML file with updated image link locations, and that file gets created fine, I'm assuming because it's all text. I believe I may have to use a file wrapper somewhere, but I'm not sure. Any help is appreciated. My code is below, thanks!
from django.http import HttpResponse
from bs4 import BeautifulSoup as bsoup
import urlparse
from urllib2 import urlopen
from urllib import urlretrieve
import os
import sys
import zipfile
from django.core.servers.basehttp import FileWrapper

def getdata(request):
    out = r'C:\Users\user\Desktop\images'
    if request.GET.get('q'):
        #url = str(request.GET['q'])
        url = "http://google.com"
        soup = bsoup(urlopen(url))
        parsedURL = list(urlparse.urlparse(url))
        for image in soup.findAll("img"):
            print "Old Image Path: %(src)s" % image
            # Get file name
            filename = image["src"].split("/")[-1]
            # Get full path name if url has to be parsed
            parsedURL[2] = image["src"]
            image["src"] = '%s\%s' % (out, filename)
            print 'New Path: %s' % image["src"]
            # print image
            outpath = os.path.join(out, filename)
            # Retrieve images
            if image["src"].lower().startswith("http"):
                urlretrieve(image["src"], outpath)
            else:
                urlretrieve(urlparse.urlunparse(parsedURL), outpath)  # Construct URL from the parsed tuple
        # Create an HTML file and write to it to check the output (stored in the same directory).
        html = soup.prettify("utf-8")
        with open("output.html", "wb") as file:
            file.write(html)
    else:
        url = 'You submitted nothing!'
    return HttpResponse(url)

My problem had to do with storing the files on the desktop. I stored the files in the Django workspace folder instead, changed the paths, and it worked for me.
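For reference, a minimal sketch of that change, assuming the default settings.py defines BASE_DIR (the images folder name is just an example):

import os
from django.conf import settings

# Store scraped images inside the Django project tree instead of the desktop,
# so the account Apache runs under can actually write to the folder.
out = os.path.join(settings.BASE_DIR, 'images')
if not os.path.exists(out):
    os.makedirs(out)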

Related

Web Scraping FileNotFoundError When Saving to PDF

I copied some Python code in order to download data from a website. Here is my specific website:
https://www.codot.gov/business/bidding/bid-tab-archives/bid-tabs-2017-1
Here is the code which I copied:
import requests
from bs4 import BeautifulSoup

def _getUrls_(res):
    hrefs = []
    soup = BeautifulSoup(res.text, 'lxml')
    main_content = soup.find('div', {'id': 'content-core'})
    table = main_content.find("table")
    for a in table.findAll('a', href=True):
        hrefs.append(a['href'])
    return(hrefs)

bidurl = 'https://www.codot.gov/business/bidding/bid-tab-archives/bid-tabs-2017-1'
r = requests.get(bidurl)
hrefs = _getUrls_(r)

def _getPdfs_(hrefs, basedir):
    for i in range(len(hrefs)):
        print(hrefs[i])
        respdf = requests.get(hrefs[i])
        pdffile = basedir + "/pdf_dot/" + hrefs[i].split("/")[-1] + ".pdf"
        try:
            with open(pdffile, 'wb') as p:
                p.write(respdf.content)
        except FileNotFoundError:
            print("No PDF produced")

basedir = "/Users/ABC/Desktop"
_getPdfs_(hrefs, basedir)
The code runs successfully, but it does not download anything at all, even though there is obviously no FileNotFoundError.
I tried the following two URLs:
https://www.codot.gov/business/bidding/bid-tab-archives/bid-tabs-2017/aqc-088a-035-20360
https://www.codot.gov/business/bidding/bid-tab-archives/bid-tabs-2017/aqc-r100-258-21125
However, both of these URLs just result in "No PDF produced" being printed.
The thing is that the code worked and downloaded successfully for other people, but not for me.
Your code works; I just tested it. You need to make sure that basedir exists; you want to add this to your code:
import os

if not os.path.exists(basedir):
    os.makedirs(basedir)
I used this exact code but replaced basedir with my own directory, and it worked only after I made sure that the path actually exists. Your code does not create the folder if it does not exist.
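Note that the question's code writes into basedir + "/pdf_dot/", so that subdirectory has to exist as well; a small sketch (Python 3):

import os

# Create the pdf_dot subfolder (and any missing parents) before downloading
pdfdir = os.path.join(basedir, "pdf_dot")
os.makedirs(pdfdir, exist_ok=True)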
As others have pointed out, you need to create basedir beforehand. The user running the script may not have the directory created. Make sure you insert this code at the beginning of the script, before the main logic.
Additionally, hardcoding the base directory might not be a good idea when transferring the script to different systems. It would be preferable to use the user's %USERPROFILE% environment variable:
from os import environ
from os.path import join

basedir = join(environ["USERPROFILE"], "Desktop", "pdf_dot")
Which would be the same as C:\Users\blah\Desktop\pdf_dot.
However, the above environment variable only works on Windows. If you want it to work on Linux, you will have to use os.environ["HOME"] instead.
If you need to transfer between both systems, then you can use os.name:
from os import name
from os import environ
from os.path import join

# Windows
if name == 'nt':
    basedir = join(environ["USERPROFILE"], "Desktop", "pdf_dot")
# Linux
elif name == 'posix':
    basedir = join(environ["HOME"], "Desktop", "pdf_dot")
You don't need to specify the directory or create any folder manually. All you need to do is run the following script. When the execution is done, you should get a folder named pdf_dot on your desktop containing the PDF files you wish to grab.
import requests
from bs4 import BeautifulSoup
import os

URL = 'https://www.codot.gov/business/bidding/bid-tab-archives/bid-tabs-2017-1'

dirf = os.path.join(os.environ['USERPROFILE'], 'Desktop', 'pdf_dot')
if not os.path.exists(dirf):
    os.makedirs(dirf)
os.chdir(dirf)

res = requests.get(URL)
soup = BeautifulSoup(res.text, 'lxml')
pdflinks = [itemlink['href'] for itemlink in soup.find_all("a", {"data-linktype": "internal"}) if "reject" not in itemlink['href']]
for pdflink in pdflinks:
    filename = f'{pdflink.split("/")[-1]}.pdf'
    with open(filename, 'wb') as f:
        f.write(requests.get(pdflink).content)

Unable to save downloaded images into a folder on the desktop using python

I have made a scraper which at this moment parses image links and saves the downloaded images into the Python directory by default. The only thing I want to do now is choose a folder on the desktop to save those images in, but I can't. Here is what I'm up to:
import requests
import os.path
import urllib.request
from lxml import html

def Startpoint():
    url = "https://www.aliexpress.com/"
    response = requests.get(url)
    tree = html.fromstring(response.text)
    titles = tree.xpath('//div[@class="item-inner"]')
    for title in titles:
        Pics = "https:" + title.xpath('.//span[@class="pic"]//img/@src')[0]
        endpoint(Pics)

def endpoint(images):
    sdir = (r'C:\Users\ar\Desktop\mth')
    testfile = urllib.request.URLopener()
    xx = testfile.retrieve(images, images.split('/')[-1])
    filename = os.path.join(sdir, xx)
    print(filename)

Startpoint()
Upon execution, the above code throws this error: "join() argument must be str or bytes, not 'tuple'"
You can download images with Python's urllib. You can see the official Python documentation here: urllib documentation for Python 2.7. If you want to use Python 3, then follow the urllib documentation for Python 3.
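For reference, URLopener.retrieve() returns a (filename, headers) tuple, which is why os.path.join() complains in the question. A minimal sketch using urllib.request.urlretrieve() to save straight into the target folder (assuming the mth folder already exists):

import os
import urllib.request

def endpoint(image_url):
    sdir = r'C:\Users\ar\Desktop\mth'                       # target folder from the question
    filename = os.path.join(sdir, image_url.split('/')[-1])
    urllib.request.urlretrieve(image_url, filename)         # download directly to the full path
    print(filename)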
You could use urllib.request, BytesIO from io, and PIL's Image (if you have a direct URL to the image):
from PIL import Image
from io import BytesIO
import urllib.request

def download_image(url):
    req = urllib.request.Request(url)
    response = urllib.request.urlopen(req)
    content = response.read()
    img = Image.open(BytesIO(content))
    img.filename = url
    return img
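A possible usage of that helper, saving the returned image into the folder from the question (the URL and file name here are just placeholders):

img = download_image("https://example.com/picture.jpg")   # hypothetical direct image URL
img.save(r'C:\Users\ar\Desktop\mth\picture.jpg')          # PIL's Image.save writes the file to disk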
The images are loaded dynamically now, so I thought I'd update this post:
import os
from selenium import webdriver
import urllib.request
from lxml.html import fromstring

url = "https://www.aliexpress.com/"

def get_data(link):
    driver.get(link)
    tree = fromstring(driver.page_source)
    for title in tree.xpath('//li[@class="item"]'):
        pics = "https:" + title.xpath('.//*[contains(@class,"img-wrapper")]//img/@src')[0]
        os.chdir(r"C:\Users\WCS\Desktop\test")
        urllib.request.urlretrieve(pics, pics.split('/')[-1])

if __name__ == '__main__':
    driver = webdriver.Chrome()
    get_data(url)
    driver.quit()
This is the code to download an HTML file from the web:
import random
import urllib.request

def download(url):
    # random number used to name the file
    name = random.randrange(1, 1000)
    full_name = str(name) + ".html"  # compatible data type
    urllib.request.urlretrieve(url, full_name)  # main function

download("any url")
This is the code for downloading any HTML file from the internet; you just have to provide the link to the function.
In your case, you have said that you retrieved the image links from the web page, so you can change the extension from ".html" to a compatible type. The problem is that the images can have different extensions, maybe ".jpg", ".png", etc.
So what you can do is match the ending of the link using if/else with string matching and then assign the extension at the end.
Here is an example for illustration:
import random
import urllib.request

def download(url):
    # random number used to name the file
    name = random.randrange(1, 1000)
    # match the ending of the link and assign the matching extension
    if url.endswith(".png"):
        full_name = str(name) + ".png"
    elif url.endswith(".jpg"):
        full_name = str(name) + ".jpg"
    else:
        full_name = str(name) + ".html"
    urllib.request.urlretrieve(url, full_name)  # main function

download("any url")
You can add more if/else branches for the various extension types.
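As an alternative to a chain of if/else checks, os.path.splitext() can pull the extension straight from the link; a small sketch of that idea:

import os
import random
import urllib.request

def download(url):
    name = random.randrange(1, 1000)                    # random file name, as above
    extension = os.path.splitext(url)[1] or ".html"     # ".png", ".jpg", ... fall back to ".html"
    urllib.request.urlretrieve(url, str(name) + extension)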
If it helps your situation, give it a thumbs up, buddy.

How to open a remote file with GDAL in Python through a Flask application

So, I'm developing a Flask application which uses the GDAL library, where I want to stream a .tif file through a URL.
Right now I have a method that reads a .tif file using gdal.Open(filepath). When run outside of the Flask environment (like in a Python console), it works fine whether I specify the path to a local file or a URL.
from gdalconst import GA_ReadOnly
import gdal

filename = 'http://xxxxxxx.blob.core.windows.net/dsm/DSM_1km_6349_614.tif'
dataset = gdal.Open(filename, GA_ReadOnly)
if dataset is not None:
    print 'Driver: ', dataset.GetDriver().ShortName, '/', \
        dataset.GetDriver().LongName
However, when that code is executed inside the Flask environment, I get the following message:
ERROR 4: `http://xxxxxxx.blob.core.windows.net/dsm/DSM_1km_6349_614.tif' does
not exist in the file system,
and is not recognised as a supported dataset name.
If I instead download the file to the local filesystem of the Flask app, and insert the path to the file, like this:
block_blob_service = get_blobservice() #Initialize block service
block_blob_service.get_blob_to_path('dsm', blobname, filename) # Get blob to local filesystem, path to file saved in filename
dataset = gdal.Open(filename, GA_ReadOnly)
That works just fine...
The thing is, since I'm requesting some big files (200 MB), I want to stream the files using the URL instead of the local file reference.
Does anyone have an idea of what could be causing this? I also tried putting "/vsicurl_streaming/" in front of the url as suggested elsewhere.
I'm using Python 2.7, 32-bit with GDAL 2.0.2
Please try the following code snippet:
from gzip import GzipFile
from io import BytesIO
import urllib2
from uuid import uuid4
from gdalconst import GA_ReadOnly
import gdal

def open_http_query(url):
    try:
        request = urllib2.Request(url,
                                  headers={"Accept-Encoding": "gzip"})
        response = urllib2.urlopen(request, timeout=30)
        if response.info().get('Content-Encoding') == 'gzip':
            return GzipFile(fileobj=BytesIO(response.read()))
        else:
            return response
    except urllib2.URLError:
        return None

url = 'http://xxx.blob.core.windows.net/container/example.tif'
image_data = open_http_query(url)
mmap_name = "/vsimem/" + uuid4().get_hex()
gdal.FileFromMemBuffer(mmap_name, image_data.read())
dataset = gdal.Open(mmap_name)
if dataset is not None:
    print 'Driver: ', dataset.GetDriver().ShortName, '/', \
        dataset.GetDriver().LongName
This uses a GDAL in-memory file (/vsimem/) to open an image retrieved via HTTP directly, without saving it to a temporary file on disk.
Refer to https://gist.github.com/jleinonen/5781308 for more info.
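Alternatively, if your GDAL build was compiled with curl support, the /vsicurl/ handler may let gdal.Open() read the remote file directly; this is an untested suggestion for the setup in the question:

from gdalconst import GA_ReadOnly
import gdal

# /vsicurl/ makes GDAL fetch the remote file over HTTP; without curl support,
# the same "ERROR 4 ... not recognised" message appears.
dataset = gdal.Open('/vsicurl/http://xxxxxxx.blob.core.windows.net/dsm/DSM_1km_6349_614.tif', GA_ReadOnly)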

Download a file from the web when the file name changes every time

I have a task to download the McAfee virus definition file daily from the download site. The name of the file changes every day. The path to the download site is "http://download.nai.com/products/licensed/superdat/english/intel/7612xdat.exe"
The ID 7612 will change every day to something different, hence I can't hard-code it. I have to find a way to list the file name before providing it as an argument.
On Stack Overflow I found someone's script which will work for me if someone could advise how to handle the changing file name.
Here is the script that I'm going to use:
def download(url):
    """Copy the contents of a file from a given URL
    to a local file.
    """
    import urllib
    webFile = urllib.urlopen(url)
    localFile = open(url.split('/')[-1], 'wb')  # binary mode so the .exe is not corrupted on Windows
    localFile.write(webFile.read())
    webFile.close()
    localFile.close()

if __name__ == '__main__':
    import sys
    if len(sys.argv) == 2:
        try:
            download(sys.argv[1])
        except IOError:
            print 'Filename not found.'
    else:
        import os
        print 'usage: %s http://server.com/path/to/filename' % os.path.basename(sys.argv[0])
Could someone advise me?
Thanks in advance.
It's a two-step process: first, scrape the index page looking for files; second, grab the latest one and download it.
import urllib
import lxml.html
import os
import shutil

# index page
pattern_files_url = "http://download.nai.com/products/licensed/superdat/english/intel"
# relative url references based here
pattern_files_base = '/'.join(pattern_files_url.split('/')[:-1])

# scrape the index page for the latest file list
doc = lxml.html.parse(pattern_files_url)
pattern_files = [ref for ref in doc.xpath("//a/@href") if ref.endswith('xdat.exe')]

if pattern_files:
    pattern_files.sort()
    newest = pattern_files[-1]
    local_name = newest.split('/')[-1]

    # grab it if we don't already have it
    if not os.path.exists(local_name):
        url = pattern_files_base + '/' + newest
        print("downloading %s to %s" % (url, local_name))
        remote = urllib.urlopen(url)
        with open(local_name, 'wb') as local:
            shutil.copyfileobj(remote, local, length=65536)

"File Does Not Exist" when dynamically creating files for PDF download with Requests in Python 2.7

I'm trying to dynamically download PDFs from a website. I am sure I'm listing them correctly, but I am not sure I'm doing the actual file I/O correctly. I get the following error:
File "download.py", line 22, in <module>
with open("'"+url+"'", "wb") as pdf:
IOError: [Errno 2] No such file or directory: "'http://www.lcs.mit.edu/publications/pubs/pdf/MIT-LCS-TR-179.pdf'"
Here is my code:
import requests
import re
from bs4 import BeautifulSoup

origin = requests.get("http://freehaven.net/anonbib")
soup = BeautifulSoup(origin.text)
results = soup.find_all(href=re.compile("(http).*(pdf)"))
for link in results:
    url = (link.get('href'))
    r = requests.get(url)
    with open("'" + url + "'", "wb") as pdf:
        try:
            pdf.write(r.content)
        finally:
            pdf.close
If url is set to 'http://www.lcs.mit.edu/publications/pubs/pdf/MIT-LCS-TR-179.pdf', your code fails because it is trying to open a file with that name on your filesystem.
Instead, try something like this:
fileForUrl = '/tmp/' + url.split('/')[-1]
with open(fileForUrl, 'wb') as pdf:
    # Rest of the code as before
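Putting it together, a minimal sketch of the download loop with the corrected open() call (the /tmp/ prefix assumes a Unix-like system):

import re
import requests
from bs4 import BeautifulSoup

origin = requests.get("http://freehaven.net/anonbib")
soup = BeautifulSoup(origin.text)
results = soup.find_all(href=re.compile("(http).*(pdf)"))
for link in results:
    url = link.get('href')
    r = requests.get(url)
    fileForUrl = '/tmp/' + url.split('/')[-1]   # file name only, not the whole URL
    with open(fileForUrl, 'wb') as pdf:
        pdf.write(r.content)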
