import re
import string
import shutil
import os
import os.path
import time
import datetime
import math
import urllib
from array import array
import random
filehandle = urllib.urlopen('http://www.google.com/') #open webpage
s = filehandle.read() #read
print s #display
#what i plan to do with it once i get the first part working
#results = re.findall('[<td style="font-weight:bold;" nowrap>$][0-9][0-9][0-9][.][0-9][0-9][</td></tr></tfoot></table>]',s)
#earnings = '$ '
#for money in results:
#earnings = earnings + money[1]+money[2]+money[3]+'.'+money[5]+money[6]
#print earnings
#raw_input()
This is the code I have so far. I have looked at all the other forums that give solutions, such as checking the name of the script (which is parse_Money.py); I have tried it with urllib.request.urlopen, AND I have tried running it on Python 2.5, 2.6, and 2.7. If anybody has any suggestions they would be really welcome, thanks everyone!!
--Matt
---EDIT---
I also tried this code and it worked, so I'm thinking it's some kind of syntax error; if anybody with a sharp eye can point it out, I would be very appreciative.
import shutil
import os
import os.path
import time
import datetime
import math
import urllib
from array import array
import random
b = 3
#find URL
URL = raw_input('Type the URL you would like to read from [Example: http://www.google.com/]: ')
while b == 3:
    #get file name
    file1 = raw_input('Enter a file name for the downloaded code: ')
    filepath = file1 + '.txt'
    if os.path.isfile(filepath):
        print 'File already exists'
        b = 3
    else:
        print 'Filename accepted'
        b = 4
file_path = filepath
#open file
FileWrite = open(file_path, 'a')
#access URL
filehandle = urllib.urlopen(URL)
#display source code
for lines in filehandle.readlines():
    FileWrite.write(lines)
    print lines
print 'The above has been saved in both a text and html file'
#close files
filehandle.close()
FileWrite.close()
It appears that the urlopen method is available in the urllib.request module, and not in the urllib module as you're expecting.
Rule of thumb: if you're getting an AttributeError, that field/operation is not present in the particular module.
EDIT - Thanks to AndiDog for pointing out that this is a solution valid for Python 3.x, and not applicable to Python 2.x!
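For reference, a minimal Python 3 version of the original snippet would look something like this (a sketch: print becomes a function, and read() returns bytes, so the UTF-8 decode is an assumption about the page's encoding):

from urllib.request import urlopen

filehandle = urlopen('http://www.google.com/')  # open webpage
s = filehandle.read().decode('utf-8', errors='replace')  # bytes -> str
filehandle.close()
print(s)  # display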
The urlopen function is actually in the urllib2 module. Try import urllib2 and use urllib2.urlopen
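In Python 2 that would look something like this (the same fetch, just through urllib2):

import urllib2

filehandle = urllib2.urlopen('http://www.google.com/')  # open webpage
s = filehandle.read()  # read
filehandle.close()
print s  # display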
I see that you are using Python 2, or at least intend to.
The urlopen helper function is available in both urllib and urllib2 in Python 2.
What you need to do is execute the script against the correct version of Python:
C:\Python26\python.exe yourscript.py
I was trying to make a script that gets a .txt file from a website and pastes its code into a temporary, executable Python file, but it's not working. Here is the code:
from urllib.request import urlopen as urlopen
import os
import subprocess
import os
import tempfile
filename = urlopen("https://randomsiteeeee.000webhostapp.com/script.txt")
temp = open(filename)
temp.close()
# Clean up the temporary file yourself
os.remove(filename)
temp = tempfile.TemporaryFile()
temp.close()
If you know a fix for this, please let me know. The error is:
File "test.py", line 9, in <module>
temp = open(filename)
TypeError: expected str, bytes or os.PathLike object, not HTTPResponse
I tried everything, such as making a request to the URL and pasting the result, but that didn't work either. I tried the code that I pasted here and that didn't work as well.
As I said, I was expecting it to get the code from the .txt on the website and turn it into a temporary executable Python script.
you are missing a read:
from urllib.request import urlopen as urlopen
import os
import subprocess
import os
import tempfile
filename = urlopen("https://randomsiteeeee.000webhostapp.com/script.txt").read() # <-- here
temp = open(filename)
temp.close()
# Clean up the temporary file yourself
os.remove(filename)
temp = tempfile.TemporaryFile()
temp.close()
But if the script.txt contains the script and not the filename, you need to create a temporary file and write the content:
from urllib.request import urlopen as urlopen
import os
import subprocess
import os
import tempfile
content = urlopen("https://randomsiteeeee.000webhostapp.com/script.txt").read()
with tempfile.TemporaryFile() as fp:
    name = fp.name
    fp.write(content)
If you want to execute the code you fetch from the url, you may also use exec or eval instead of writing a new script file.
eval and exec are EVIL, they should only be used if you 100% trust the input and there is no other way!
EDIT: How do I use exec?
Using exec, you could do something like this (also, I use requests instead of urllib here. If you prefer urllib, you can do this too):
import requests
exec(requests.get("https://randomsiteeeee.000webhostapp.com/script.txt").text)
You're trying to open a file that is named "the content of a website".
filename = "path/to/my/output/file.txt"
httpresponse = urlopen("https://randomsiteeeee.000webhostapp.com/script.txt").read()
temp = open(filename, "wb")  # open for binary writing; read() returns bytes
temp.write(httpresponse)
temp.close()
is probably more like what you are intending.
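Putting the pieces together, here is a sketch of what the question seems to be after: download the script, write it to a named temporary file, run it with the current interpreter, then clean up. The URL is the one from the question and may no longer exist, and NamedTemporaryFile with delete=False is my choice so the file can be reopened on Windows:

import os
import subprocess
import sys
import tempfile
from urllib.request import urlopen

content = urlopen("https://randomsiteeeee.000webhostapp.com/script.txt").read()
with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as fp:
    fp.write(content)  # write the downloaded source
    path = fp.name     # remember the name so we can run and delete it
subprocess.run([sys.executable, path])  # execute with the current interpreter
os.remove(path)  # clean up the temporary file yourself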
I am trying to scrape data from ESPN Cricinfo using a Python script available on GitHub. The code is the following.
import urllib.request as ur
import csv
import sys
import time
import os
import unicodedata
from urllib.parse import urlparse
from bs4 import BeautifulSoup
BASE_URL = 'http://www.espncricinfo.com'
for i in range(0, 6019):
    url = 'http://search.espncricinfo.com/ci/content/match/search.html?search=first%20class;all=1;page='
    soupy = BeautifulSoup(ur.urlopen(url + str(i)).read())
    time.sleep(1)
    for new_host in soupy.findAll('a', {'class' : 'srchPlyrNmTxt'}):
        try:
            new_host = new_host['href']
        except:
            continue
        odiurl = BASE_URL + urlparse(new_host).geturl()
        new_host = unicodedata.normalize('NFKD', new_host).encode('ascii','ignore')
        print (new_host)
        print (str.split(new_host, "/"))[4]
        html = urllib2.urlopen(odiurl).read()
        if html:
            with open('espncricinfo-fc/{0!s}'.format(str.split(new_host, "/")[4]), "wb") as f:
                f.write(html)
And the error is in this line.
print (str.split(new_host, "/"))[4]
TypeError: descriptor 'split' requires a 'str' object but received a 'bytes'
Any help from you would be appreciated. Thanks!
Use
str.split(new_host.decode("utf-8"), "/")[4]
.decode("utf-8") obviously being the most important part. That turns your bytes object into a string.
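A quick interpreter illustration (the href value here is made up):

>>> b"/ci/content/player/12345.html".decode("utf-8").split("/")
['', 'ci', 'content', 'player', '12345.html']
>>> b"/ci/content/player/12345.html".decode("utf-8").split("/")[4]
'12345.html'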
On another note, be aware that urllib2 (which you're using but not importing, by the way) is no longer used (see this). Instead, you could use from urllib.request import urlopen.
EDIT: This is the full code that won't give you the error you described in your question. I am highlighting that because without the file previously created, the with open(...) statement will give you a FileNotFoundError.
import urllib.request as ur
import csv
import sys
import time
import os
import unicodedata
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from urllib.request import urlopen
BASE_URL = 'http://www.espncricinfo.com'
for i in range(0, 6019):
    url = 'http://search.espncricinfo.com/ci/content/match/search.html?search=first%20class;all=1;page='
    soupy = BeautifulSoup(ur.urlopen(url + str(i)).read())
    time.sleep(1)
    for new_host in soupy.findAll('a', {'class' : 'srchPlyrNmTxt'}):
        try:
            new_host = new_host['href']
        except:
            continue
        odiurl = BASE_URL + urlparse(new_host).geturl()
        new_host = unicodedata.normalize('NFKD', new_host).encode('ascii','ignore')
        print(new_host)
        print(str.split(new_host.decode("utf-8"), "/")[4])
        html = urlopen(odiurl).read()
        if html:
            with open('espncricinfo-fc/{0!s}'.format(str.split(new_host.decode("utf-8"), "/")[4]), "wb") as f:
                f.write(html)
I would like to do the following in Python 3: read in a FortranFile, but from a URL rather than a local file. The reason is that for my concrete example there are a lot of files and I want to avoid having to download them all first.
I have managed to
a) read in a simple .txt file from a URL
import urllib
from urllib.request import urlopen
url='http://www.deus-consortium.org/deus-library/filelist/deus_file_list_501.txt'
data=urllib.request.urlopen(url)
i=0
for line in data: # files are iterable
    print(i, line)
    i += 1
#alternative: data.read()
b) read in a local FortranFile (binary little endian unformated Fortran file) like this:
The file is from: http://www.deus-consortium.org/deus-library/efiler1/Babel_le/boxlen648_n2048_lcdmw7/post/fof/output_00090/fof_boxlen648_n2048_lcdmw7_masst_00000
import numpy as np  # needed for np.int32 below
from scipy.io import FortranFile
filename='../../Downloads/fof_boxlen648_n2048_rpcdmw7_masst_00000'
ff = FortranFile(filename, 'r')
nhalos=ff.read_ints(dtype=np.int32)[0]
print('number of halos in file',nhalos)
Is there any way to avoid the download and read FortranFiles directly from the URL? I tried
import urllib
from urllib.request import urlopen
url='http://www.deus-consortium.org/deus-library/efiler1/Babel_le/boxlen648_n2048_lcdmw7/cube_00090/fof_boxlen648_n2048_lcdmw7_cube_00000'
pathname = urllib.request.urlopen(url)
ff = FortranFile(pathname, 'r')
ff.read_ints()
which gives "OSError: obtaining file position failed". pathname.read() doesn't work either, because it's a Fortran file.
Any ideas? Thanks in advance!
Maybe you can use tempfile module to download and read the data?
For example:
import urllib
import tempfile
from scipy.io import FortranFile
from urllib.request import urlopen
url='http://www.deus-consortium.org/deus-library/efiler1/Babel_le/boxlen648_n2048_lcdmw7/cube_00090/fof_boxlen648_n2048_lcdmw7_cube_00000'
with tempfile.TemporaryFile() as fp:
    fp.write(urllib.request.urlopen(url).read())
    fp.seek(0)
    ff = FortranFile(fp, 'r')
    info = ff.read_ints()
    print(info)
Prints:
[12808737]
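If you would rather not touch the disk at all, an in-memory buffer should also work, since FortranFile only needs a seekable file-like object. A sketch along the same lines, not tested against this particular dataset:

import io
import urllib.request
from scipy.io import FortranFile

url = 'http://www.deus-consortium.org/deus-library/efiler1/Babel_le/boxlen648_n2048_lcdmw7/cube_00090/fof_boxlen648_n2048_lcdmw7_cube_00000'
buf = io.BytesIO(urllib.request.urlopen(url).read())  # seekable in-memory buffer
ff = FortranFile(buf, 'r')
print(ff.read_ints())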
I would like to create a function that pulls a sound file from a given URL and saves it locally on my machine.
Use the urllib module:
import urllib
urllib.urlretrieve(url, sound_clip_name)
The file will be saved under the name you provide.
Alternatively, using urllib2:
import urllib2
data = urllib2.urlopen(url).read()
f = open('sound_clip', 'wb')  # binary mode, since the clip is binary data
f.write(data)
f.close()
Don't forget to give the extension of your file.
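In Python 3 the same helper lives in urllib.request; the URL and filename below are placeholders:

from urllib.request import urlretrieve

urlretrieve('http://example.com/clip.mp3', 'clip.mp3')  # saves as clip.mp3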
If you're on Python 2.7, the urllib2 module is your friend; in Python 3 it's urllib.request.
Example in 2.7:
import urllib2
f = urllib2.urlopen('http://www.python.org/')
filename = 'python.html'  # pick an output name; the original snippet left this undefined
with open(filename, 'wb') as fd:
    fd.write(f.read())
I have grabbed a pdf from the web using for example
import requests
pdf = requests.get("http://www.scala-lang.org/docu/files/ScalaByExample.pdf")
I would like to modify this code to display it
from gi.repository import Poppler, Gtk
def draw(widget, surface):
    page.render(surface)
document = Poppler.Document.new_from_file("file:///home/me/some.pdf", None)
page = document.get_page(0)
window = Gtk.Window(title="Hello World")
window.connect("delete-event", Gtk.main_quit)
window.connect("draw", draw)
window.set_app_paintable(True)
window.show_all()
Gtk.main()
How do I modify the document = line to use the variable pdf that contains the pdf?
(I don't mind using popplerqt4 or anything else if that makes it easier.)
It all depends on the OS you're using. These might usually help:
import os
os.system('my_pdf.pdf')
or
os.startfile('path_to_pdf.pdf')
or
import webbrowser
webbrowser.open(r'file:///my_pdf.pdf')
How about using a temporary file?
import tempfile
import urllib
import urlparse
import requests
from gi.repository import Poppler, Gtk
pdf = requests.get("http://www.scala-lang.org/docu/files/ScalaByExample.pdf")
with tempfile.NamedTemporaryFile() as pdf_contents:
    pdf_contents.file.write(pdf.content)  # write the response body, not the Response object
    pdf_contents.file.flush()  # make sure the bytes are on disk before Poppler reads them
    file_url = urlparse.urljoin(
        'file:', urllib.pathname2url(pdf_contents.name))
    document = Poppler.Document.new_from_file(file_url, None)
Try this and tell me if it works:
document = Poppler.Document.new_from_data(str(pdf.content),len(repr(pdf.content)),None)
If you want to open pdf using acrobat reader then below code should work
import subprocess
process = subprocess.Popen(['<here path to acrobat.exe>', '/A', 'page=1', '<here path to pdf>'], shell=False, stdout=subprocess.PIPE)
process.wait()
Since there is a library named pyPdf, you should be able to load the PDF file using that.
If you have any further questions, send me a message.
August 2015: on a fresh installation in Windows 7, the problem is still the same:
Poppler.Document.new_from_data(data, len(data), None)
returns: Type error: must be strings not bytes.
Poppler.Document.new_from_data(str(data), len(data), None)
returns: PDF document is damaged (4).
I have been unable to use this function.
I tried to use a NamedTemporaryFile instead of a file on disk, but for an unknown reason it returns an unknown error.
So I am using a temporary file. Not the prettiest way, but it works.
Here is the test code for Python 3.4, if anyone has an idea:
from gi.repository import Poppler
import tempfile, urllib
from urllib.parse import urlparse
from urllib.request import urljoin
testfile = "d:/Mes Documents/en cours/PdfBooklet3/tempfiles/preview.pdf"
document = Poppler.Document.new_from_file("file:///" + testfile, None) # Works fine
page = document.get_page(0)
print(page) # OK
f1 = open(testfile, "rb")
data1 = f1.read()
f1.close()
data2 = "".join(map(chr, data1)) # converts bytes to string
print(len(data1))
document = Poppler.Document.new_from_data(data2, len(data2), None)
page = document.get_page(0) # returns None
print(page)
pdftempfile = tempfile.NamedTemporaryFile()
pdftempfile.write(data1)
file_url = urllib.parse.urljoin('file:', urllib.request.pathname2url(pdftempfile.name))
print( file_url)
pdftempfile.seek(0)
document = Poppler.Document.new_from_file(file_url, None) # unknown error
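One more avenue that may be worth trying, assuming your Poppler build is recent enough (the C library gained poppler_document_new_from_bytes in 0.82): wrap the raw bytes in a GLib.Bytes and skip the string conversion entirely. An untested sketch under that assumption:

from gi.repository import GLib, Poppler

with open(testfile, "rb") as f:
    data = f.read()
document = Poppler.Document.new_from_bytes(GLib.Bytes.new(data), None)  # password=None
page = document.get_page(0)
print(page)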