How to create an in-memory file object - Python

I want to make an in-memory file to use with the pygame mixer. I mean something like http://www.pygame.org/docs/ref/music.html#pygame.mixer.music.load, which says the load() method supports file objects.
import requests
from pygame import mixer
r = requests.get("http://example.com/some_small_file.mp3")
in_memory_file = file(r.content) # something like this
mixer.init()
mixer.music.load(in_memory_file)
mixer.music.play()

You are probably looking for the BytesIO or StringIO classes from the Python io module, both available in Python 2 and Python 3. They provide a file-like interface you can use in your code exactly the same way you interact with a real file.
StringIO is used to store textual data:
import io
f = io.StringIO("some initial text data")
BytesIO must be used for binary data:
import io
f = io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01")
To store MP3 file data, you will probably need the BytesIO class. To initialize it from a GET request to a server, proceed like this:
import requests
from pygame import mixer
import io
r = requests.get("http://example.com/somesmallmp3file.mp3")
inmemoryfile = io.BytesIO(r.content)
mixer.init()
mixer.music.load(inmemoryfile)
mixer.music.play()
# This frees the memory used by the data
inmemoryfile.close()
Additional note: as both classes inherit from IOBase, they can be used as context managers with the with statement, so you no longer need to call the close() method manually:
import requests
from pygame import mixer
import io
r = requests.get("http://example.com/somesmallmp3file.mp3")
with io.BytesIO(r.content) as inmemoryfile:
    mixer.init()
    mixer.music.load(inmemoryfile)
    mixer.music.play()

Related

Save Visio Document as HTML

I'm trying to convert a lot of Visio files from .vsd to .html, but each file has a lot of pages, so I need to convert all pages to a single .html file.
Using the Python code below, I'm able to convert to PDF, but what I really need is HTML. I noticed I can use win32com.client.Dispatch("SaveAsWeb.VisSaveAsWeb"), but how do I use it? Any ideas?
import sys
import win32com.client
from os.path import abspath
f = abspath(sys.argv[1])
visio = win32com.client.Dispatch("Visio.InvisibleApp")
doc = visio.Documents.Open(f)
doc.ExportAsFixedFormat(1, '{}.pdf'.format(f), 0, 0)
visio.Quit()
exit(0)
Visio cannot do that. You cannot "convert all pages into a single HTML file". You'll have a "root" file and a folder of "supporting" files.
VisSaveAsWeb is pretty well documented, no need to guess:
https://msdn.microsoft.com/en-us/vba/visio-vba/articles/vissaveasweb-object-visio-save-as-web
-- update
With Python, it turned out to be not that trivial to deal with SaveAsWeb. It seems to default to a custom (non-dispatch) interface. I don't think it's possible to deal with this using the win32com library, but it seems to work with comtypes (the comtypes library builds the client based on the type library, i.e. it also supports "custom" interfaces):
import sys
import comtypes
from comtypes import client
from os.path import abspath
f = abspath(sys.argv[1])
visio = comtypes.client.CreateObject("Visio.InvisibleApp")
doc = visio.Documents.Open(f)
comtypes.client.GetModule("{}\\SAVASWEB.DLL".format(visio.Path))
saveAsWeb = visio.SaveAsWebObject.QueryInterface(comtypes.gen.VisSAW.IVisSaveAsWeb)
webPageSettings = saveAsWeb.WebPageSettings.QueryInterface(comtypes.gen.VisSAW.IVisWebPageSettings)
webPageSettings.TargetPath = "{}.html".format(f)
webPageSettings.QuietMode = True
saveAsWeb.AttachToVisioDoc(doc)
saveAsWeb.CreatePages()
visio.Quit()
exit(0)
Other than that, you can try "command line" interface:
http://visualsignals.typepad.co.uk/vislog/2010/03/automating-visios-save-as-web-output.html
import sys
import win32com.client
from os.path import abspath
f = abspath(sys.argv[1])
visio = win32com.client.Dispatch("Visio.InvisibleApp")
doc = visio.Documents.Open(f)
visio.Addons("SaveAsWeb").Run("/quiet=True /target={}.htm".format(f))
visio.Quit()
exit(0)
Other than that, you could give my visio svg-export a try :)

How to write a .npy file to S3 directly?

I would like to know if there is any way to write an array as a NumPy file (.npy) to an AWS S3 bucket directly. I can use np.save to save a file locally, as shown below. But I am looking for a solution to write it directly to S3, without saving locally first.
a = np.array([1, 2, 3, 4])
np.save('/my/localfolder/test1.npy', a)
If you want to bypass your local disk and upload the data directly to the cloud, you may want to use pickle instead of a .npy file:
import boto3
import io
import numpy
import pickle
s3_client = boto3.client('s3')
my_array = numpy.random.randn(10)
# upload without using disk
my_array_data = io.BytesIO()
pickle.dump(my_array, my_array_data)
my_array_data.seek(0)
s3_client.upload_fileobj(my_array_data, 'your-bucket', 'your-file.pkl')
# download without using disk
my_array_data2 = io.BytesIO()
s3_client.download_fileobj('your-bucket', 'your-file.pkl', my_array_data2)
my_array_data2.seek(0)
my_array2 = pickle.load(my_array_data2)
# check that everything is correct
numpy.allclose(my_array, my_array2)
Documentation:
boto3
pickle
BytesIO
I've recently had issues with s3fs dependency conflicts with boto3, so I try to avoid using it. This solution only depends on boto3, does not write to disk, and does not explicitly use pickle.
Saving:
from io import BytesIO
import numpy as np
from urllib.parse import urlparse
import boto3
client = boto3.client("s3")
def to_s3_npy(data: np.ndarray, s3_uri: str):
    # s3_uri looks like f"s3://{BUCKET_NAME}/{KEY}"
    bytes_ = BytesIO()
    np.save(bytes_, data, allow_pickle=True)
    bytes_.seek(0)
    parsed_s3 = urlparse(s3_uri)
    client.upload_fileobj(
        Fileobj=bytes_, Bucket=parsed_s3.netloc, Key=parsed_s3.path[1:]
    )
    return True
Loading:
def from_s3_npy(s3_uri: str):
    bytes_ = BytesIO()
    parsed_s3 = urlparse(s3_uri)
    client.download_fileobj(
        Fileobj=bytes_, Bucket=parsed_s3.netloc, Key=parsed_s3.path[1:]
    )
    bytes_.seek(0)
    return np.load(bytes_, allow_pickle=True)
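For illustration, a hypothetical round trip with these helpers (the bucket name and key below are placeholders):
# Hypothetical usage; "my-bucket" and the key are placeholders.
arr = np.random.randn(4, 4)
to_s3_npy(arr, "s3://my-bucket/arrays/example.npy")
arr2 = from_s3_npy("s3://my-bucket/arrays/example.npy")
assert np.allclose(arr, arr2)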
You can also use s3fs, which is a file system interface to S3 and a wrapper around boto. This solution also uses pickle, so make sure to pass allow_pickle=True to np.load. Refer to the functions below for both writing and reading.
import numpy as np
import pickle
from s3fs.core import S3FileSystem
s3 = S3FileSystem()
bucket = 'your-bucket'  # assumed bucket name; set to your own
def saveLabelsToS3(npyArray, name):
    with s3.open('{}/{}'.format(bucket, name), 'wb') as f:
        f.write(pickle.dumps(npyArray))
def readLabelsFromS3(name):
    return np.load(s3.open('{}/{}'.format(bucket, name)), allow_pickle=True)
# Use as below
saveLabelsToS3(labels, 'folder/filename.pkl')
labels = readLabelsFromS3('folder/filename.pkl')

Python - Download File Using Requests, Directly to Memory

The goal is to download a file from the internet and create from it a file object, or a file-like object, without it ever touching the hard drive. This is just for my knowledge; I want to know if it's possible or practical, particularly because I would like to see if I can circumvent having to code a file deletion line.
This is how I would normally download something from the web, and map it to memory:
import requests
import mmap
u = requests.get("http://www.pythonchallenge.com/pc/def/channel.zip")
with open("channel.zip", "wb") as f: # I want to eliminate this, as this writes to disk
f.write(u.content)
with open("channel.zip", "r+b") as f: # and his as well, because it reads from disk
mm = mmap.mmap(f.fileno(), 0)
mm.seek(0)
print mm.readline()
mm.close() # question: if I do not include this, does this become a memory leak?
r.raw (HTTPResponse) is already a file-like object (just pass stream=True):
#!/usr/bin/env python
import sys
import requests # $ pip install requests
from PIL import Image # $ pip install pillow
url = sys.argv[1]
r = requests.get(url, stream=True)
r.raw.decode_content = True # Content-Encoding
im = Image.open(r.raw) #NOTE: it requires pillow 2.8+
print(im.format, im.mode, im.size)
In general if you have a bytestring; you could wrap it as f = io.BytesIO(r.content), to get a file-like object without touching the disk:
#!/usr/bin/env python
import io
import zipfile
from contextlib import closing
import requests # $ pip install requests
r = requests.get("http://www.pythonchallenge.com/pc/def/channel.zip")
with closing(r), zipfile.ZipFile(io.BytesIO(r.content)) as archive:
    print({member.filename: archive.read(member) for member in archive.infolist()})
You can't pass r.raw to ZipFile() directly because the former is a non-seekable file.
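To see the difference, note that ZipFile must seek to the central directory at the end of the archive; a minimal sketch, reusing the zip URL from the question:
# Sketch: the raw HTTP stream cannot seek, but a BytesIO copy can.
import io
import requests
r = requests.get("http://www.pythonchallenge.com/pc/def/channel.zip", stream=True)
print(r.raw.seekable())      # False: ZipFile() would fail on this
buf = io.BytesIO(r.content)  # .content reads the whole body into memory
print(buf.seekable())        # True: random access works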
I would like to see if I can circumvent having to code a file deletion line
tempfile can delete files automatically: f = tempfile.SpooledTemporaryFile(); f.write(u.content). The data is kept in memory until the .fileno() method is called (if some API requires a real file) or until max_size is reached. Even if the data is written to disk, the file is deleted as soon as it is closed.
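A minimal sketch of that approach, assuming the zip URL from the question and an arbitrary 1 MiB threshold:
# Sketch: the download stays in memory unless it grows past max_size.
import tempfile
import requests
u = requests.get("http://www.pythonchallenge.com/pc/def/channel.zip")
with tempfile.SpooledTemporaryFile(max_size=1024 * 1024) as f:
    f.write(u.content)  # kept in memory while under max_size
    f.seek(0)
    print(f.read(4))    # b'PK\x03\x04' -- the zip magic bytes
# whether spooled to disk or not, the file is cleaned up on close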
Your answer is u.content. The content is in memory. Unless you write it to a file, it won't be stored on disk.
This is what I ended up doing.
import zipfile
import requests
import StringIO
u = requests.get("http://www.pythonchallenge.com/pc/def/channel.zip")
f = StringIO.StringIO()
f.write(u.content)
def extract_zip(input_zip):
    input_zip = zipfile.ZipFile(input_zip)
    return {i: input_zip.read(i) for i in input_zip.namelist()}
extracted = extract_zip(f)

How do I read image data from a URL?

What I'm trying to do is fairly simple when we're dealing with a local file, but the problem comes when I try to do this with a remote URL.
Basically, I'm trying to create a PIL image object from a file pulled from a URL. Sure, I could always just fetch the URL and store it in a temp file, then open it into an image object, but that feels very inefficient.
Here's what I have:
Image.open(urlopen(url))
It flakes out complaining that seek() isn't available, so then I tried this:
Image.open(urlopen(url).read())
But that didn't work either. Is there a Better Way to do this, or is writing to a temporary file the accepted way of doing this sort of thing?
In Python 3 the StringIO and cStringIO modules are gone.
In Python 3 you should use:
from PIL import Image
import requests
from io import BytesIO
response = requests.get(url)
img = Image.open(BytesIO(response.content))
Using a StringIO
import urllib, cStringIO
file = cStringIO.StringIO(urllib.urlopen(URL).read())
img = Image.open(file)
The following works for Python 3:
from PIL import Image
import requests
im = Image.open(requests.get(url, stream=True).raw)
References:
https://github.com/python-pillow/Pillow/pull/1151
https://github.com/python-pillow/Pillow/blob/master/CHANGES.rst#280-2015-04-01
Using requests:
from PIL import Image
import requests
from StringIO import StringIO
response = requests.get(url)
img = Image.open(StringIO(response.content))
Python 3
from urllib.request import urlopen
from PIL import Image
img = Image.open(urlopen(url))
img
Jupyter Notebook and IPython
import IPython
url = 'https://newevolutiondesigns.com/images/freebies/colorful-background-14.jpg'
IPython.display.Image(url, width = 250)
Unlike other methods, this method also works in a for loop!
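A sketch of the loop case, assuming a notebook context and a list of image URLs; the explicit display() call is what makes rendering inside a loop work:
# Hypothetical list of URLs; display() renders each image in the loop.
import IPython
urls = ['https://newevolutiondesigns.com/images/freebies/colorful-background-14.jpg']
for u in urls:
    IPython.display.display(IPython.display.Image(u, width=250))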
Use StringIO to turn the read string into a file-like object:
from StringIO import StringIO
from PIL import Image
import urllib
Image.open(StringIO(urllib.urlopen(url).read()))
For those doing some sklearn/numpy post-processing (e.g. deep learning), you can wrap the PIL object with np.array(). This might save you from having to Google it like I did:
from PIL import Image
import requests
import numpy as np
from StringIO import StringIO
response = requests.get(url)
img = np.array(Image.open(StringIO(response.content)))
The arguably recommended way to do image input/output these days is to use the dedicated package ImageIO. Image data can be read directly from a URL with one simple line of code:
from imageio import imread
image = imread('https://cdn.sstatic.net/Sites/stackoverflow/img/logo.png')
Many answers on this page predate the release of that package and therefore do not mention it. ImageIO started out as a component of the scikit-image toolkit. It supports a number of scientific formats on top of the ones provided by the popular image-processing library Pillow, and wraps it all in a clean API focused solely on image input/output. In fact, SciPy removed its own image reader/writer in favor of ImageIO.
Select the image in Chrome, right-click on it, click on Copy image address, and paste it into a str variable (my_url) to read the image:
import shutil
import requests
my_url = 'https://www.washingtonian.com/wp-content/uploads/2017/06/6-30-17-goat-yoga-congressional-cemetery-1-994x559.jpg'
response = requests.get(my_url, stream=True)
with open('my_image.png', 'wb') as file:
    shutil.copyfileobj(response.raw, file)
del response
Then open it:
from PIL import Image
img = Image.open('my_image.png')
img.show()
Manually wrapping in BytesIO is no longer needed since Pillow >= 2.8.0. Just use Image.open(response.raw).
Adding on top of Vinícius's comment:
You should pass stream=True, as noted at https://requests.readthedocs.io/en/master/user/quickstart/#raw-response-content
So
img = Image.open(requests.get(url, stream=True).raw)
Use urllib.request.urlretrieve() and PIL.Image.open() to download and read image data:
import requests
import urllib.request
import PIL
urllib.request.urlretrieve("https://i.imgur.com/ExdKOOz.png", "sample.png")
img = PIL.Image.open("sample.png")
img.show()
Or call requests.get(url) with url as the address of the file to download via a GET request, call io.BytesIO(obj) with obj as the content of the response to load the raw data as a bytes object, and then call PIL.Image.open(bytes_obj) with that bytes object to load the image data:
import io
response = requests.get("https://i.imgur.com/ExdKOOz.png")
image_bytes = io.BytesIO(response.content)
img = PIL.Image.open(image_bytes)
img.show()
The same stream=True pattern also works for loading an image and converting it to a numpy array:
from PIL import Image
import numpy as np
import requests
image = Image.open(requests.get("https://previews.123rf.com/images/darrenwhi/darrenwhi1310/darrenwhi131000024/24022179-photo-of-many-cars-with-one-a-different-color.jpg", stream=True).raw)
# image = image.resize((420, 250))
image_array = np.array(image)
image
To get the image directly as a numpy array, without using PIL:
import requests, io
import matplotlib.pyplot as plt
response = requests.get(url).content
img = plt.imread(io.BytesIO(response), format='JPG')
plt.imshow(img)

Downloading and unzipping a .zip file without writing to disk

I have managed to get my first Python script to work; it downloads a list of .ZIP files from a URL and then extracts the ZIP files and writes them to disk.
I am now at a loss to achieve the next step.
My primary goal is to download and extract the zip file and pass the contents (CSV data) via a TCP stream. I would prefer not to actually write any of the zip or extracted files to disk if I could get away with it.
Here is my current script which works but unfortunately has to write the files to disk.
import urllib, urllister
import zipfile
import urllib2
import os
import time
import pickle

# check for extraction directories existence
if not os.path.isdir('downloaded'):
    os.makedirs('downloaded')
if not os.path.isdir('extracted'):
    os.makedirs('extracted')

# open logfile for downloaded data and save to local variable
if os.path.isfile('downloaded.pickle'):
    downloadedLog = pickle.load(open('downloaded.pickle'))
else:
    downloadedLog = {'key':'value'}

# remove entries older than 5 days (to maintain speed)

# path of zip files
zipFileURL = "http://www.thewebserver.com/that/contains/a/directory/of/zip/files"

# retrieve list of URLs from the webservers
usock = urllib.urlopen(zipFileURL)
parser = urllister.URLLister()
parser.feed(usock.read())
usock.close()
parser.close()

# only parse urls
for url in parser.urls:
    if "PUBLIC_P5MIN" in url:
        # download the file
        downloadURL = zipFileURL + url
        outputFilename = "downloaded/" + url
        # check if file already exists on disk
        if url in downloadedLog or os.path.isfile(outputFilename):
            print "Skipping " + downloadURL
            continue
        print "Downloading ", downloadURL
        response = urllib2.urlopen(downloadURL)
        zippedData = response.read()
        # save data to disk
        print "Saving to ", outputFilename
        output = open(outputFilename, 'wb')
        output.write(zippedData)
        output.close()
        # extract the data
        zfobj = zipfile.ZipFile(outputFilename)
        for name in zfobj.namelist():
            uncompressed = zfobj.read(name)
            # save uncompressed data to disk
            outputFilename = "extracted/" + name
            print "Saving extracted file to ", outputFilename
            output = open(outputFilename, 'wb')
            output.write(uncompressed)
            output.close()
        # send data via tcp stream
        # file successfully downloaded and extracted, store into local log and filesystem log
        downloadedLog[url] = time.time()
        pickle.dump(downloadedLog, open('downloaded.pickle', "wb"))
Below is a code snippet I used to fetch a zipped CSV file; please have a look:
Python 2:
from StringIO import StringIO
from zipfile import ZipFile
from urllib import urlopen
resp = urlopen("http://www.test.com/file.zip")
myzip = ZipFile(StringIO(resp.read()))
for line in myzip.open(file).readlines():
    print line
Python 3:
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
# or: requests.get(url).content
resp = urlopen("http://www.test.com/file.zip")
myzip = ZipFile(BytesIO(resp.read()))
for line in myzip.open(file).readlines():
    print(line.decode('utf-8'))
Here file is a string. To get the actual string that you want to pass, you can use zipfile.namelist(). For instance,
resp = urlopen('http://mlg.ucd.ie/files/datasets/bbc.zip')
myzip = ZipFile(BytesIO(resp.read()))
myzip.namelist()
# ['bbc.classes', 'bbc.docs', 'bbc.mtx', 'bbc.terms']
My suggestion would be to use a StringIO object. They emulate files, but reside in memory. So you could do something like this:
# get_zip_data() gets a zip archive containing 'foo.txt', reading 'hey, foo'
import zipfile
from StringIO import StringIO
zipdata = StringIO()
zipdata.write(get_zip_data())
myzipfile = zipfile.ZipFile(zipdata)
foofile = myzipfile.open('foo.txt')
print foofile.read()
# output: "hey, foo"
Or more simply (apologies to Vishal):
myzipfile = zipfile.ZipFile(StringIO(get_zip_data()))
for name in myzipfile.namelist():
    [ ... ]
In Python 3 use BytesIO instead of StringIO:
import zipfile
from io import BytesIO
filebytes = BytesIO(get_zip_data())
myzipfile = zipfile.ZipFile(filebytes)
for name in myzipfile.namelist():
    [ ... ]
I'd like to offer an updated Python 3 version of Vishal's excellent answer, which used Python 2, along with some explanation of the adaptations/changes, some of which may have already been mentioned.
from io import BytesIO
from zipfile import ZipFile
import urllib.request
url = urllib.request.urlopen("http://www.unece.org/fileadmin/DAM/cefact/locode/loc162txt.zip")
with ZipFile(BytesIO(url.read())) as my_zip_file:
    for contained_file in my_zip_file.namelist():
        # with open(("unzipped_and_read_" + contained_file + ".file"), "wb") as output:
        for line in my_zip_file.open(contained_file).readlines():
            print(line)
            # output.write(line)
Necessary changes:
There's no StringIO module in Python 3 (it's been moved to io.StringIO). Instead, I use io.BytesIO, because we will be handling a bytestream -- Docs, also this thread.
urlopen:
"The legacy urllib.urlopen function from Python 2.6 and earlier has been discontinued; urllib.request.urlopen() corresponds to the old urllib2.urlopen.", Docs and this thread.
Note:
In Python 3, the printed output lines will look like so: b'some text'. This is expected, as they aren't strings - remember, we're reading a bytestream. Have a look at Dan04's excellent answer.
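A quick illustration of that point (the sample bytes are made up):
# Lines read from the zip are bytes objects; decode to get text.
line = b'some text\n'
print(line)                  # b'some text\n'
print(line.decode('utf-8'))  # some text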
A few minor changes I made:
I use with ... as instead of zipfile = ... according to the Docs.
The script now uses .namelist() to cycle through all the files in the zip and print their contents.
I moved the creation of the ZipFile object into the with statement, although I'm not sure if that's better.
I added (and commented out) an option to write the bytestream to file (per file in the zip), in response to NumenorForLife's comment; it adds "unzipped_and_read_" to the beginning of the filename and a ".file" extension (I prefer not to use ".txt" for files with bytestrings). The indenting of the code will, of course, need to be adjusted if you want to use it.
Need to be careful here -- because we have a byte string, we use binary mode, so "wb"; I have a feeling that writing binary opens a can of worms anyway...
I am using an example file, the UN/LOCODE text archive.
What I didn't do:
NumenorForLife asked about saving the zip to disk. I'm not sure what he meant by it -- downloading the zip file? That's a different task; see Oleh Prypin's excellent answer.
Here's a way:
import urllib.request
import shutil
with urllib.request.urlopen("http://www.unece.org/fileadmin/DAM/cefact/locode/2015-2_UNLOCODE_SecretariatNotes.pdf") as response, open("downloaded_file.pdf", 'wb') as out_file:
    shutil.copyfileobj(response, out_file)
I'd like to add my Python 3 answer for completeness:
from io import BytesIO
from zipfile import ZipFile
import requests
def get_zip(file_url):
    url = requests.get(file_url)
    zipfile = ZipFile(BytesIO(url.content))
    files = [zipfile.open(file_name) for file_name in zipfile.namelist()]
    return files.pop() if len(files) == 1 else files
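A hypothetical use of get_zip() (the URL is a placeholder); note that it returns a single file object for a one-file archive and a list otherwise:
# Placeholder URL; get_zip() is defined above.
result = get_zip("http://www.test.com/file.zip")
members = result if isinstance(result, list) else [result]
for member in members:
    print(member.name, len(member.read()))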
write to a temporary file which resides in RAM
it turns out the tempfile module (http://docs.python.org/library/tempfile.html) has just the thing:

tempfile.SpooledTemporaryFile([max_size=0[, mode='w+b'[, bufsize=-1[, suffix=''[, prefix='tmp'[, dir=None]]]]]])

This function operates exactly as TemporaryFile() does, except that data is spooled in memory until the file size exceeds max_size, or until the file’s fileno() method is called, at which point the contents are written to disk and operation proceeds as with TemporaryFile().

The resulting file has one additional method, rollover(), which causes the file to roll over to an on-disk file regardless of its size.

The returned object is a file-like object whose _file attribute is either a StringIO object or a true file object, depending on whether rollover() has been called. This file-like object can be used in a with statement, just like a normal file.

New in version 2.6.
Or, if you're lazy and you have a tmpfs-mounted /tmp on Linux, you can just make a file there, but you have to delete it yourself and deal with naming.
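If you do go that route, tempfile.NamedTemporaryFile can take care of the naming and the deletion for you; a sketch, assuming a tmpfs-backed /tmp:
# Sketch: NamedTemporaryFile picks a unique name and deletes on close.
import tempfile
with tempfile.NamedTemporaryFile(dir='/tmp', suffix='.zip') as f:
    f.write(b'placeholder bytes')
    f.flush()
    print(f.name)  # a real path, usable by APIs that need a filename
# the file is removed automatically when the with block exits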
Adding on to the other answers using requests:
# download from web
import requests
url = 'http://mlg.ucd.ie/files/datasets/bbc.zip'
content = requests.get(url)
# unzip the content
from io import BytesIO
from zipfile import ZipFile
f = ZipFile(BytesIO(content.content))
print(f.namelist())
# outputs ['bbc.classes', 'bbc.docs', 'bbc.mtx', 'bbc.terms']
Use help(f) to get details on more functions, e.g. extractall(), which extracts the contents of the zip file to disk so they can later be used with open.
All of these answers appear too bulky and long. Use requests to shorten the code, e.g.:
import requests, zipfile, io
r = requests.get(zip_file_url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall("/path/to/directory")
Vishal's example, however great, is confusing when it comes to the file name, and I do not see the merit of redefining 'zipfile'.
Here is my example that downloads a zip that contains some files, one of which is a csv file that I subsequently read into a pandas DataFrame:
from StringIO import StringIO
from zipfile import ZipFile
from urllib import urlopen
import pandas
url = urlopen("https://www.federalreserve.gov/apps/mdrm/pdf/MDRM.zip")
zf = ZipFile(StringIO(url.read()))
for item in zf.namelist():
print("File in zip: "+ item)
# find the first matching csv file in the zip:
match = [s for s in zf.namelist() if ".csv" in s][0]
# the first line of the file contains a string - that line shall be ignored, hence skiprows
df = pandas.read_csv(zf.open(match), low_memory=False, skiprows=[0])
(Note, I use Python 2.7.13)
This is the exact solution that worked for me. I just tweaked it a little bit for a Python 3 version by removing StringIO and adding the io library.
Python 3 Version
from io import BytesIO
from zipfile import ZipFile
import pandas
import requests
url = "https://www.nseindia.com/content/indices/mcwb_jun19.zip"
content = requests.get(url)
zf = ZipFile(BytesIO(content.content))
for item in zf.namelist():
print("File in zip: "+ item)
# find the first matching csv file in the zip:
match = [s for s in zf.namelist() if ".csv" in s][0]
# the first line of the file contains a string - that line shall be ignored, hence skiprows
df = pandas.read_csv(zf.open(match), low_memory=False, skiprows=[0])
It wasn't obvious in Vishal's answer what the file name was supposed to be in cases where there is no file on disk. I've modified his answer to work without modification for most needs.
from StringIO import StringIO
from zipfile import ZipFile
from urllib import urlopen
def unzip_string(zipped_string):
    unzipped_string = ''
    zipfile = ZipFile(StringIO(zipped_string))
    for name in zipfile.namelist():
        unzipped_string += zipfile.open(name).read()
    return unzipped_string
Use the zipfile module. To extract a file from a URL, you'll need to wrap the result of a urlopen call in a BytesIO object. This is because the result of a web request returned by urlopen doesn't support seeking:
from urllib.request import urlopen
from io import BytesIO
from zipfile import ZipFile
zip_url = 'http://example.com/my_file.zip'
with urlopen(zip_url) as f:
    with BytesIO(f.read()) as b, ZipFile(b) as myzipfile:
        foofile = myzipfile.open('foo.txt')
        print(foofile.read())
If you already have the file downloaded locally, you don't need BytesIO, just open it in binary mode and pass to ZipFile directly:
from zipfile import ZipFile
zip_filename = 'my_file.zip'
with open(zip_filename, 'rb') as f:
    with ZipFile(f) as myzipfile:
        foofile = myzipfile.open('foo.txt')
        print(foofile.read().decode('utf-8'))
Again, note that you have to open the file in binary ('rb') mode, not as text or you'll get a zipfile.BadZipFile: File is not a zip file error.
It's good practice to use all these things as context managers with the with statement, so that they'll be closed properly.
