Reading a large JSON file in Python (raw_decode)

I am trying to read in a large JSON file (data.json) in Python. Because the JSON file contains multiple JSON objects, multiple dictionaries will be created in Python (the number of dictionaries is unknown), so I used decoder.raw_decode() and a generator.
The following is the code:
import json
import pprint
import io

def parse():
    with open('data.json', encoding='utf-8') as jfile:
        try:
            while True:
                decoder = json.JSONDecoder()
                obj, idx = decoder.raw_decode(jfile)
                yield obj
        except ValueError as e:
            print(e)
            pass
        else:
            print("aha")

def main():
    imputd = parse()
    if imputd:
        while True:
            try:
                print(str(next(imputd)).readlines())
            except StopIteration as e:
                print(e)
                break

main()
I get the error:
Traceback (most recent call last):
  File "H:\Document\Python\j10.py", line 57, in <module>
    main()
  File "H:\Document\Python\j10.py", line 36, in main
    print(str(next(imputd)).readlines())
  File "H:\Document\Python\j10.py", line 21, in parse
    obj, idx = decoder.raw_decode(jfile)
  File "C:\Python34\lib\json\decoder.py", line 360, in raw_decode
    obj, end = self.scan_once(s, idx)
TypeError: first argument must be a string, not _io.TextIOWrapper
I edited the code based on Martijn's answer:
import json
import io

file = open('data.json.txt')

def readin():
    return file.read(2000)

def parse():
    decoder = json.JSONDecoder()
    buffer = ''
    for chunk in iter(readin, ''):
        buffer += chunk
        while buffer:
            try:
                result, index = decoder.raw_decode(buffer)
                yield result
                buffer = buffer[index:]
            except ValueError:
                # Not enough data to decode, read more
                break

def main():
    imputd = parse()
    if imputd:
        while True:
            try:
                print(str(next(imputd)).readlines())
            except StopIteration as e:
                print(e)
                break

main()
and I get a UnicodeDecodeError:
Traceback (most recent call last):
  File "H:\Document\Python\j11.py", line 35, in <module>
    main()
  File "H:\Document\Python\j11.py", line 30, in main
    print(str(next(imputd)).readlines())
  File "H:\Document\Python\j11.py", line 14, in parse
    for chunk in iter(readin, ''):
  File "H:\Document\Python\j11.py", line 8, in readin
    return file.read(2000)
  File "C:\Python34\lib\encodings\cp1252.py", line 23, in decode
    return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in position 4217: character maps to <undefined>

You are passing in the file object, but decoder.raw_decode() only takes text data. You need to do the reading yourself:
obj, idx = decoder.raw_decode(jfile.read())
You are then yielding Python objects created from the JSON data, so your .readlines() call in your main() function loop will also fail.
You are not using raw_decode() correctly, however. You are yourself responsible for feeding it chunks of text; it will not read that text from the file for you. If you want to handle the file in chunks, and there are no clear delimiters between the JSON entries, you'll be forced to read the file in blocks:
from functools import partial

decoder = json.JSONDecoder()
buffer = ''
for chunk in iter(partial(jfile.read, buffersize), ''):
    buffer += chunk
    while buffer:
        try:
            result, index = decoder.raw_decode(buffer)
            yield result
            buffer = buffer[index:]
        except ValueError:
            # Not enough data to decode, read more
            break
This will still yield completely decoded objects; if your file is one long JSON object (like one top-level list or dictionary) then this will not yield the contents of that object one by one; it will still read the whole object before yielding.
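For completeness, here is a minimal, self-contained sketch combining the chunked approach with an explicit encoding. It assumes the file is UTF-8 encoded (your second traceback shows Windows falling back to cp1252) and that the top-level JSON objects may be separated by whitespace, which raw_decode() does not skip on its own:

import json
from functools import partial

def parse(filename, buffersize=2048):
    decoder = json.JSONDecoder()
    buffer = ''
    # An explicit encoding avoids the cp1252 UnicodeDecodeError on Windows.
    with open(filename, encoding='utf-8') as jfile:
        for chunk in iter(partial(jfile.read, buffersize), ''):
            buffer += chunk
            while buffer:
                try:
                    result, index = decoder.raw_decode(buffer)
                except ValueError:
                    # Not enough data to decode; read another chunk.
                    break
                yield result
                # Drop the decoded prefix and any whitespace between objects.
                buffer = buffer[index:].lstrip()

for obj in parse('data.json'):
    print(obj)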

Related

Read clm chunk from WAV file using Python wavfile

I am using the enhanced wavfile.py library, and I want to use it to read Serum-style wavetables. I know that these files use a 'clm' block to store cue points, but I am having trouble reading these using the library.
Right now I'm just trying to read the file (I'll do something with it later); here is my code:
import wavfile as wf
wf.read('wavetable.wav')
When I run the script, I get this error:
[my dir]/wavfile.py:223: WavFileWarning: Chunk b'clm ' skipped
  warnings.warn("Chunk " + str(chunk_id) + " skipped", WavFileWarning)
[my dir]/wavfile.py:223: WavFileWarning: Chunk b'' skipped
  warnings.warn("Chunk " + str(chunk_id) + " skipped", WavFileWarning)
Traceback (most recent call last):
  File "[my dir]/./test.py", line 5, in <module>
    wf.read('wavetable.wav')
  File "[my dir]/wavfile.py", line 228, in read
    _skip_unknown_chunk(fid)
  File "[my dir]/wavfile.py", line 112, in _skip_unknown_chunk
    size = struct.unpack('<i', data)[0]
struct.error: unpack requires a buffer of 4 bytes
Is it even possible to do this using the library? If not, how could I modify the library to make this work?
Bear with me, I'm new to working with files and Python in general.
UPDATE:
Here's the output after I add Madison Courto's code:
Traceback (most recent call last):
  File "[my dir]/./test.py", line 5, in <module>
    wf.debug('wavetable.wav')
  File "[my dir]/wavfile.py", line 419, in debug
    format_str = format.decode("utf-8")
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 1: invalid start byte
And here is the wavetable I'm testing; hopefully sndup left it intact.
Adding these conditions to the read function returns a dict of markers. It seems that one of the markers is corrupt, so I added an except/pass; it's a bit janky, but it works.
elif chunk_id == b'':
    break
elif chunk_id == b'clm ':
    str1 = fid.read(8)
    size, numcue = struct.unpack('<ii', str1)
    for c in range(numcue):
        try:
            str1 = fid.read(24)
            idx, position, datachunkid, chunkstart, blockstart, sampleoffset = struct.unpack(
                '<iiiiii', str1)
            # _cue.append(position)
            _markersdict[idx][
                'position'] = position  # needed to match labels and markers
        except:
            pass
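If modifying the library is not an option, a standalone sketch along the same lines is below. It walks the RIFF chunks directly and assumes the 'clm ' layout implied by the code above (a 4-byte little-endian cue count, then 24-byte records of six ints, of which the first two are the index and the position); read_clm_markers is a hypothetical name, not part of wavfile.py:

import struct

def read_clm_markers(path):
    """Scan a WAV file for a 'clm ' chunk and return {cue index: position}."""
    markers = {}
    with open(path, 'rb') as fid:
        fid.read(12)  # RIFF header: b'RIFF', overall size, b'WAVE'
        while True:
            header = fid.read(8)
            if len(header) < 8:  # end of file (or a truncated chunk header)
                break
            chunk_id, size = struct.unpack('<4si', header)
            payload = fid.read(size + (size & 1))  # chunks are word-aligned
            if chunk_id != b'clm ':
                continue
            (numcue,) = struct.unpack_from('<i', payload, 0)
            for c in range(numcue):
                offset = 4 + c * 24
                if offset + 24 > len(payload):
                    break  # truncated/corrupt marker, as in the traceback above
                idx, position = struct.unpack_from('<6i', payload, offset)[:2]
                markers[idx] = position
    return markers

print(read_clm_markers('wavetable.wav'))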

Parsing XML using json raises ValueError

I'm trying to parse an XML file using xml.etree.ElementTree and json:
from xml.etree import ElementTree as et
import json

def parse_file(file_name):
    tree = et.ElementTree()
    npcs = {}
    for npc in tree.parse(file_name):
        quests = []
        for quest in npc:
            quest_name = quest.attrib['name']
            stages = []
            for i, stage in enumerate(quest):
                next_stage, choice, npc_condition = None, None, None
                for key, val in stage.attrib.items():
                    val = json.loads(val)
                    if key == 'choices':
                        choice = val
                    elif key == 'next_stage':
                        next_stage = val
                    elif key == 'ncp_condition':
                        npc_condition = {stage.attrib['npc_name']: val}
                stages.append([i, next_stage, choice, npc_condition])
            quests.append({quest_name: stages})
        npcs[npc.attrib['name']] = quests
    return npcs
The XML file:
<?xml version="1.0" encoding="utf-8"?>
<npcs>
    <npc name="NPC NAME">
        <quest0 name="Quest Name here">
            <stage0 choices='{"Option1":1, "Option1":2}'>
                <text>text1</text>
            </stage0>
            <stage1 next_stage="[3,4]">
                <text>text2</text>
            </stage1>
            <stage3 npc_name="other_npc_name" ncp_condition='{"some_condition":false}' next_stage="[3, 4]">
                <text>text3</text>
            </stage3>
        </quest0>
    </npc>
</npcs>
But I'm having trouble with this bit:
<stage3 npc_name="other_npc_name" ncp_condition='{"some_condition":false}' next_stage="[3, 4]">
Traceback:
Traceback (most recent call last):
  File "C:/.../test2.py", line 28, in <module>
    parse_file('quests.xml')
  File "C:/.../test2.py", line 15, in parse_file
    val = json.loads(val)
  File "C:\Python27\lib\json\__init__.py", line 338, in loads
    return _default_decoder.decode(s)
  File "C:\Python27\lib\json\decoder.py", line 366, in decode
    obj, end = self.raw_decode(s, idx=_w(s, 0).end())
  File "C:\Python27\lib\json\decoder.py", line 384, in raw_decode
    raise ValueError("No JSON object could be decoded")
ValueError: No JSON object could be decoded
It raises this error in the line val = json.loads(val) when key="npc_name" and val="other_npc_name".
What's wrong with that? It didn't raise any error when name="some string", but it does when npc_name="some string".
I noticed that if I change "other_npc_name" to '"other_npc_name"' it doesn't complain, but this seems a bit hackish to me.
JSON is a way to store data structures, so it can only decode text that represents such a data structure.
When you try to get JSON to decode something like this:
other_npc_name
JSON can't match this to any valid data type. However, if this is wrapped in quotation marks:
"other_npc_name"
JSON recognizes this as a String (as per the JSON spec, that is how a string is defined).
And this is what is happening in your script:
import json
print json.loads("other_npc_name") #throws error
print json.loads('"other_npc_name"') #returns "other_npc_name" as a Unicode string
Thus, it may seem 'hackish' to wrap the string this way; however, this is really the only way for JSON to decode it.
One potential suggestion is that if the npc_name attribute in XML is always a string, then pull it out as a string instead of trying to decode it as a JSON object.
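For instance, here is a sketch of that approach; the JSON_ATTRS whitelist is hypothetical, and the attrib dict mimics what ElementTree would report for the <stage3> element above. Only attributes known to hold JSON are decoded, and npc_name stays a plain string:

import json

# stage.attrib as ElementTree would report it for the <stage3> element
attrib = {'npc_name': 'other_npc_name',
          'ncp_condition': '{"some_condition":false}',
          'next_stage': '[3, 4]'}

JSON_ATTRS = {'choices', 'next_stage', 'ncp_condition'}  # attributes that hold JSON

parsed = {key: json.loads(val) if key in JSON_ATTRS else val
          for key, val in attrib.items()}
print(parsed)
# {'npc_name': 'other_npc_name', 'ncp_condition': {'some_condition': False},
#  'next_stage': [3, 4]}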

How to output a live JSON feed in Python 3?

I am using Python 3 to access a live JSON feed from http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson. This is the code:
try:
    # For Py 3.0+
    from urllib.request import urlopen
except ImportError:
    # For Py 2
    from urllib2 import urlopen

import json

def printResults(data):
    # Use the json module to load the string data into a dictionary
    theJSON = json.loads(data)  # pass JSON data into a dictionary
    # now we can access the contents of the JSON like any other Python object
    if "title" in theJSON["metadata"]:
        print(theJSON["metadata"]["title"])

def main():
    # JSON feed of earthquake activity larger than 2.5 in the past 24 hours
    urlData = "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson"
    # open url and read contents
    webUrl = urlopen(urlData)
    print(webUrl.getcode())
    if webUrl.getcode() == 200:
        data = webUrl.read()
        # print results
        printResults(data)
    else:
        print("Received an error from server " + str(webUrl.getcode()))

if __name__ == "__main__":
    main()
I get the following output:
Traceback (most recent call last):
  File "<string>", line 420, in run_nodebug
  File "C:\Users\modar\Desktop\jsondata_finished.py", line 56, in <module>
  File "C:\Users\modar\Desktop\jsondata_finished.py", line 50, in main
    else:
  File "C:\Users\modar\jsondata_finished.py", line 13, in printResults
    if "title" in theJSON["metadata"]:
  File "C:\Python33\lib\json\__init__.py", line 319, in loads
    return _default_decoder.decode(s)
  File "C:\Python33\lib\json\decoder.py", line 352, in decode
    obj, end = self.raw_decode(s, idx=_w(s, 0).end())
TypeError: can't use a string pattern on a bytes-like object
How can I fix this? An explanation as to what went wrong would also be great. Thanks in advance.
With the requests library, linked to in my comment above, your code becomes:
import requests

quake_data = requests.get('http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson').json()
print(quake_data['metadata']['title'])
I do hope it helps...
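As for what went wrong: on Python 3.3, json.loads() accepts only str, while urlopen().read() returns bytes, which is what the "can't use a string pattern on a bytes-like object" TypeError is complaining about. If you would rather keep the urllib version, decoding the bytes first is enough; a minimal sketch, assuming the feed is served as UTF-8 (the usual encoding for JSON):

from urllib.request import urlopen
import json

webUrl = urlopen("http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_day.geojson")
if webUrl.getcode() == 200:
    # Decode bytes to str before handing the data to json.loads().
    theJSON = json.loads(webUrl.read().decode('utf-8'))
    print(theJSON["metadata"]["title"])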

Python Multiprocessing IndexError

I am trying to process a file in parallel by reading it in chunks and handling each chunk with the multiprocessing library. Here is my code:
from multiprocessing import Pool
from itertools import islice
import traceback

# Produce key value pairs (Date, Market_Share*Market_Share)
def Map(L):
    results = []
    for w in L:
        temp = w.split(',')
        Date = temp[0]
        Share = float(temp[1][:-1])
        ShareSquare = str(Share * Share)
        results.append((Date, ShareSquare))
    return results

if __name__ == '__main__':
    pool = Pool(2)
    f = open('C:/Users/Daniel/Desktop/Project/Optiver/atchm_9450.csv', 'r')
    fw = open('C:/Users/Daniel/Desktop/Project/Optiver/marketshare.csv', 'w')
    f.readline()
    while True:
        next_n_lines = list(islice(f, 16))
        if not next_n_lines:
            break
        else:
            l = pool.map(Map, next_n_lines)
    f.close()
    fw.close()
However, it produces an index out of range error:
Traceback (most recent call last):
  File "trial.py", line 29, in <module>
    l = pool.map(Map,next_n_lines)
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 251, in map
    return self.map_async(func, iterable, chunksize).get()
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 558, in get
    raise self._value
IndexError: list index out of range
The list object I passed into the Map function is something like ['6/26/2014,68.90\n', '6/27/2014,68.84\n', '6/30/2014,68.80\n'....]
It works correctly when there is no parallelism involved (pool is not invoked).
What could be causing this behavior?
At first glance, only those two lines can raise this exception:
Date = temp[0]
Share = float(temp[1][:-1])
Try checking that w has enough data.
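A sketch of that check (a hypothetical guard, not a fix the poster confirmed): note that pool.map(Map, next_n_lines) calls Map once per element, so L arrives as a single line string and for w in L iterates over its characters; a one-character w splits into a single-element list, and temp[1] is what raises the IndexError.

def Map(L):
    results = []
    for w in L:
        temp = w.split(',')
        if len(temp) < 2:
            # w has no comma (e.g. it is a single character because a whole
            # line, not a list of lines, was passed in as L), so skip it.
            continue
        Date = temp[0]
        Share = float(temp[1][:-1])
        results.append((Date, str(Share * Share)))
    return results

If the intent was for each worker to process a batch of 16 lines, mapping over a list of chunks instead, e.g. l = pool.map(Map, [next_n_lines]), hands Map an actual list of lines rather than one line at a time.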

Try except not catching IOError from class

I have a class that reads a file of a particular format. These files tend to be greater than 8 GB in size, so they are usually compressed. When reading a file in, I wanted to catch the error raised when the file is not actually compressed, but neither except IOError: nor a bare except: will do so, for a reason I don't understand.
There are a few classes defined together in the file VCF.py; the offending class is vcfReader(). Below are VCF.py, then test.py, the file from which the object is instantiated, and lastly the traceback.
Anyone have any ideas as to why it isn't working?
VCF.py
import gzip
import sys

class Call():
    '''
    Class to handle the sample genotypes and associated information
    '''
    def __init__(self, site, sample, format, data):
        pass  # do stuff here

class Variant():
    '''
    Class for a single row from a VCF file.
    '''
    def __init__(self, entry, samples):
        pass  # do other stuff here

class vcfReader():
    '''
    read a compressed vcf file ignoring the meta-information, but parsing the header for sample names
    '''
    def __init__(self, file):
        try:
            self.vcfFile = gzip.open(file, 'rb')
        except IOError:
            print "Not a gzipped file"
            sys.exit()
        self.samples = self.readHeader()

    def readHeader(self):
        line = self.vcfFile.next()
        while line.startswith('#'):
            if line[1] != '#':
                # Lines starting with '##' (meta tags) are ignored; the header line
                # starts with a single '#' and the sample names are extracted from it.
                return line.rstrip().rsplit('\t')[9:]
            else:
                line = self.vcfFile.next()

    def __iter__(self):
        return self

    def next(self):
        row = self.vcfFile.next()
        return Variant(row, self.samples)
and then test.py
import VCF
from collections import Counter

if __name__ == '__main__':
    vcfreader = VCF.vcfReader('all_samples.vcf')
    filters = []
    for i in vcfreader:
        filters.extend(i.FILTERS)
    filters = Counter(filters)
    for k, v in filters.iteritems():
        print "{0}: {1}".format(k, v)
Here is the traceback:
Traceback (most recent call last):
  File "C:\Users\Davy\Documents\Programming\VCF_stuff\src\test.py", line 10, in <module>
    vcfreader = VCF.vcfReader('all_samples.vcf')
  File "C:\Users\Davy\Documents\Programming\VCF_stuff\src\VCF.py", line 95, in __init__
    self.samples = self.readHeader()
  File "C:\Users\Davy\Documents\Programming\VCF_stuff\src\VCF.py", line 98, in readHeader
    line = self.vcfFile.next()
  File "C:\Python27\lib\gzip.py", line 450, in readline
    c = self.read(readsize)
  File "C:\Python27\lib\gzip.py", line 256, in read
    self._read(readsize)
  File "C:\Python27\lib\gzip.py", line 291, in _read
    self._read_gzip_header()
  File "C:\Python27\lib\gzip.py", line 185, in _read_gzip_header
    raise IOError, 'Not a gzipped file'
IOError: Not a gzipped file
The reason your except block doesn't catch the exception is that it happens outside the try block:
def __init__(self, file):
    try:
        self.vcfFile = gzip.open(file, 'rb')
    except IOError:
        print "Not a gzipped file"
        sys.exit()
    self.samples = self.readHeader()  # <<<<<<<< exception is raised here
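One minimal fix along those lines is to move the first read inside the try block: gzip.open() succeeds even on a non-gzipped file, because the gzip header is only checked on the first actual read. A sketch, keeping the question's Python 2 style:

def __init__(self, file):
    try:
        self.vcfFile = gzip.open(file, 'rb')
        # The gzip header is only validated when we actually read, so
        # readHeader() must also be inside the try block.
        self.samples = self.readHeader()
    except IOError:
        print "Not a gzipped file"
        sys.exit()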
