Python: Merge two CSV files to multilevel JSON

I am very new to Python/JSON, so please bear with me on this. I could do this in R, but we need to use Python so that we can move it to Python/Spark/MongoDB. I am also posting only a minimal subset; I have a couple more file types, so if anyone can help me with this, I can build upon it to integrate more files and file types.
Getting back to my problem:
I have two TSV input files that I need to merge and convert to JSON. Both files have gene and sample columns plus some additional columns. However, the gene and sample values may or may not overlap, as shown below: f2.tsv has all the genes in f1.tsv but also has an additional gene g3. Similarly, both files have overlapping as well as non-overlapping values in the sample column.
# f1.tsv – has gene, sample and additional column other1
$ cat f1.tsv
gene sample other1
g1 s1 a1
g1 s2 b1
g1 s3a c1
g2 s4 d1
# f2.tsv – has gene, sample and additional columns other21, other22
$ cat f2.tsv
gene sample other21 other22
g1 s1 a21 a22
g1 s2 b21 b22
g1 s3b c21 c22
g2 s4 d21 d22
g3 s5 f21 f22
The gene forms the top level, each gene has multiple samples which form the second level and the additional columns form the extras which is the third level. The extras are divided into two because one file has other1 and the second file has other21 and other22. The other files that I will include later will have other fields like other31 and other32 and so on but they will still have the gene and sample columns.
# expected output – JSON by combining both tsv files.
$ cat output.json
[{
    "gene": "g1",
    "samples": [
        {
            "sample": "s2",
            "extras": [
                {
                    "other1": "b1"
                },
                {
                    "other21": "b21",
                    "other22": "b22"
                }
            ]
        },
        {
            "sample": "s1",
            "extras": [
                {
                    "other1": "a1"
                },
                {
                    "other21": "a21",
                    "other22": "a22"
                }
            ]
        },
        {
            "sample": "s3b",
            "extras": [
                {
                    "other21": "c21",
                    "other22": "c22"
                }
            ]
        },
        {
            "sample": "s3a",
            "extras": [
                {
                    "other1": "c1"
                }
            ]
        }
    ]
}, {
    "gene": "g2",
    "samples": [
        {
            "sample": "s4",
            "extras": [
                {
                    "other1": "d1"
                },
                {
                    "other21": "d21",
                    "other22": "d22"
                }
            ]
        }
    ]
}, {
    "gene": "g3",
    "samples": [
        {
            "sample": "s5",
            "extras": [
                {
                    "other21": "f21",
                    "other22": "f22"
                }
            ]
        }
    ]
}]
How do I convert two CSV/TSV files to a single multilevel JSON file based on two common columns?
I would really appreciate any help that I can get on this.
Thanks!

Here's another option. I tried to make it easy to manage when you start adding more files. You can run it on the command line and provide arguments, one for each file you want to add in. Gene/sample names are stored in dictionaries to improve efficiency. The formatting of your desired JSON object is done in each class's format() method. Hope this helps.
import csv, json, sys

class Sample(object):
    def __init__(self, name, extras):
        self.name = name
        self.extras = [extras]

    def format(self):
        map = {}
        map['sample'] = self.name
        map['extras'] = self.extras
        return map

    def add_extras(self, extras):
        # edit 8/20
        # always just add the new extras to the list
        for extra in extras:
            self.extras.append(extra)

class Gene(object):
    def __init__(self, name, samples):
        self.name = name
        self.samples = samples

    def format(self):
        map = {}
        map['gene'] = self.name
        map['samples'] = sorted([self.samples[sample_key].format() for sample_key in self.samples],
                                key=lambda sample: sample['sample'])
        return map

    def create_or_add_samples(self, new_samples):
        # loop through new samples, seeing if they already exist in the gene object
        for sample_name in new_samples:
            sample = new_samples[sample_name]
            if sample.name in self.samples:
                self.samples[sample.name].add_extras(sample.extras)
            else:
                self.samples[sample.name] = sample

class Genes(object):
    def __init__(self):
        self.genes = {}

    def format(self):
        return sorted([self.genes[gene_name].format() for gene_name in self.genes],
                      key=lambda gene: gene['gene'])

    def create_or_add_gene(self, gene):
        if not gene.name in self.genes:
            self.genes[gene.name] = gene
        else:
            self.genes[gene.name].create_or_add_samples(gene.samples)

def row_to_gene(headers, row):
    gene_name = ""
    sample_name = ""
    extras = {}
    for value in enumerate(row):
        if headers[value[0]] == "gene":
            gene_name = value[1]
        elif headers[value[0]] == "sample":
            sample_name = value[1]
        else:
            extras[headers[value[0]]] = value[1]
    sample_dict = {}
    sample_dict[sample_name] = Sample(sample_name, extras)
    return Gene(gene_name, sample_dict)

if __name__ == '__main__':
    delim = "\t"
    genes = Genes()
    files = sys.argv[1:]
    for file in files:
        print("Reading " + str(file))
        with open(file, 'r') as f1:
            reader = csv.reader(f1, delimiter=delim)
            headers = []
            for row in reader:
                if len(headers) == 0:
                    headers = row
                else:
                    genes.create_or_add_gene(row_to_gene(headers, row))
    result = json.dumps(genes.format(), indent=4)
    print(result)
    with open('json_output.txt', 'w') as output:
        output.write(result)
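For example, assuming the script above is saved as merge_tsv.py (the name is just a placeholder), it could be run as python merge_tsv.py f1.tsv f2.tsv; each file named on the command line is merged in turn, and the combined JSON is printed and also written to json_output.txt.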

This looks like a problem for pandas! Unfortunately pandas only takes us so far and we then have to do some manipulation on our own. This is neither fast nor particularly efficient code, but it will get the job done.
import pandas as pd
import json
from collections import defaultdict

# here we import the tsv files as pandas dataframes
f1 = pd.read_table('f1.tsv', delim_whitespace=True)
f2 = pd.read_table('f2.tsv', delim_whitespace=True)

# we then let pandas merge them
newframe = f1.merge(f2, how='outer', on=['gene', 'sample'])

# have pandas write them out to a json string, and then read it back in as a
# python object (a list of dicts)
pythonList = json.loads(newframe.to_json(orient='records'))

newDict = {}
for d in pythonList:
    gene = d['gene']
    sample = d['sample']
    sampleDict = {'sample': sample,
                  'extras': []}
    extrasdict = defaultdict(dict)
    if gene not in newDict:
        newDict[gene] = {'gene': gene, 'samples': []}
    for key, value in d.items():
        if 'other' not in key or value is None:
            continue
        else:
            id = key.split('other')[-1]
            if len(id) == 1:
                extrasdict['1'][key] = value
            else:
                extrasdict['{}'.format(id[0])][key] = value
    for value in extrasdict.values():
        sampleDict['extras'].append(value)
    newDict[gene]['samples'].append(sampleDict)

newList = [v for k, v in newDict.items()]
print(json.dumps(newList))
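For reference, with the sample data above the intermediate outer merge should contain one row per (gene, sample) pair, with nulls where a file lacked that pair; you can check it by printing the frame before the reshaping loop:
# Expected shape of the merged frame for the sample inputs:
#   gene sample other1 other21 other22
#   g1   s1     a1     a21     a22
#   g1   s2     b1     b21     b22
#   g1   s3a    c1     NaN     NaN
#   g2   s4     d1     d21     d22
#   g1   s3b    NaN    c21     c22
#   g3   s5     NaN    f21     f22
print(newframe)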
If this looks like a solution that will work for you, I am happy to spend some time cleaning it up to make it a bit more readable and efficient.
PS: If you like R, then pandas is the way to go (it was written to give an R-like interface to data in Python).

Do it in steps:
1. Read the incoming TSV files and aggregate the information from the different genes into a dictionary.
2. Process that dictionary to match your desired format.
3. Write the result to a JSON file.
Here is the code:
import csv
import json
from collections import defaultdict

input_files = ['f1.tsv', 'f2.tsv']
output_file = 'genes.json'

# Step 1
gene_dict = defaultdict(lambda: defaultdict(list))
for file in input_files:
    with open(file, 'r') as f:
        reader = csv.DictReader(f, delimiter='\t')
        for line in reader:
            gene = line.pop('gene')
            sample = line.pop('sample')
            gene_dict[gene][sample].append(line)

# Step 2
out = [{'gene': gene,
        'samples': [{'sample': sample, 'extras': extras}
                    for sample, extras in samples.items()]}
       for gene, samples in gene_dict.items()]

# Step 3
with open(output_file, 'w') as f:
    json.dump(out, f)
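For the sample files above, the intermediate gene_dict from step 1 should look roughly like this (shown as plain dicts for readability):
{'g1': {'s1': [{'other1': 'a1'}, {'other21': 'a21', 'other22': 'a22'}],
        's2': [{'other1': 'b1'}, {'other21': 'b21', 'other22': 'b22'}],
        's3a': [{'other1': 'c1'}],
        's3b': [{'other21': 'c21', 'other22': 'c22'}]},
 'g2': {'s4': [{'other1': 'd1'}, {'other21': 'd21', 'other22': 'd22'}]},
 'g3': {'s5': [{'other21': 'f21', 'other22': 'f22'}]}}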

Related

How do I alter an existing text file to add new data after a specific line using Python

So I am looking to create a script to make a mod for a game using Python. The script will need to copy all files from one directory to another directory, then alter those files to add a new attribute after a specific line. The issue I am having is that this game uses custom code based on JSON-like formatting in a .txt file. I know how to do most of this; however, adding the new data is not something I can get to work.
My end goal will be to be able to do this to any file, so other mod authors can use it to add the data to their mods without needing to do it manually. I also want to try to make this script do more advanced things, but that is another goal that can wait till I get this bit working.
Sample data:
The line I need to add is position_priority = ###. The ### will be different based on what the building does (building categories).
Sample code I need to alter:
building_name_number = {
    base_build_time = 60
    base_cap_amount = 1
    category = pop_assembly
    <more code>
}
I need to put the new data just after building_name_number; however, this exact name will be unique. The only thing that will always be the same is that it will start with building. So regex is what I have been trying to use, but I have never dealt with regex before and can't get it to work.
My Current code:
if testingenabled:
    workingdir = R"E:/Illusives-Mods/Stellaris/Building Sorting"
    pattern = "^building_"
    Usortingindex = sortingindex["sorting_pop_assembly"]
    print(f"Testing Perameters: Index: {Usortingindex}, Version: {__VERSION__}, Working DIR: {workingdir}")
    # os.chdir(stellaris_buildings_path)
    os.chdir(workingdir)
    for file in os.listdir(workingdir):
        if fnmatch.fnmatch(file, "*.txt"):
            print("File found")
            with open(file, "r+", encoding="utf-8") as openfiledata:
                alllines = openfiledata.read()
                for line in alllines:
                    if line == re.match(r'(^building_)', line, re.M):
                        print("found match")
                        # print(f"{sorting_attrib}{Usortingindex}")
                        # print("position_priority = 200")
                        openfiledata.write("\n" + sorting_attrib + Usortingindex + "\n")
                        break
I am not getting any errors with this code, but it doesn't work.
I am using Python 3.9.6.
EDIT:
This code is before the script
allow = {
    hidden_trigger = {
        OR = {
            owner = { is_ai = no }
            NAND = {
                free_district_slots = 0
                free_building_slots <= 1
                free_housing <= 0
                free_jobs <= 0
            }
        }
    }
}
This is after
allow = {
    hidden_trigger = {
        OR = {
            owner = {
                is_ai = false
            }
            NAND = {
                free_district_slots = 0
                free_building_slots = {
                    value = 1
                    operand = <=
                }
                free_housing = {
                    value = 0
                    operand = <=
                }
                free_jobs = {
                    value = 0
                    operand = <=
                }
            }
        }
    }
}
The output must be the same as the input, at least in terms of the operators.
If you kept it as JSON, then you could read it all into Python (to get it as a dictionary), search and add items in the dictionary, and write the new dictionary back to JSON.
text = '''{
    "building_name_number": {
        "base_build_time": 60,
        "base_cap_amount": 1,
        "category": "pop_assembly"
    },
    "building_other": {}
}'''

import json

data = json.loads(text)

for key in data.keys():
    if key.startswith('building_'):
        data[key]["position_priority"] = 'some_value'

print(json.dumps(data, indent=4))
Result:
{
    "building_name_number": {
        "base_build_time": 60,
        "base_cap_amount": 1,
        "category": "pop_assembly",
        "position_priority": "some_value"
    },
    "building_other": {
        "position_priority": "some_value"
    }
}
I found the module paradox-reader, which can convert this file format to a JSON file.
Using code from the file paradoxReader.py, I created an example which can convert a string to a Python dictionary, add some value, and convert it back to something similar to the original file. But encode() may need more code to handle other cases.
import json
import re

def decode(data):  # , no_json):
    data = re.sub(r'#.*', '', data)  # Remove comments
    data = re.sub(r'(?<=^[^\"\n])*(?<=[0-9\.\-a-zA-Z])+(\s)(?=[0-9\.\-a-zA-Z])+(?=[^\"\n]*$)', '\n', data, flags=re.MULTILINE)  # Separate one-line lists
    data = re.sub(r'[\t ]', '', data)  # Remove tabs and spaces
    definitions = re.findall(r'(#\w+)=(.+)', data)  # replace #variables with value
    if definitions:
        for definition in definitions:
            data = re.sub(r'^#.+', '', data, flags=re.MULTILINE)
            data = re.sub(definition[0], definition[1], data)
    data = re.sub(r'\n{2,}', '\n', data)  # Remove excessive new lines
    data = re.sub(r'\n', '', data, count=1)  # Remove the first new line
    data = re.sub(r'{(?=\w)', '{\n', data)  # reformat one-liners
    data = re.sub(r'(?<=\w)}', '\n}', data)  # reformat one-liners
    data = re.sub(r'^[\w-]+(?=[\=\n><])', r'"\g<0>"', data, flags=re.MULTILINE)  # Add quotes around keys
    data = re.sub(r'([^><])=', r'\1:', data)  # Replace = with : but not >= or <=
    data = re.sub(r'(?<=:)(?!-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?)(?!\".*\")[^{\n]+', r'"\g<0>"', data)  # Add quotes around string values
    data = re.sub(r':"yes"', ':true', data)  # Replace yes with true
    data = re.sub(r':"no"', ':false', data)  # Replace no with false
    data = re.sub(r'([<>]=?)(.+)', r':{"value":\g<2>,"operand":"\g<1>"}', data)  # Handle < > >= <=
    data = re.sub(r'(?<![:{])\n(?!}|$)', ',', data)  # Add commas
    data = re.sub(r'\s', '', data)  # remove all white space
    data = re.sub(r'{(("[a-zA-Z_]+")+)}', r'[\g<1>]', data)  # make lists
    data = re.sub(r'""', r'","', data)  # Add commas to lists
    data = re.sub(r'{("\w+"(,"\w+")*)}', r'[\g<1>]', data)
    data = re.sub(r'((\"hsv\")({\d\.\d{1,3}(,\d\.\d{1,3}){2}})),', r'{\g<2>:\g<3>},', data)  # fix hsv objects
    data = re.sub(r':{([^}{:]*)}', r':[\1]', data)  # if there's no : between list elements need to replace {} with []
    data = re.sub(r'\[(\w+)\]', r'"\g<1>"', data)
    data = re.sub(r'\",:{', '":{', data)  # Fix user_empire_designs
    data = '{' + data + '}'
    return json.loads(data)

def encode(data):
    text = json.dumps(data, indent=4)
    text = text[2:-2]
    text = text.replace('"', '').replace(':', ' =').replace(',', '')
    return text

# ----------

text = '''building_name_number = {
base_build_time = 60
base_cap_amount = 1
category = pop_assembly
}'''

data = decode(text)
data['building_name_number']['new_item'] = 123
text = encode(data)
print(text)
Result:
building_name_number = {
    base_build_time = 60
    base_cap_amount = 1
    category = pop_assembly
    new_item = 123
}
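As a rough starting point for extending encode(), a recursive version could walk nested dictionaries and emit key = value lines directly instead of post-processing json.dumps() output. This is only a sketch and assumes values are plain scalars, booleans, or nested dicts; lists, comparison operators, and quoted strings would still need extra handling:
def encode(data, indent=0):
    # Sketch: recursively emit Paradox-style "key = value" lines.
    # Assumes values are scalars, booleans, or nested dicts only.
    pad = '    ' * indent
    lines = []
    for key, value in data.items():
        if isinstance(value, dict):
            lines.append(f'{pad}{key} = {{')
            lines.append(encode(value, indent + 1))
            lines.append(f'{pad}}}')
        elif isinstance(value, bool):
            lines.append(f'{pad}{key} = {"yes" if value else "no"}')
        else:
            lines.append(f'{pad}{key} = {value}')
    return '\n'.join(lines)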

How to append text to a file in a specific location in Python 3?

I'm trying to append text to a file in a specific location.
I want to create a program which takes input from the user (name, image, id) and adds it to this file:
names = []
images = []
id = 0
url = ['https://somewebsiteUsingId10.com',
       'https://somewebsiteUsingId20.com']

if id == 5:
    names.append("Testing Names")
    images.append("Testing Images")
elif id == 0:
    names.append("Testing one names")
    images.append("Testing one Images")
I want the modified file to be like this:
names = []
images = []
id = 0
url = ['https://somewebsiteUsingId20.com',
       'https://somewebsiteUsingId10.com',
       'https://somewebsiteUsingId50.com']

if id == 5:
    names.append("Testing Names")
    images.append("Testing Images")
elif id == 0:
    names.append("Testing one names")
    images.append("Testing one Images")
elif id == 50:
    names.append("User input")
    images.append("User Input")
Thanks!
In cases like this, a good course of action is to put the variable data in a configuration file.
On start-up, your program reads the configuration file and processes it.
Another program can update the configuration file.
Python has the json module in its standard library. This supports lists and dicts, so it is a good match for Python data structures.
Say you write a file urls.json, looking like this:
[
    "https://somewebsiteUsingId20.com",
    "https://somewebsiteUsingId10.com",
    "https://somewebsiteUsingId50.com"
]
In your program you can then do:
import json

with open("urls.json") as f:
    urls = json.load(f)
The variable urls now points to a list containing the aforementioned URLs.
Writing the config data works about the same way:
urls = [
    "https://www.parrot.org",
    "https://www.ministryofsillywalks.org",
    "https://www.cheese.net",
]

with open("newurls.json", "w") as f:
    json.dump(urls, f, indent=4)
The file newurls.json now contains:
[
    "https://www.parrot.org",
    "https://www.ministryofsillywalks.org",
    "https://www.cheese.net"
]
Note that JSON is pretty flexible; you are not limited to strings:
import datetime

config = {
    'directories': ["https://www.parrot.org", "https://www.ministryofsillywalks.org"],
    'saved': str(datetime.datetime.now()),
    'count': 12
}

with open("configuration.json", "w") as cf:
    json.dump(config, cf, indent=4)
This would result in something like:
{
    "directories": [
        "https://www.parrot.org",
        "https://www.ministryofsillywalks.org"
    ],
    "saved": "2022-02-07 21:21:14.787420",
    "count": 12
}
(You'd get another date/time, of course.)
The only major downside to JSON files is that they don't allow comments. If you need comments, use another format, such as the INI format handled by the configparser module.
Note that there are other methods, like shelve or reading and eval-ing Python literals, but those have potential safety issues.
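For example, a minimal configparser sketch (the file name config.ini and the section name urls are made up for illustration):
import configparser

config = configparser.ConfigParser()
config["urls"] = {
    "parrot": "https://www.parrot.org",
    "cheese": "https://www.cheese.net",
}
with open("config.ini", "w") as f:
    config.write(f)

# INI files allow comments starting with ; or #
config = configparser.ConfigParser()
config.read("config.ini")
print(config["urls"]["parrot"])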

How to Change dictionary values in python file from another file

I would like to change values in a dict in another file. File1.py contains the code to edit the dict; File2.py contains the dict itself.
File1.py is generating a code to replace the BTok value only.
File1.py:
with open('file2.py', 'r') as file:
    filedata = file.read()

print(filedata.str(BTK['btk1']))

for line in filedata:
    line['btk1'] = BTok

with open('file2.py', 'w') as file:
    file.write(line)
File2.py:
c = {
    'id': 'C80e3ce43c3ea3e8d1511ec',
    'secret': 'c10c371b4641010a750073925b0857'
}

rk = {
    't1': 'ZTkwMGE1MGEt',
}

BTK = {
    'BTok': '11eyJhbGc'
}
If you want to do this reliably (that is, so it works whether your strings are quoted with ', " or """, whatever values they have, and whatever newlines you put around them), then you may want to use ast to parse the source code and modify it. The only inconvenience with this is that the module cannot, by itself, generate code, so you would need to install an additional dependency such as astor for what is essentially a rather menial task. In any case, here is how you could do it that way:
import ast
import astor

# To read from file:
# with open('file2.py', 'r') as f: code = f.read()
code = """
c = {
    'id' : 'C80e3ce43c3ea3e8d1511ec',
    'secret' : 'c10c371b4641010a750073925b0857'
}
rk = {
    't1' : 'ZTkwMGE1MGEt',
}
BTK = {
    'BTok' : '11eyJhbGc'
}
"""

# Value to replace
KEY = 'BTok'
NEW_VALUE = 'new_btok'

# Parse code
m = ast.parse(code)

# Go through module statements
for stmt in m.body:
    # Only look at assignments
    if not isinstance(stmt, ast.Assign): continue
    # Take right-hand side of the assignment
    value = stmt.value
    # Only look at dict values
    if not isinstance(value, ast.Dict): continue
    # Look for keys that match what we are looking for
    replace_idx = [i for i, k in enumerate(value.keys)
                   if isinstance(k, ast.Str) and k.s == KEY]
    # Replace corresponding values
    for i in replace_idx:
        value.values[i] = ast.Str(NEW_VALUE)

new_code = astor.to_source(m)
# To write to file:
# with open('file2.py', 'w') as f: f.write(new_code)
print(new_code)
# c = {'id': 'C80e3ce43c3ea3e8d1511ec', 'secret':
# 'c10c371b4641010a750073925b0857'}
# rk = {'t1': 'ZTkwMGE1MGEt'}
# BTK = {'BTok': 'new_btok'}
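On Python 3.9 or later, the standard library's ast.unparse() can generate the code, which removes the astor dependency (recent Python versions also deprecate ast.Str in favour of ast.Constant). A minimal sketch of the same replacement:
import ast

code = "BTK = {'BTok': '11eyJhbGc'}"
m = ast.parse(code)
for stmt in m.body:
    if isinstance(stmt, ast.Assign) and isinstance(stmt.value, ast.Dict):
        for i, k in enumerate(stmt.value.keys):
            if isinstance(k, ast.Constant) and k.value == 'BTok':
                # swap in the new value node
                stmt.value.values[i] = ast.Constant('new_btok')
print(ast.unparse(m))  # BTK = {'BTok': 'new_btok'}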

Convert Json to CSV using Python

Below is the JSON structure I am pulling from my online weather station. I am also including a json-to-csv Python script that is supposed to convert the JSON data to CSV output, but it only returns a KeyError. I want to pull data from "current_observation" only.
{
    "response": {
        "features": {
            "conditions": 1
        }
    },
    "current_observation": {
        "display_location": {
            "latitude": "40.466442",
            "longitude": "-85.362709",
            "elevation": "280.4"
        },
        "observation_time_rfc822": "Fri, 26 Jan 2018 09:40:16 -0500",
        "local_time_rfc822": "Sun, 28 Jan 2018 11:22:47 -0500",
        "local_epoch": "1517156567",
        "local_tz_short": "EST",
        "weather": "Clear",
        "temperature_string": "44.6 F (7.0 C)"
    }
}
import csv, json, sys

inputFile = open("pywu.cache.json", 'r')  # open json file
outputFile = open("CurrentObs.csv", 'w')  # load csv file

data = json.load(inputFile)  # load json content
inputFile.close()  # close the input file

output = csv.writer(outputFile)  # create a csv.writer
output.writerow(data[0].keys())

for row in data:
    output.writerow(row.values())  # values row
What's the best method to retrieve the temperature string and convert to .csv format? Thank you!
import pandas as pd
df = pd.read_json("pywu.cache.json")
df = df.loc[["local_time_rfc822", "weather", "temperature_string"],"current_observation"].T
df.to_csv("pywu.cache.csv")
Maybe pandas can be of help for you. The .read_json() function creates a nice dataframe, from which you can easily choose the desired rows and columns, and it can save as CSV as well.
To add latitude and longitude to the CSV line, you can do this:
df = pd.read_json("pywu.cache.json")
df = df.loc[["local_time_rfc822", "weather", "temperature_string", "display_location"], "current_observation"].T
df = df.append(pd.Series([df["display_location"]["latitude"], df["display_location"]["longitude"]], index=["latitude", "longitude"]))
df = df.drop("display_location")
df.to_csv("pywu.cache.csv")
To print the location as numeric values, you can do this:
df = pd.to_numeric(df, errors="ignore")
print(df['latitude'], df['longitude'])
This will find all keys (e.g. "temperature_string") specified inside the JSON blob and then write them to a CSV file. You can modify this code to get multiple keys.
import csv, json, sys

def find_deep_value(d, key):
    # Find the value of keys hidden within a dict[dict[...]]
    # Modified from https://stackoverflow.com/questions/9807634/find-all-occurrences-of-a-key-in-nested-python-dictionaries-and-lists
    # @param d dictionary to search through
    # @param key key to find
    if key in d:
        yield d[key]
    for k in d.keys():
        if isinstance(d[k], dict):
            for j in find_deep_value(d[k], key):
                yield j

inputFile = open("pywu.cache.json", 'r')  # open json file
outputFile = open("mypws.csv", 'w')  # load csv file

data = json.load(inputFile)  # load json content
inputFile.close()  # close the input file

output = csv.writer(outputFile)  # create a csv.writer

# Gives you a list of temperature_strings from within the json
temps = list(find_deep_value(data, "temperature_string"))
output.writerow(temps)
outputFile.close()
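To pull several fields into one CSV row instead, one possible extension (replacing the single writerow above; the field names are taken from the JSON in the question):
fields = ["local_time_rfc822", "weather", "temperature_string"]
# next(gen, "") takes the first match, or an empty string if the key is absent
row = [next(find_deep_value(data, f), "") for f in fields]
output.writerow(row)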

Most effective way to parse CSV and take action based on content of row

I have a CSV file that Splunk generates, similar in format to the following:
Category,URL,Hash,ID,"__mv_Hash","_mkv_ID"
binary,somebadsite.com/file.exe,12345abcdef,123,,,
callback,bad.com,,567,,,
What I need to do is iterate through the CSV file, maintaining header order, and take a different action if the result is a binary or callback. For this example, if the result is a binary I'll return an arbitrary "clean" or "dirty" rating and if it's a callback I'll just print out the details.
Below is the code I'm currently planning to use, but I'm new to Python and would like feedback on it and on whether there is a better way to accomplish this. I'm also not fully clear on the difference between how I handle the binary case, for k in (k for k in r.fieldnames if (not k.startswith("""__mv_""") and not k.startswith("""_mkv_"""))), and how I handle the callback case. Both achieve the same result, so what's the benefit of one over the other?
import gzip
import csv
import json

csv_file = 'test_csv.csv.gz'

class GZipCSVReader:
    def __init__(self, filename):
        self.gzfile = gzip.open(filename)
        self.reader = csv.DictReader(self.gzfile)
        self.fieldnames = self.reader.fieldnames

    def next(self):
        return self.reader.next()

    def close(self):
        self.gzfile.close()

    def __iter__(self):
        return self.reader.__iter__()

def get_rating(hash):
    if hash == "12345abcdef":
        rating = "Dirty"
    else:
        rating = "Clean"
    return hash, rating

def print_callback(result):
    print json.dumps(result, sort_keys=True, indent=4, separators=(',', ':'))

def process_results_content(r):
    for row in r:
        values = {}
        values_misc = {}
        if row["Category"] == "binary":
            # Iterate through key:value pairs and add to dictionary
            for k in (k for k in r.fieldnames if (not k.startswith("""__mv_""") and not k.startswith("""_mkv_"""))):
                v = row[k]
                values[k] = v
            rating = get_rating(row["Hash"])
            if rating[1] == "Dirty":
                print rating
        else:
            for k in r.fieldnames:
                if not k.startswith("""__mv_""") and not k.startswith("""_mkv_"""):
                    v = row[k]
                    values_misc[k] = v
            print_callback(values_misc)
    r.close()

if __name__ == '__main__':
    r = GZipCSVReader(csv_file)
    process_results_content(r)
Finally, would a for...else loop be better than doing something such as if row["Category"] == "binary"? For example, could I do something such as:
def process_results_content(r):
    for row in r:
        values = {}
        values_misc = {}
        for k in (k for k in r.fieldnames if (not row["Category"] == "binary")):
            v = row[k]
            ...
        else:
            v = row[k]
            ...
It seems like that would be the same logic, where the first clause would capture anything that is not binary and the second would capture everything else, but it does not seem to produce the correct result.
My take using the pandas library.
Code:
import pandas as pd

csv_file = 'test_csv.csv'

df = pd.read_csv(csv_file)
df = df[["Category", "URL", "Hash", "ID"]]  # Remove the other columns.

get_rating = lambda x: "Dirty" if x == "12345abcdef" else "Clean"
df["Rating"] = df["Hash"].apply(get_rating)  # Assign a value to each row based on Hash value.
print(df)

j = df.to_json()  # Self-explanatory. :)
print(j)
Result:
   Category                       URL         Hash   ID Rating
0    binary  somebadsite.com/file.exe  12345abcdef  123  Dirty
1  callback                   bad.com          NaN  567  Clean
{"Category":{"0":"binary","1":"callback"},"URL":{"0":"somebadsite.com\/file.exe","1":"bad.com"},"Hash":{"0":"12345abcdef","1":null},"ID":{"0":123,"1":567},"Rating":{"0":"Dirty","1":"Clean"}}
If this is your intended result, then just substitute the above into your GZipCSVReader, since I did not emulate the opening of the gzip file.
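As a side note, pandas should also be able to read the gzipped file directly, since read_csv can decompress on the fly (inferred from the .gz extension, or forced with the compression argument):
df = pd.read_csv('test_csv.csv.gz', compression='gzip')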
