How can I pickle a Python object into a CSV file?

I am trying to pickle a Python object into a CSV file, writing the pickled object as the third column of each row. I want to use pickle to avoid writing custom serialization for my complex objects.
Code to write to the CSV:
with open(self.file_path, 'a') as csv_file:
    wr = csv.writer(csv_file, delimiter='|')
    row = ['klines', symbol]
    row.extend(pickle.dumps(object))
    wr.writerow(row)
Code to read the CSV:
with open(self.simulation_file_name, 'r') as csv_file:
    line = csv_file.readline()
    while line != '':
        line = line.strip('\n')
        columns = line.split('|')
        event_type = columns.pop(0)
        symbol = columns.pop(0)
        pickled = columns.pop(0)
        klines = pickle.loads(pickled)
        line = csv_file.readline()
I get the following error:
TypeError: a bytes-like object is required, not 'str'

To write bytes/binary data in a text file like a CSV, use base64 (or a similar encoding) to avoid any escaping problems. The code below is simplified and assumes Python 3.
import base64
import csv
import pickle

with open('a.csv', 'a', encoding='utf8') as csv_file:
    wr = csv.writer(csv_file, delimiter='|')
    pickle_bytes = pickle.dumps(obj)            # unsafe to write
    b64_bytes = base64.b64encode(pickle_bytes)  # safe to write, but still bytes
    b64_str = b64_bytes.decode('utf8')          # safe, and in utf8
    wr.writerow(['col1', 'col2', b64_str])

# the file now contains
# col1|col2|gANdcQAu

with open('a.csv', 'r') as csv_file:
    for line in csv_file:
        line = line.strip('\n')
        b64_str = line.split('|')[2]                   # take the pickled obj
        obj = pickle.loads(base64.b64decode(b64_str))  # retrieve
P.S. If you are not writing a UTF-8 file (e.g. an ASCII file), simply replace the encoding accordingly.
P.S. Writing bytes in a CSV is possible, yet hardly elegant. One alternative is pickling a whole dict whose values are the dumped objects, and storing only the keys in the CSV.
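For illustration, a minimal sketch of that alternative (the file names, the key scheme, and the example objects are hypothetical, not from the question): the CSV stays purely textual, and pickle keeps all binary concerns in its own file.

import csv
import pickle

# hypothetical example data; any picklable object works as a value
symbol = 'BTCUSDT'
klines_object = {'open': [1.0, 2.0], 'close': [1.5, 2.5]}

objects = {}                  # key -> complex object
key = f'klines-{symbol}'      # unique key stored in the CSV row
objects[key] = klines_object  # keep the object itself out of the CSV

with open('a.csv', 'w', newline='') as csv_file:
    wr = csv.writer(csv_file, delimiter='|')
    wr.writerow(['klines', symbol, key])

# dump the whole dict into a separate binary file
with open('objects.pkl', 'wb') as f:
    pickle.dump(objects, f)

# reading back: resolve each CSV row's key against the loaded dict
with open('objects.pkl', 'rb') as f:
    objects = pickle.load(f)
with open('a.csv', newline='') as csv_file:
    for event_type, symbol, key in csv.reader(csv_file, delimiter='|'):
        klines = objects[key]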

Related

Converting a large CSV file to multiple JSON files using Python

I am currently using the following code to convert a large CSV file to a JSON file.
import csv
import json

def csv_to_json(csvFilePath, jsonFilePath):
    jsonArray = []
    with open(csvFilePath, encoding='utf-8') as csvf:
        csvReader = csv.DictReader(csvf)
        for row in csvReader:
            jsonArray.append(row)
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        jsonString = json.dumps(jsonArray, indent=4)
        jsonf.write(jsonString)

csvFilePath = r'test_data.csv'
jsonFilePath = r'test_data.json'
csv_to_json(csvFilePath, jsonFilePath)
This code works fine and I am able to convert the CSV to JSON without any issues. However, the CSV file contains 600,000+ rows, and hence as many items end up in my JSON, which has made the JSON file very difficult to manage.
I would like to modify my code above so that for every 5000 rows of the CSV, the data is written into a new JSON file. Ideally, I would have 120 (600,000/5000) JSON files in this case.
How can I do the same?
Split up your read/write methods and add a simple threshold:
JSON_ENTRIES_THRESHOLD = 5000  # modify to whatever you see suitable

def write_json(json_array, filename):
    with open(filename, 'w', encoding='utf-8') as jsonf:
        json.dump(json_array, jsonf)  # note the usage of .dump directly to a file descriptor

def csv_to_json(csvFilePath, jsonFilePath):
    jsonArray = []
    with open(csvFilePath, encoding='utf-8') as csvf:
        csvReader = csv.DictReader(csvf)
        filename_index = 0
        for row in csvReader:
            jsonArray.append(row)
            if len(jsonArray) >= JSON_ENTRIES_THRESHOLD:
                # if we reached the threshold, write out
                write_json(jsonArray, f"{jsonFilePath}-{filename_index}.json")
                filename_index += 1
                jsonArray = []
    # finally, write out the remainder (if any)
    if jsonArray:
        write_json(jsonArray, f"{jsonFilePath}-{filename_index}.json")

Python requests data in file, line by line

I am trying to get the output of this request (https://api.opendota.com/api/players/7841909) into a file, line by line.
For some reason the output is stored as bytes and not str, which I can convert with str().
I tried using a regular expression to store just the information between the {}, and also tried the csv module, which led to storing only digits.
What did I do wrong? The following version ignores the linebreaks and the delimiters. :/
import requests
import csv
import re

dotaId = "7841909"  # thus stored as a string
pfad = "https://api.opendota.com/api/players/" + dotaId + "/matches"
req = requests.get(pfad)

with open('%s.csv' % dotaId, 'w') as file:
    clean_line = re.findall(r'\{(.*?)\}', req.text)
    file.write(str(clean_line))
Your object clean_line is a list, which you are writing to the file as a single line.
It is better to use the csv writer module and write the content row by row:
with open('new_file.csv', 'w', newline='') as file:
    writer = csv.writer(file, quotechar="'")
    clean_lines = re.findall(r'\{(.*?)\}', req.text)
    for line in clean_lines:
        writer.writerow([str(line)])
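Alternatively, since the endpoint returns JSON, a sketch that skips the regex entirely (assuming the response is a JSON array of flat match objects, which is what this OpenDota endpoint appears to return):

import csv
import requests

req = requests.get("https://api.opendota.com/api/players/7841909/matches")
matches = req.json()  # parsed list of dicts, one per match

with open('7841909.csv', 'w', newline='') as f:
    # take the column names from the first match object
    writer = csv.DictWriter(f, fieldnames=list(matches[0].keys()))
    writer.writeheader()
    writer.writerows(matches)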

Converting JSON to CSV, CSV is empty

I'm attempting to convert Yelp's data set from JSON to CSV format. The new CSV file that is created is empty.
I've tried different ways to iterate through the JSON, but they all give me a zero-byte file.
The json file looks like this:
{"business_id":"1SWheh84yJXfytovILXOAQ","name":"Arizona Biltmore Golf Club","address":"2818 E Camino Acequia Drive","city":"Phoenix","state":"AZ","postal_code":"85016","latitude":33.5221425,"longitude":-112.0184807,"stars":3.0,"review_count":5,"is_open":0,"attributes":{"GoodForKids":"False"},"categories":"Golf, Active Life","hours":null}
import json
import csv

infile = open("business.json", "r")
outfile = open("business2.csv", "w")
data = json.load(infile)
infile.close()

out = csv.writer(outfile)
out.writerow(data[0].keys())
for row in data:
    out.writerow(row.values())
I get an "extra data" message when the code runs. The new business2 csv file is empty and the size is zero bytes.
If your JSON has only one row, then try this:
infile = open("business.json","r")
outfile = open("business2.csv","w")
data = json.load(infile)
infile.close()
out = csv.writer(outfile)
#print(data.keys())
out.writerow(data.keys())
out.writerow(data.values())
Please try the code below; by using the with statement, the file automatically gets closed when control moves out of the with block:
infile = open("business.json","r")
outfile = open("business2.csv","w")
data = json.load(infile)
infile.close()
headers = list(data.keys())
values = list(data.values())
with open("business2.csv","w") as outfile:
out = csv.writer(outfile)
out.writerow(headers)
out.writerow(values)
You need to use with to close the file:
import json
import csv

infile = open("business.json", "r")
data = json.load(infile)
infile.close()

with open("business2.csv", "w") as outfile:
    out = csv.writer(outfile)
    out.writerow(list(data.keys()))
    out.writerow(list(data.values()))
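Note that the "Extra data" message usually means the file is in JSON Lines format (one JSON object per line), which json.load cannot parse as a single document; the Yelp dataset is distributed that way. A minimal sketch for that case (assuming every line holds an object with the same keys):

import csv
import json

with open("business.json", "r", encoding="utf-8") as infile:
    # parse each non-empty line as its own JSON object
    rows = [json.loads(line) for line in infile if line.strip()]

with open("business2.csv", "w", newline="") as outfile:
    writer = csv.DictWriter(outfile, fieldnames=list(rows[0].keys()))
    writer.writeheader()
    writer.writerows(rows)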

Python read in file: ERROR: line contains NULL byte

I would like to parse a .ubx file (my input file). This file contains many different NMEA sentences as well as raw receiver data. The output file should contain only information from GGA sentences. This works fine as long as the .ubx file does not contain any raw messages. However, if it contains raw data,
I get the following error:
Traceback (most recent call last):
  File "C:...myParser.py", line 25, in <module>
    for row in reader:
Error: line contains NULL byte
My code looks like this:
import csv
from datetime import datetime
import math

# adapt this to your file
INPUT_FILENAME = 'Rover.ubx'
OUTPUT_FILENAME = 'out2.csv'

# open the input file in read mode
with open(INPUT_FILENAME, 'r') as input_file:
    # open the output file in write mode
    with open(OUTPUT_FILENAME, 'wt') as output_file:
        # create a csv reader object from the input file (nmea files are basically csv)
        reader = csv.reader(input_file)
        # create a csv writer object for the output file
        writer = csv.writer(output_file, delimiter=',', lineterminator='\n')
        # write the header line to the csv file
        writer.writerow(['Time', 'Longitude', 'Latitude', 'Altitude', 'Quality', 'Number of Sat.', 'HDOP', 'Geoid separation', 'diffAge'])
        # iterate over all the rows in the nmea file
        for row in reader:
            if row[0].startswith('$GNGGA'):
                time = row[1]
                # merge the time and date columns into one Python datetime object (usually more convenient than having both separately)
                date_and_time = datetime.strptime(time, '%H%M%S.%f')
                date_and_time = date_and_time.strftime('%H:%M:%S.%f')[:-6]
                writer.writerow([date_and_time])
My .ubx file looks like this:
$GNGSA,A,3,16,25,29,20,31,26,05,21,,,,,1.30,0.70,1.10*10
$GNGSA,A,3,88,79,78,81,82,80,72,,,,,,1.30,0.70,1.10*16
$GPGSV,4,1,13,02,08,040,17,04,,,47,05,18,071,44,09,02,348,24*49
$GPGSV,4,2,13,12,03,118,24,16,12,298,36,20,15,118,30,21,44,179,51*74
$GPGSV,4,3,13,23,06,324,35,25,37,121,47,26,40,299,48,29,60,061,49*73
$GPGSV,4,4,13,31,52,239,51*42
$GLGSV,3,1,10,65,07,076,24,70,01,085,,71,04,342,34,72,13,029,35*64
$GLGSV,3,2,10,78,35,164,41,79,75,214,48,80,34,322,46,81,79,269,49*64
$GLGSV,3,3,10,82,28,235,52,88,39,043,43*6D
$GNGLL,4951.69412,N,00839.03672,E,124610.00,A,D*71
$GNGST,124610.00,12,,,,0.010,0.010,0.010*4B
$GNZDA,124610.00,03,07,2016,00,00*79
[... raw binary UBX receiver data (unprintable bytes) omitted ...]
$GNRMC,124611.00,A,4951.69413,N,00839.03672,E,0.009,,030716,,,D*62
$GNVTG,,T,,M,0.009,N,0.016,K,D*36
$GNGNS,124611.00,4951.69413,N,00839.03672,E,RR,15,0.70,162.5,47.6,1.0,0000*42
$GNGGA,124611.00,4951.69413,N,00839.03672,E,4,12,0.70,162.5,M,47.6,M,1.0,0000*6A
$GNGSA,A,3,16,25,29,20,31,26,05,21,,,,,1.31,0.70,1.10*11
$GNGSA,A,3,88,79,78,81,82,80,72,,,,,,1.31,0.70,1.10*17
$GPGSV,4,1,13,02,08,040,18,04,,,47,05,18,071,44,09,02,348,21*43
$GPGSV,4,2,13,12,03,118,24,16,
I already searched for similar problems. However, I was not able to find a solution that works for me.
I ended up with code like this:
import csv

def unfussy_reader(csv_reader):
    while True:
        try:
            yield next(csv_reader)
        except csv.Error:
            # log the problem or whatever
            print("Problem with some row")
            continue

if __name__ == '__main__':
    #
    # Generate malformed csv file for
    # demonstration purposes
    #
    with open("temp.csv", "w") as fout:
        fout.write("abc,def\nghi\x00,klm\n123,456")
    #
    # Open the malformed file for reading, fire up a
    # conventional CSV reader over it, wrap that reader
    # in our "unfussy" generator and enumerate over that
    # generator.
    #
    with open("Rover.ubx") as fin:
        reader = unfussy_reader(csv.reader(fin))
        for n, row in enumerate(reader):
            fout.write(row[0])
However, I was not able to simply write out a file containing all the rows read in with the unfussy_reader wrapper using the above code.
I would be glad if you could help me.
Here is an image of how the .ubx file looks in Notepad++.
Thanks!
I am not quite sure, but your file looks pretty binary. You should try to open it as such:
with open(INPUT_FILENAME, 'rb') as input_file:
It seems like you did not open the file with the correct encoding, so the raw message cannot be read correctly.
If it is encoded as UTF-8, you need to open the file with the encoding option:
with open(INPUT_FILENAME, 'r', newline='', encoding='utf8') as input_file
Hey, if anyone else has this problem reading NMEA sentences out of uBlox .ubx files, this Python code worked for me (everything is opened in binary mode, since the file mixes text and raw bytes):
def read_in():
    with open('GNGGA.txt', 'wb') as GNGGA:
        with open('GNRMC.txt', 'wb') as GNRMC:
            with open('rover.ubx', 'rb') as f:
                for line in f:
                    # line is bytes here, so compare against byte prefixes
                    if line.startswith(b'$GNGGA'):
                        GNGGA.write(line)
                    if line.startswith(b'$GNRMC'):
                        GNRMC.write(line)

read_in()
You could also use the gnssdump command line utility, which is installed with the PyGPSClient and pygnssutils Python packages, e.g.
gnssdump filename=Rover.ubx msgfilter=GNGGA
See gnssdump -h for help.
Alternatively, if you want a simple Python script, you could use the pyubx2 Python package, e.g.:
from pyubx2 import UBXReader

with open("Rover.ubx", "rb") as stream:
    ubr = UBXReader(stream)
    for (_, parsed_data) in ubr.iterate():
        if parsed_data.identity in ("GNGGA", "GNRMC"):
            print(parsed_data)
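(Note: in recent pyubx2 releases the reader is also directly iterable, i.e. for raw_data, parsed_data in ubr: ...; check which idiom your installed version supports.)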

How to write to a CSV line by line?

I have data which is being accessed via HTTP request and is sent back by the server in a comma-separated format. I have the following code:
site= 'www.example.com'
hdr = {'User-Agent': 'Mozilla/5.0'}
req = urllib2.Request(site,headers=hdr)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
soup = soup.get_text()
text=str(soup)
The content of text is as follows:
april,2,5,7
may,3,5,8
june,4,7,3
july,5,6,9
How can I save this data into a CSV file.
I know I can do something along the lines of the following to iterate line by line:
import StringIO
s = StringIO.StringIO(text)
for line in s:
But I'm unsure how to properly write each line to the CSV.
EDIT: Thanks for the feedback; as suggested, the solution was rather simple and can be seen below.
Solution:
import StringIO
s = StringIO.StringIO(text)
with open('fileName.csv', 'w') as f:
    for line in s:
        f.write(line)
General way:
## text = list of strings to be written to file
with open('csvfile.csv', 'wb') as file:
    for line in text:
        file.write(line)
        file.write('\n')
OR
Using CSV writer:
import csv
with open(<path to output_csv>, "wb") as csv_file:
    writer = csv.writer(csv_file, delimiter=',')
    for line in data:
        writer.writerow(line)
OR
Simplest way:
f = open('csvfile.csv', 'w')
f.write('hi there\n')  # Give your csv text here.
## Python will convert \n to os.linesep
f.close()
You could just write to the file as you would write any normal file.
with open('csvfile.csv', 'wb') as file:
    for l in text:
        file.write(l)
        file.write('\n')
If, just in case, it is a list of lists, you could directly use the built-in csv module:
import csv
with open("csvfile.csv", "wb") as file:
    writer = csv.writer(file)
    writer.writerows(text)
I would simply write each line to a file, since it's already in a CSV format:
write_file = "output.csv"
with open(write_file, "wt", encoding="utf-8") as output:
    for line in text:
        output.write(line + '\n')
I can't recall how to write lines with line-breaks at the moment, though :p
Also, you might like to take a look at this answer about write(), writelines(), and '\n'.
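In short, write() emits exactly the string it is given, and writelines() does not add line endings either, so the '\n' must be explicit in both cases. A quick illustration (file name hypothetical):

lines = ['april,2,5,7', 'may,3,5,8']
with open('out.csv', 'w') as f:
    f.write('\n'.join(lines) + '\n')  # single write; join supplies the separators
# equivalently, with writelines (it adds no newlines itself):
with open('out.csv', 'w') as f:
    f.writelines(line + '\n' for line in lines)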
To complement the previous answers, I whipped up a quick class to write to CSV files. It makes it easier to manage and close open files and achieve consistency and cleaner code if you have to deal with multiple files.
import csv
import os

class CSVWriter():
    filename = None
    fp = None
    writer = None

    def __init__(self, filename):
        self.filename = filename
        self.fp = open(self.filename, 'w', encoding='utf8')
        self.writer = csv.writer(self.fp, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL, lineterminator='\n')

    def close(self):
        self.fp.close()

    def write(self, elems):
        self.writer.writerow(elems)

    def size(self):
        return os.path.getsize(self.filename)

    def fname(self):
        return self.filename
Example usage:
mycsv = CSVWriter('/tmp/test.csv')
mycsv.write((12,'green','apples'))
mycsv.write((7,'yellow','bananas'))
mycsv.close()
print("Written %d bytes to %s" % (mycsv.size(), mycsv.fname()))
Have fun
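One possible refinement (a sketch, not part of the original answer): adding __enter__ and __exit__ so the class works in a with block and the file is closed even if an exception occurs. The subclass name is hypothetical; it reuses the CSVWriter class defined above.

class ManagedCSVWriter(CSVWriter):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()  # always close, even on error

# the file is closed automatically when the block exits
with ManagedCSVWriter('/tmp/test.csv') as mycsv:
    mycsv.write((12, 'green', 'apples'))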
What about this:
with open("your_csv_file.csv", "w") as f:
    f.write("\n".join(text))
From the str.join() docs: "Return a string which is the concatenation of the strings in iterable. The separator between elements is the string providing this method."
In my situation...
with open('UPRN.csv', 'w', newline='') as out_file:
    writer = csv.writer(out_file)
    writer.writerow(('Name', 'UPRN', 'ADMIN_AREA', 'TOWN', 'STREET', 'NAME_NUMBER'))
    writer.writerows(lines)
You need to include the newline option in the open() call and it will work.
https://www.programiz.com/python-programming/writing-csv-files
