Issue with reading from json file - python

I'm working with project and I'm trying to write from json file. Unfortunately I found a problem. When I tried to read it I dont' get everything the same as this in my file pliki.json
pliki.json:
[
{
"name": "",
"pesel": "",
"choosendir": "Amsterdam-Berlin",
"lot id": "1",
"bilet class": "Biznes ",
"bilet_price": " 68",
"seat": "5"
}
]
Function reading:
import csv
from lot import DatabaseofLoty, Lot
import json
from person import Person, Database
from ticket import Ticket
def read_info_aboutpeople(path):
with open(path, "r") as file_handle:
people = []
try:
rowing = json.load(file_handle)
for row in rowing:
name =row["name"],
print(name)
pesel = row["pesel"],
print(pesel)
choosendir = row["choosendir"]
print(choosendir)
lot_id = row["lot id"],
print(lot_id)
bilet_class = row["bilet class"],
bilet_price = row["bilet_price"],
seat = row["seat"]
ticket = Ticket(bilet_class,bilet_price)
person = Person(name, pesel,ticket, choosendir, lot_id, seat)
people.append(person)
database = Database(people)
return database
except Exception:
database = Database([])
print(read_info_aboutpeople("pliki.json"))
Outputs:
('',)
('',)
Amsterdam-Berlin
('1',)
Why this works like that? How to solve this

Actually json reading works, but look what exactly is happening here:
name = row["name"],
print(type(name)) # <class 'tuple'>
So in real you create one element tuple here with name as it's only element. That's why you see strange print results as most of your variables are just tuples (and that's why your code probably blows up when you use lot_id as you expect that as single str). You need to remove commas to make code work as you expect

Related

Import json with missing keys into postgres with Flask-Sqlalchemy

I have a question you guys might be able to answer.
I have a json file that looks something like this:
[
{
"address": "some address",
"full_time_school": false,
"name": "some name",
"official_id": "722154",
"school_type": "Grundschule",
"school_type_entity": "Grundschule",
"state": "BW"
},
{
"address": "some other address",
"name": "some other name",
"official_id": "722190",
"state": "BW"
}
]
The point is that not every entry has all keys.
I have a flask-sqlalchemy model that looks like this:
class School(db.Model):
__tablename__ = "school" # pragma: no cover
address = db.Column(db.String)
full_time_school = db.Column(db.Boolean)
name = db.Column(db.String)
official_id = db.Column(db.Integer)
school_type = db.Column(db.String)
school_type_entity = db.Column(db.String)
state = db.Column(db.String)
def __repr__(self):
return f"<name {self.name}"
And I have a python script to add the json entries into my postgresql database that looks like this:
from my_project import db
from my_project.models import School
import json
import os
# insert data
for filename in os.listdir("datamining"):
if filename.endswith(".json"):
file = open(os.path.join("datamining", filename))
print(f"Add schools from {filename.strip('.json')}")
data = json.load(file)
cleaned_data = {school["official_id"]: school for school in data}.values()
print(f"Adding {len(data)} schools to the database.")
for school in cleaned_data:
entry = School(
id=school["official_id"]
)
for key, value in school.items():
entry.key = value
db.session.add(entry)
db.session.commit()
file.close()
print("Added all schools!!!")
I don't know why but somehow every cell is NULL except the official_id field. How so and how can I fix that? I'm at the end of my wits right now. Every pointer or help is much appreciated.
EDIT:
What I found out so far is, that entry.key is not interpreted as entry.state for example, but actually creates a reference entry.key = "BW" for example. Why is that?
Your problem is
entry.key = value
You are just writing your values over and over into the attribute 'key' within your School model. I'm actually surprised SQLAlchemy doesn't raise some kind of error here...
Just pass all your values into the constructor and you should be fine:
school["id"] = school.pop("official_id")
entry = School(**school)
EDIT: It's "BW" because this happens to be the last value that is written into the attribute.
You can do this much easier and faster all in one go by executing this native parameterized query passing the text contents of the JSON file as parameter jsontext:
insert into school
select * from jsonb_populate_recordset(null::school, :jsontext::jsonb);

When trying to Join a CSV to many feature classes within a gdb. Error: AttributeError: 'str' object has no attribute 'get'

I am trying to join a csv to all feature classes within a gdb based on FILENAME which is the unique identifier in both csv and feature classes. Then I want to field calculate over the fields i need.
I am getting the following error. AttributeError: 'str' object has no attribute 'get'.
#Import modules
import arcpy, os, string, sys, csv, collections
#Set parameters
Working = address to gdb
Add_CSV = address to csv
arcpy.env.workspace = Working
fcList = arcpy.ListFeatureClasses()
#Join CSV to feature classes in gdb and populate fields
csv_values = {
row[3]: {"id": row[0], "Name": row[1], "Class": row[2]}
for row in arcpy.da.SearchCursor(Add_CSV, ["id", "Name", "Class", "FILENAME"])
}
for fc in fcList:
with arcpy.da.UpdateCursor(fc, ["FILENAME", "AssetID", "AssetName", "AssetClass"]) as u_cursor:
for filename, assetid, assetname, assetclass in u_cursor:
csv_record = Add_CSV.get(filename)
assert csv_record, "No value found for {} filename {} in CSV".format(fc, filename)
id = csv_record["id"]
name = csv_record["Name"]
category = csv_record["Class"]
u_cursor.updateRow([filename, id, name, class])
Instead of:
csv_record = Add_CSV.get(filename)
try:
csv_record = csv_values[filename]
You have already captured all the data from the CSV file (Add_CSV) and processed them into the dictionary csv_values, so matching them up to your feature class rows should be with that dictionary, not the file name.
(The error occurs because you're just working with the string file path, not with the CSV raw data!)

I get a warning when initializing a class that says 'parameter value is not used' in Python

I have the following code that has one class with 3 attributes, and two functions.
import json
class Song:
def __init__(self, name, artist, full_title=None):
self.name = name
self.artist = artist
self.full_title = self.name + ' ' + self.artist
def save_json(list_of_objects, json_file):
# json save
with open(json_file, 'w') as fp:
json.dump(list_of_objects, fp, indent=4)
def load_json(json_file):
# json load
with open(json_file, 'r') as fp:
loaded_data = json.load(fp)
return loaded_data
file = 'my_songs.json'
song1 = Song('Roar', 'Katy Perry')
song2 = Song('Hello', 'Adele')
song3 = Song('Grenade', 'Bruno Mars')
list_of_songs = [song1, song2, song3]
list_of_songs_dict = [vars(s) for s in list_of_songs]
save_json(list_of_songs_dict, file)
As you can see, the object is initialized with three arguments (name, artist, full_title)
The code creates a list of objects and dumps it in a json file named 'my_songs.json'.
The json file looks like this
[
{
"name": "Roar",
"artist": "Katy Perry",
"full_title": "Roar Katy Perry"
},
{
"name": "Hello",
"artist": "Adele",
"full_title": "Hello Adele"
},
{
"name": "Grenade",
"artist": "Bruno Mars",
"full_title": "Grenade Bruno Mars"
}
]
I would like to recreate the list named 'list_of_songs' from the json file (named 'my_songs.json').
This is the way I have done it:
loaded_songs = load_json(file)
list_of_songs_from_json = [Song(**o) for o in loaded_songs]
print(list_of_songs_from_json)
The IDE shows a warning that says 'parameter full_title value is not used'
If i remove 'full_title=None' from the class initialization I get the following error message:
TypeError: __init__() got an unexpected keyword argument 'full_title'
The reason why 'full_title=None' is in the class initialization is that in some cases, I want to define the object using 2 parameters, and in other cases (like when loading the data from the json file) I want to define the object using 3 parameters.
I suspect that there is a 'correct' or 'better' way of doing this, hence the warning. Any advice on how to remove the warning and achieve what the code already does?
Thank you.
you need to add the information when full_title is not None. warning is coming because
for self.full_title you are using self.a and self.b which make each time if full_title is not None, it's value is not used anywhere .
so some modification to your code, where it check if full_title is None or not.
class Song:
def __init__(self, name, artist, full_title=None):
self.name = name
self.artist = artist
if full_title is None:
self.full_title = self.name + ' ' + self.artist
else:
self.full_title = full_title

save two list in one json file

I'm getting data with two lists and I want to save both of them in one single json file can someone help me.
I'm using selenium
def get_name(self):
name = []
name = self.find_elements_by_class_name ('item-desc')
price = []
price = self.find_elements_by_class_name ('item-goodPrice')
for names in name :
names = (names.text)
#print names
for prices in price :
prices = (prices.text)
#print price
I would create a dictionary and then JSON dumps
An example could be:
import json
def get_name(self):
names = [ name.text for name in self.find_elements_by_class_name('item-desc') ]
prices = [ price.text for price in self.find_elements_by_class_name('item-goodPrice')]
with open('output-file-name.json', 'w') as f:
f.write(json.dumps({'names': names, 'prices': prices}))
EDIT: In the first version of the answer I was only creating the JSON, if you want to create a file as well, you should include what suggested by #Andersson comment

how to fetch data from google plus using api key in python?

**When i send a request like --
f = urllib.urlopen(https://www.googleapis.com/plus/v1/people/103777531434977807649/activities/public?key=*************** )
json=f.read()
print json
it returns some thing like this not the required json
{
"kind": "plus#activityFeed",
"etag": "\"seVFOlIgH91k2i-GrbizYfaw_AM/chWYjTdvKRLG9yxkeAfrCrofGHk\"",
"nextPageToken": "CAIQ__________9_IAAoAA",
"title": "Google+ List of Activities for Collection PUBLIC",
"items": []
}
what i have to do to get the right response????
this is the code:
import json
f = urllib.urlopen('https://www.googleapis.com/plus/v1/people/'+id+'/activities /public?key=*****************&maxResults=100')
s = f.read()
f.close()
ss=json.loads(s)
print ss
try:
nextpagetoken=str(ss['nextPageToken'])
i=0
str_current_datetime=str(datetime.now())
gp_crawldate=str_current_datetime.split(" ")[0]
gp_groupid=id
db = MySQLdb.connect("localhost","root","****","googleplus" )
cursor=db.cursor()
while i<len(ss['items']):
gp_kind=str(ss['items'][i]['kind'])
gp_title=str(ss['items'][i]['title'].encode('utf8'))
gp_published=str(ss['items'][i]['published'][0:10])
check=int(cool(str(ss['items'][i]['published'][0:19])))#this method is defined in the code
gp_activityid=str(ss['items'][i]['id'])
gp_actorid=str(ss['items'][i]['actor']['id'])
gp_verb=str(ss['items'][i]['verb'])
gp_objecttype=str(ss['items'][i]['object']['objectType'])
gp_originalcontent=str(ss['items'][i]['object']['content'].encode('utf8'))
gp_totalreplies=str(ss['items'][i]['object']['replies']['totalItems'])
gp_totalplusone=str(ss['items'][i]['object']['plusoners']['totalItems'])
gp_totalreshare=str(ss['items'][i]['object']['resharers']['totalItems'])
#gp_geocode=str(ss['items'][i]['geocode'])
#gp_placename=str(ss['items'][i]['placeName'])
i=i+1
is the any change in g+api???
The response you posted is a correct response. If the items field is an empty list, then the user that you are fetching the posts for has probably never posted anything publicly. In this case, I confirmed that the user has no public posts simply by visiting their profile.

Categories

Resources