My task is to find the IfcQuantityArea values of all IfcWall entities in project.ifc and export those values to a .csv together with other attributes such as GlobalId and Name.
The question is how I can capture the result of the function, so I could store it in a variable or list and then insert it into a column in my new .csv file.
I tried several ways; when I print the values they look fine, but I have no idea how to collect them into my .csv file. Maybe there is another approach to computing the IfcWall areas using some API functions? Any ideas for both the Python and ifcopenshell sides?
import ifcopenshell

def print_quantities(property_definition):
    if 'IfcElementQuantity' == property_definition.is_a():
        for quantity in property_definition.Quantities:
            if 'IfcQuantityArea' == quantity.is_a():
                print('Area value: ' + str(quantity.AreaValue))

model = ifcopenshell.open('D:/.../project-modified.ifc')
products = model.by_type('IfcWall')
for product in products:
    if product.IsDefinedBy:
        definitions = product.IsDefinedBy
        for definition in definitions:
            if 'IfcRelDefinesByProperties' == definition.is_a():
                property_definition = definition.RelatingPropertyDefinition
                print_quantities(property_definition)
            if 'IfcRelDefinesByType' == definition.is_a():
                type = definition.RelatingType
                if type.HasPropertySets:
                    for property_definition in type.HasPropertySets:
                        print_quantities(property_definition)
import csv

header = ['GlobalId', 'Name', 'TotalArea']
data = []
for wall in model.by_type('IfcWall'):
    row = [wall.GlobalId, wall.Name, AreaValue]  # AreaValue is undefined here -- this is where I am stuck
    data.append(row)

with open('D:/.../quantities.csv', 'w', encoding='UTF8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(header)
    writer.writerows(data)
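One way to do what the question asks, for what it's worth: have the function return the value instead of printing it, then collect one row per wall. A minimal sketch along those lines, assuming the same quantity structure as in the code above and at most one IfcQuantityArea per wall (the helper name get_area is mine, not from the original):

import csv
import ifcopenshell

def get_area(product):
    # Walk the relations as in print_quantities, but return instead of print
    # (the IfcRelDefinesByType path from the question is omitted here for brevity)
    for definition in product.IsDefinedBy or []:
        if definition.is_a('IfcRelDefinesByProperties'):
            prop_def = definition.RelatingPropertyDefinition
            if prop_def.is_a('IfcElementQuantity'):
                for quantity in prop_def.Quantities:
                    if quantity.is_a('IfcQuantityArea'):
                        return quantity.AreaValue
    return None  # no area quantity found for this wall

model = ifcopenshell.open('D:/.../project-modified.ifc')
rows = [[wall.GlobalId, wall.Name, get_area(wall)] for wall in model.by_type('IfcWall')]

with open('D:/.../quantities.csv', 'w', encoding='UTF8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['GlobalId', 'Name', 'TotalArea'])
    writer.writerows(rows)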
Related
I am trying to export data into a csv file using Python 3's csv writer. I need a way to automatically create a dictionary out of the object and its respective properties.
Here is my code:
# Import the appropriate models and functions needed for our script
from cbapi.response import *
import logging
import csv

# Connect to our CB server
conn = CbResponseAPI()

# Sample query
q = "ipAddress:000.00.0.0"

# Initialize our query
process_query = conn.select(Process).where(q).group_by("id")

# Set your path
my_path = '/Users/path/tt_123.csv'

# All object properties for the event
objects = ['childproc_count',
           'cmdline',
           'comms_ip',
           'crossproc_count',
           'filemod_count',
           'filtering_known_dlls',
           'group',
           'host_type',
           'id']
with open(my_path, 'w', newline='') as file:
    header = objects  # add column headers
    writer = csv.DictWriter(file, fieldnames=header)
    writer.writeheader()
    for x in process_query:
        p_1 = '{}'.format(x.id)
        p_2 = '{}'.format(x.childproc_count)
        p_3 = '{}'.format(x.cmdline)
        p_4 = '{}'.format(x.comms_ip)
        p_5 = '{}'.format(x.crossproc_count)
        p_6 = '{}'.format(x.filemod_count)
        p_7 = '{}'.format(x.filtering_known_dlls)
        p_8 = '{}'.format(x.group)
        p_9 = '{}'.format(x.host_type)
        # Put them in a dictionary to write to the csv file
        dd = {'id': p_1, 'child': p_2, 'cmdline': p_3}
        # Write rows to the csv file
        writer.writerow(dd)
It works this way, but is there a way to automatically take all of the p_'s (p_1, p_2, etc.) into a dictionary like the one shown in the variable dd? I am new to Python and anything would help.
So, essentially dd would be:
dd = {'id': p_1, 'child': p_2, 'cmdline': p_3, 'comms_ip': p_4, 'crossproc_count': p_5, 'filemod_count': p_6, 'filtering_known_dlls': p_7, 'group': p_8, 'host_type': p_9}
Here's a little example that creates a couple of objects with attributes, then queries those attributes to write rows to a file.
import csv

class MyObject:
    def __init__(self, id, cmd, p1, p2, p3):
        self.id = id
        self.cmdline = cmd
        self.param1 = p1
        self.param2 = p2
        self.param3 = p3

objects = [MyObject(111, 'string', 'param 1', 'param 2', 123),
           MyObject(222, 'string2', 'param 1', 'param 2', 456)]

headers = 'id cmdline param1 param2 param3'.split()

with open('output.csv', 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=headers)
    writer.writeheader()
    for obj in objects:
        # "dictionary comprehension" to build the key/value pairs
        row = {item: getattr(obj, item) for item in headers}
        writer.writerow(row)
Output:
id,cmdline,param1,param2,param3
111,string,param 1,param 2,123
222,string2,param 1,param 2,456
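Applied to the original loop, the same pattern collapses all of the p_ variables into a single dictionary comprehension. A sketch, assuming the cbapi attribute names really do match the column names listed in objects:

with open(my_path, 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=objects)
    writer.writeheader()
    for x in process_query:
        # Build the whole row dict from the object's attributes in one go
        row = {field: '{}'.format(getattr(x, field)) for field in objects}
        writer.writerow(row)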
Here's my code:
def update_win():
    # Get every string value needed from the input bar
    # (ID_num, name, Crs, Yr, sID_num and files are Tkinter entry variables defined elsewhere)
    stud_ID = str(ID_num.get())
    stud_name = str(name.get())
    stud_course = str(Crs.get())
    stud_year = str(Yr.get())
    searchID = str(sID_num.get())  # ID number for search
    filename = str(files.get())    # filename
    tempfile = NamedTemporaryFile(mode='w', delete=False)
    fields = ['ID', 'Name', 'Course', 'Year']
    with open(filename, 'r') as csvfile, tempfile:
        reader = csv.DictReader(csvfile, fieldnames=fields)
        writer = csv.DictWriter(tempfile, fieldnames=fields)
        for row in reader:
            if row['ID'] == searchID:
                row['Name'], row['Course'], row['Year'] = stud_name, stud_course, stud_year
                msg = Label(upd_win, text="Update Successful", font="fixedsys 12 bold").place(x=3, y=200)
            row = {'ID': row['ID'], 'Name': row['Name'], 'Course': row['Course'], 'Year': row['Year']}
            writer.writerow(row)
    shutil.move(tempfile.name, filename)
So this code is an UPDATE: it searches for that ID number in the CSV and shows its row via a GUI (as you can see, it uses a Label rather than print). After that it prompts the user to enter the new ID number, name, course, and year to write into the row that was selected.
It does run through, but the values don't change. Any ideas what happened here and how I can fix it?
Sometimes you need to isolate your problem by writing separate scripts to test your processes. This exercise is the first step toward creating a Minimal, Complete, and Verifiable example for posting here. Oftentimes, creating an MCVE helps you find the problem even before you post the question.
Here is a script to test whether the read-modify-write process works:
import csv
import io
from tempfile import NamedTemporaryFile

s = '''one, two, three
1,2,3
4,5,6
7,8,9
10,11,12
'''
data_csv = io.StringIO(s)
g = NamedTemporaryFile(mode='w', delete=False)
fields = ['a', 'b', 'c']

# read-modify-write
with data_csv as f, g:
    w = csv.DictWriter(g, fields)
    w.writeheader()
    r = csv.DictReader(f, fields)
    for line in r:
        line['b'] = 'bar'
        w.writerow(line)

# test - did the modifications get written to the temp file?
with open(g.name) as f:
    print(f.read())
Which does seem to be working; the temp file has the modified data in it.
Maybe HOW you modified the data is the problem - but changing the test script to match the form of your code also works fine:
...
for line in r:
    line['a'], line['b'], line['c'] = line['a'], 'foo', line['c']
    line = {'a': line['a'], 'b': line['b'], 'c': line['c']}
    w.writerow(line)
Assuming all the .get()'s in the first lines of the function are working, filename in the line
shutil.move(tempfile.name, filename)
must not have the correct path.
OR the conditional
if row['ID'] == searchID:
isn't working.
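A quick, illustrative way to check that second hypothesis is to print both sides of the comparison inside the loop; csv values always come back as strings, so stray whitespace or a type mismatch shows up in the repr:

for row in reader:
    # If these never match, the reprs usually show why (whitespace, case, type)
    print(repr(row['ID']), repr(searchID), row['ID'] == searchID)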
Food for thought:
Moving code into functions, like the read-modify-write portion, not only helps with readability, it can also make testing easier.
update_win() works by using a side effect (shutil.move(tempfile.name, filename)) instead of returning something that can be acted on. Side effects can make testing harder.
That isn't necessarily bad (sometimes it is practical); you just need to be aware that you are doing it.
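For example, a hedged sketch of that refactor (the function and argument names are mine, not from the original code):

def update_rows(reader, writer, search_id, new_values):
    # Read-modify-write with no file handling or GUI; returns whether a row matched
    found = False
    for row in reader:
        if row['ID'] == search_id:
            row.update(new_values)
            found = True
        writer.writerow(row)
    return found

A function like this can be exercised with io.StringIO inputs in a test, without touching temp files or Tkinter.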
You have opened both files for 'r' read, and you are trying to write to the temp file.
You can use 'r+' mode with a single file handle in order to read from and write to the same file:
...
with open(filename, 'r+') as csvfile:
    reader = csv.DictReader(csvfile, fieldnames=fields)
    writer = csv.DictWriter(csvfile, fieldnames=fields)
...
I am crawling data from Wikipedia and it works so far. I can display it on the terminal, but I can't write it the way I need into a csv file :-/
The code is pretty long, but I'll paste it here anyway and hope that somebody can help me.
import csv
import requests
from bs4 import BeautifulSoup

def spider():
    url = 'https://de.wikipedia.org/wiki/Liste_der_Gro%C3%9F-_und_Mittelst%C3%A4dte_in_Deutschland'
    code = requests.get(url).text       # read source code as unicode
    soup = BeautifulSoup(code, "lxml")  # create BS object
    table = soup.find(text="Rang").find_parent("table")
    for row in table.find_all("tr")[1:]:
        partial_url = row.find_all('a')[0].attrs['href']
        full_url = "https://de.wikipedia.org" + partial_url
        get_single_item_data(full_url)  # goes into the individual pages

def get_single_item_data(item_url):
    page = requests.get(item_url).text  # read source code as unicode
    soup = BeautifulSoup(page, "lxml")  # create BS object

    def getInfoBoxBasisDaten(s):
        return str(s) == 'Basisdaten' and s.parent.name == 'th'

    basisdaten = soup.find_all(string=getInfoBoxBasisDaten)[0]
    basisdaten_list = ['Bundesland', 'Regierungsbezirk:', 'Höhe:', 'Fläche:', 'Einwohner:', 'Bevölkerungsdichte:',
                       'Postleitzahl', 'Vorwahl:', 'Kfz-Kennzeichen:', 'Gemeindeschlüssel:', 'Stadtgliederung:',
                       'Adresse', 'Anschrift', 'Webpräsenz:', 'Website:', 'Bürgermeister', 'Bürgermeisterin',
                       'Oberbürgermeister', 'Oberbürgermeisterin']
    with open('staedte.csv', 'w', newline='', encoding='utf-8') as csvfile:
        fieldnames = basisdaten_list  # same list as above
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=';', quotechar='|',
                                quoting=csv.QUOTE_MINIMAL, extrasaction='ignore')
        writer.writeheader()
        for i in basisdaten_list:
            wanted = i
            current = basisdaten.parent.parent.nextSibling
            while True:
                if not current.name:
                    current = current.nextSibling
                    continue
                if wanted in current.text:
                    items = current.findAll('td')
                    print(BeautifulSoup.get_text(items[0]))
                    print(BeautifulSoup.get_text(items[1]))
                    writer.writerow({i: BeautifulSoup.get_text(items[1])})
                if '<th ' in str(current):
                    break
                current = current.nextSibling

print(spider())
The output is incorrect in two ways: the cells are not in their right places (each value ends up in its own row), and only one city is written; all the others are missing. It should instead contain one row per city, with every value in its proper column.
'... only one city is written ...': You call get_single_item_data for each city. Inside that function you then open the output file, always with the same name, in the statement with open('staedte.csv', 'w', newline='', encoding='utf-8') as csvfile:, which overwrites the output file each time the function is called.
Each variable is written to a new row: in the statement writer.writerow({i: BeautifulSoup.get_text(items[1])}) you write the value of a single variable as its own row. What you need to do instead is build one dictionary per city: as you accumulate the values from the page, shove them into the dictionary by field name, and only after you have found all of the available values call writer.writerow once.
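Putting both fixes together, a rough sketch of the reworked spider (this assumes basisdaten_list is moved to module level, and that get_single_item_data is changed to return a {field: value} dict instead of writing anything itself):

def spider():
    url = 'https://de.wikipedia.org/wiki/Liste_der_Gro%C3%9F-_und_Mittelst%C3%A4dte_in_Deutschland'
    soup = BeautifulSoup(requests.get(url).text, "lxml")
    table = soup.find(text="Rang").find_parent("table")
    # Open the output file once, before the per-city loop
    with open('staedte.csv', 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=basisdaten_list,
                                delimiter=';', extrasaction='ignore')
        writer.writeheader()
        for row in table.find_all("tr")[1:]:
            full_url = "https://de.wikipedia.org" + row.find_all('a')[0].attrs['href']
            city_data = get_single_item_data(full_url)  # one dict per city
            writer.writerow(city_data)                  # one row per city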
I want to edit a CSV file so that it picks up a value from another JSON file of mine, in Python 2.7.
My CSV file is a.csv:
a,b,c,d
,10,12,14
,11,14,15
My JSON file is a.json:
{"a":20}
I want the column 'a' to be matched against the keys in the JSON file. If there is a match, the script should copy that value from the JSON and paste it into my CSV file, so that the final output of my CSV file looks like this:
a,b,c,d
20,10,12,14
20,11,14,15
What I have tried so far is:
fileCSV = open('a.csv', 'a')
fileJSON = open('a.json', 'r')
jsonData = fileJSON.json()
for k in range(jsonData):
    for i in csvRow:
        for j in jsonData.keys():
            if i == j:
                if self.count == 0:
                    self.data = jsonData[j]
                    self.count = 1
                else:
                    self.data = self.data + "," + jsonData[j]
                    self.count = 0
        fileCSV.write(self.data)
        fileCSV.write("\n")
    k += 1
fileCSV.close()
print("File created successfully")
I will be really thankful if anyone can help me with this.
Please ignore any syntax and indentation errors.
Thank you.
Some basic string parsing will get you there. I wrote a script which works for the simple scenario you refer to.
Check if this solves your problem:
import json
from collections import OrderedDict

def list_to_csv(listdat):
    csv = ""
    for val in listdat:
        csv = csv + "," + str(val)
    return csv[1:]

lines = []
csvfile = "csvfile.csv"
outcsvfile = "outcsvfile.csv"
jsonfile = "jsonfile.json"

with open(csvfile, encoding='UTF-8') as a_file:
    for line in a_file:
        lines.append(line.strip())

columns = lines[0].split(",")
data = lines[1:]

whole_data = []
for row in data:
    fields = row.split(",")
    i = 0
    rowData = OrderedDict()
    for column in columns:
        rowData[columns[i]] = fields[i]
        i += 1
    whole_data.append(rowData)

with open(jsonfile) as json_file:
    jsondata = json.load(json_file)

keys = list(jsondata.keys())
for key in keys:
    value = jsondata[key]
    for each_row in whole_data:
        each_row[key] = value

with open(outcsvfile, mode='w', encoding='UTF-8') as b_file:
    b_file.write(list_to_csv(columns) + '\n')
    for row_data in whole_data:
        row_list = []
        for ecolumn in columns:
            row_list.append(row_data.get(ecolumn))
        b_file.write(list_to_csv(row_list) + '\n')
The CSV output is not written back to the source file but to a different file.
The output file is also always truncated and rewritten, hence the 'w' mode.
I would recommend using the csv.DictReader and csv.DictWriter classes, which read rows into and write rows out of Python dicts. That would make it easier to update the values you read in with those from the JSON file.
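A rough sketch of that recommendation, reusing the illustrative filenames from the answer above:

import csv
import json

with open('jsonfile.json') as json_file:
    jsondata = json.load(json_file)

with open('csvfile.csv', newline='') as infile, \
        open('outcsvfile.csv', 'w', newline='') as outfile:
    reader = csv.DictReader(infile)
    writer = csv.DictWriter(outfile, fieldnames=reader.fieldnames)
    writer.writeheader()
    for row in reader:
        # Overwrite only the columns that actually appear in the JSON, e.g. 'a' -> 20
        row.update({k: v for k, v in jsondata.items() if k in reader.fieldnames})
        writer.writerow(row)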
I have a csv file with several hundred organism IDs and a second csv file with several thousand organism IDs plus additional characteristics (taxonomic information, abundances per sample, etc.).
I am trying to write code that extracts information from the larger CSV using the smaller CSV file as a reference. That is, it should look at both files, and if an ID appears in both, extract all of the information for that ID from the larger file and write it to a new file (basically write the entire row for that ID).
So far I have written the following, and while the code does not error out on me, I get a blank file in the end and I don't exactly know why. I am a graduate student who knows some simple coding, but I'm still very much a novice.
Thank you.
import sys
import csv
import os.path

SparCCnames = open(sys.argv[1], "rU")
OTU_table = open(sys.argv[2], "rU")
new_file = open(sys.argv[3], "w")

Sparcc_OTUs = csv.writer(new_file)

d = csv.DictReader(SparCCnames)
ids = csv.DictReader(OTU_table)

for record in ids:
    idstopull = record["OTUid"]
    if idstopull[0] == "OTUid":
        continue
    if idstopull[0] in d:
        new_id.writerow[idstopull[0]]

SparCCnames.close()
OTU_table.close()
new_file.close()
I'm not sure what you're trying to do in your code, but you can try this:
import csv

def csv_to_dict(csv_file_path):
    csv_file = open(csv_file_path, 'rb')
    csv_file.seek(0)
    sniffdialect = csv.Sniffer().sniff(csv_file.read(10000), delimiters='\t,;')
    csv_file.seek(0)
    dict_reader = csv.DictReader(csv_file, dialect=sniffdialect)
    csv_file.seek(0)
    dict_data = []
    for record in dict_reader:
        dict_data.append(record)
    csv_file.close()
    return dict_data

def dict_to_csv(csv_file_path, dict_data):
    csv_file = open(csv_file_path, 'wb')
    writer = csv.writer(csv_file, dialect='excel')
    headers = dict_data[0].keys()
    writer.writerow(headers)
    # headers must be the same as dat.keys()
    for dat in dict_data:
        line = []
        for field in headers:
            line.append(dat[field])
        writer.writerow(line)
    csv_file.close()

if __name__ == "__main__":
    big_csv = csv_to_dict('/path/to/big_csv_file.csv')
    small_csv = csv_to_dict('/path/to/small_csv_file.csv')
    output = []
    for s in small_csv:
        for b in big_csv:
            if s['id'] == b['id']:
                output.append(b)
    if output:
        dict_to_csv('/path/to/output.csv', output)
    else:
        print "Nothing."
Hope that will help.
You need to read the data into a data structure. Assuming OTUid is unique, you can store it in a dictionary for fast lookup:
with open(sys.argv[1], "rU") as SparCCnames:
    d = csv.DictReader(SparCCnames)
    fieldnames = d.fieldnames
    data = {i['OTUid']: i for i in d}

with open(sys.argv[2], "rU") as OTU_table, open(sys.argv[3], "w") as new_file:
    Sparcc_OTUs = csv.DictWriter(new_file, fieldnames)
    ids = csv.DictReader(OTU_table)
    for record in ids:
        if record['OTUid'] in data:
            Sparcc_OTUs.writerow(data[record['OTUid']])
Thank you everyone for your help. I played with things, consulted with an advisor, and finally got a working script. I am posting it in case it helps someone else in the future.
Thanks!
import sys
import csv

input_file = csv.DictReader(open(sys.argv[1], "rU"))  # has all the info
ref_list = csv.DictReader(open(sys.argv[2], "rU"))    # reference list

output_file = csv.DictWriter(
    open(sys.argv[3], "w"), input_file.fieldnames)    # output file with headers
output_file.writeheader()                             # write headers to the output file

white_list = {}                                       # create an empty dictionary
for record in ref_list:                               # for every line in the reference list
    white_list[record["Sample_ID"]] = None            # store the IDs in the dictionary as keys

for record in input_file:                             # for every line in the input file
    record_id = record["Sample_ID"]                   # store the ID in record_id
    if record_id in white_list:                       # if the ID is in the reference list
        output_file.writerow(record)                  # write the entire row to the new file
    else:                                             # if it is not in the reference list
        continue                                      # ignore it and keep iterating
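One small simplification worth mentioning, in case it helps: since the dictionary values are all None, the whitelist can just as well be a set, and the membership test stays the same:

white_list = {record["Sample_ID"] for record in ref_list}  # set of reference IDs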