I have a problem with importing a CSV file into a database.
I'm using SQLAlchemy in Python and want to open a CSV file, then show it in a QTableWidget so the values can be changed, and afterwards write it to the database (as a new table).
def setinTable(self):
colcnt = len(self.title)
rowcnt = len(self.data)
self.tabel_model = QtGui.QTableWidget(rowcnt, colcnt)
vheader = QtGui.QHeaderView(QtCore.Qt.Orientation.Vertical)
self.tabel_model.setVerticalHeader(vheader)
hheader = QtGui.QHeaderView(QtCore.Qt.Orientation.Horizontal)
self.tabel_model.setHorizontalHeader(hheader)
self.tabel_model.setHorizontalHeaderLabels(self.title)
for i in range(rowcnt):
for j in range(len(self.data[0])):
item = QtGui.QTableWidgetItem(str(self.data[i][j]))
self.tabel_model.setItem(i, j, item)
self.tabel_model.horizontalHeader().sectionDoubleClicked.connect(self.changeHorizontalHeader)
self.setCentralWidget(self.tabel_model)
Get CSV data:
import csv

def getdata(filepath):
    with open(filepath, 'r') as csvfile:
        sample = csvfile.read(1024)
        # let the sniffer pick the delimiter from these candidates
        dialect = csv.Sniffer().sniff(sample, delimiters=';,|')
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect=dialect)
        header = next(reader)
        lines = []
        for line in reader:
            lines.append(line)
        return header, lines  # return the header too, so it can label the columns
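The caller would then unpack both values (an assumption based on the self.title and self.data attributes used in setinTable above):

self.title, self.data = getdata(filepath)
self.setinTable()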
Reading and showing the CSV file data in a QTableWidget works, but I don't know how to save it to a MySQL database.
For an easier way to load a CSV into a database table, check out the 'odo' Python project: https://pypi.python.org/pypi/odo/0.3.2
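A minimal sketch of that route (the connection URL and table name below are placeholders, not from the question):

from odo import odo
# copy the CSV straight into the 'csv_import' table of a MySQL database
odo('file.csv', 'mysql+pymysql://user:password@localhost/mydb::csv_import')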
--
To write a row via SQLAlchemy, one approach is to use a session, add the object, and commit:

myRow = myTable(
    column_a = 'foo',
    column_b = 'bar')
myRow.column_c = 1 + 2
mySession.add(myRow)
mySession.commit()
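For the question's setup specifically, here is a sketch that creates a new table from the edited QTableWidget contents and inserts the rows; the table name, the all-string column type, and the connection URL are assumptions, not from the question:

from sqlalchemy import create_engine, Table, Column, String, MetaData

engine = create_engine('mysql+pymysql://user:password@localhost/mydb')
metadata = MetaData()

# one String column per CSV header entry, for simplicity
csv_table = Table('csv_import', metadata,
                  *(Column(name, String(255)) for name in self.title))
metadata.create_all(engine)

# read the (possibly edited) values back out of the QTableWidget
rows = []
for i in range(self.tabel_model.rowCount()):
    rows.append({self.title[j]: self.tabel_model.item(i, j).text()
                 for j in range(self.tabel_model.columnCount())})

with engine.begin() as conn:  # commits on success
    conn.execute(csv_table.insert(), rows)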
--
My task is to find the IfcQuantityArea values of all IfcWall entities in project.ifc and export those values to a .csv together with other attributes such as GlobalId and Name.
The question is how I can capture the result of the function, so I can store it as a variable or list and insert it into a column in my new .csv file.
I have tried several ways; when I print the values they look fine, but I have no idea how to collect them into my .csv file. Maybe there is another approach to counting the IfcWall areas using some API functions? Any ideas, in Python or in the ifcopenshell environment, are welcome.
import ifcopenshell
def print_quantities(property_definition):
if 'IfcElementQuantity' == property_definition.is_a():
for quantity in property_definition.Quantities:
if 'IfcQuantityArea' == quantity.is_a():
print('Area value: ' + str(quantity.AreaValue))
model = ifcopenshell.open('D:/.../project-modified.ifc')
products = model.by_type('IfcWall')
for product in products:
if product.IsDefinedBy:
definitions = product.IsDefinedBy
for definition in definitions:
if 'IfcRelDefinesByProperties' == definition.is_a():
property_definition = definition.RelatingPropertyDefinition
print_quantities(property_definition)
if 'IfcRelDefinesByType' == definition.is_a():
type = definition.RelatingType
if type.HasPropertySets:
for property_definition in type.HasPropertySets:
print_quantities(property_definition)
import csv
header = ['GlobalId', 'Name', 'TotalArea']
data = []
for wall in model.by_type('IfcWall'):
    row = [wall.GlobalId, wall.Name, AreaValue]  # AreaValue is undefined here; this is exactly where I am stuck
data.append(row)
with open('D:/.../quantities.csv', 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(data)
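One way to restructure this (a sketch, untested against a real model; it assumes the quantity sets are reachable exactly as in the first snippet) is to have the function return the area values instead of printing them, then collect them per wall:

import csv
import ifcopenshell

def get_area_values(property_definition):
    # return all IfcQuantityArea values found in an IfcElementQuantity
    values = []
    if property_definition.is_a('IfcElementQuantity'):
        for quantity in property_definition.Quantities:
            if quantity.is_a('IfcQuantityArea'):
                values.append(quantity.AreaValue)
    return values

model = ifcopenshell.open('D:/.../project-modified.ifc')
data = []
for wall in model.by_type('IfcWall'):
    areas = []
    for definition in wall.IsDefinedBy:
        if definition.is_a('IfcRelDefinesByProperties'):
            areas += get_area_values(definition.RelatingPropertyDefinition)
    # assumption: one summed total per wall is what the CSV should hold
    data.append([wall.GlobalId, wall.Name, sum(areas)])

with open('D:/.../quantities.csv', 'w', encoding='UTF8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['GlobalId', 'Name', 'TotalArea'])
    writer.writerows(data)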
import csv
from datetime import datetime

# 'cursor' and 'con' below are assumed to be an already-open database cursor and connection

# gets rid of spaces in existing csv headers
def getColumns(readCSV):
return [column.replace(' ','') for column in next(readCSV)]
#insert format used to avoid hard-coded headers in script
def insertData(tableName, columns, readCSV):
print("Inserting Data")
query = 'INSERT INTO {}({}) VALUES ({})'.format(
tableName,
','.join(columns),
','.join('?' * len(columns))
)
for data in readCSV:
cursor.execute(query, data)
con.commit()
def updateTable(csvPath, tableName):
print("Updating table...")
print("Reading file contents and uploading into db table...")
    # insert timestamp column into existing csv; does not incorporate the header correctly for the timestamp yet
rows = []
with open(csvPath, 'r', newline='') as csvFile:
readCSV = csv.reader(csvFile, delimiter=',')
for row in readCSV:
rows.append(row)
    with open(csvPath, 'w', newline='') as writeFile:
file_write = csv.writer(writeFile)
for val in rows:
timestamp = datetime.now()
val.insert(0, timestamp)
file_write.writerow(val)
with open(csvPath) as csvFile:
readCSV = csv.reader(csvFile, delimiter=',')
columns = getColumns(readCSV)
insertData(tableName, columns, readCSV)
print ("Upload complete")
Above is a snippet of the code I'm working on. I am gathering data from a CSV to insert into a SQL database. Currently the CSV does not have a timestamp column, and without one the import won't work because it sees duplicate data. I found a solution at https://www.geeksforgeeks.org/how-to-add-timestamp-to-csv-file-in-python/ for adding a timestamp column and have incorporated it into the code, but it does not add a header for the new column. I'm sure it's an easy fix, but I am new to Python and can't find the solution anywhere else. Also, if you see something inefficient in updateTable, let me know so that I can recognize it and learn a better way.
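One minimal fix for the missing header (a sketch: the helper name is made up, and it assumes the first row of the CSV is always the header row) is to special-case row zero when rewriting the file:

import csv
from datetime import datetime

def add_timestamp_column(csvPath):
    with open(csvPath, 'r', newline='') as csvFile:
        rows = list(csv.reader(csvFile, delimiter=','))
    timestamp = datetime.now()
    with open(csvPath, 'w', newline='') as writeFile:
        file_write = csv.writer(writeFile)
        for i, val in enumerate(rows):
            # row 0 is the header: insert a column name rather than a value
            val.insert(0, 'Timestamp' if i == 0 else timestamp)
            file_write.writerow(val)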
I ended up using a different method, with import pandas as pd:
def updateTable(csvPath, tableName):
print("Updating table...")
print("Reading file contents and uploading into db table...")
    timestamp = datetime.now()
    df = pd.read_csv(csvPath)
    df.insert(0, 'timestamp', timestamp)  # pandas adds the 'timestamp' header for us
    df.to_csv(csvPath, index=False)
with open(csvPath) as csvFile:
readCSV = csv.reader(csvFile, delimiter=',')
columns = getColumns(readCSV)
insertData(tableName, columns, readCSV)
print ("Upload complete")
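Since pandas is already in play, the database insert itself could also be delegated to pandas (a sketch, assuming an SQLAlchemy engine; the connection URL is a placeholder):

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('mysql+pymysql://user:password@localhost/mydb')
df = pd.read_csv(csvPath)
df.to_sql(tableName, engine, if_exists='append', index=False)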
I am trying to export data into a CSV file using Python 3's csv writer. I need a way to automatically create a dictionary out of the object and its respective properties.
Here is my code:
#Import the appropriate models and functions needed for our script
from cbapi.response import *
import logging
import csv
#Connect to our CB Server
conn = CbResponseAPI()
#Sample Query
q = "ipAddress:000.00.0.0"
#Initial our query
process_query = conn.select(Process).where(q).group_by("id")
#Set your path
my_path='/Users/path/tt_123.csv'
#all object properties for event
objects=['childproc_count',
'cmdline',
'comms_ip',
'crossproc_count',
'filemod_count',
'filtering_known_dlls',
'group',
'host_type',
'id']
with open(my_path, 'w', newline='') as file:
header = objects #add column headers
writer = csv.DictWriter(file, fieldnames=header)
writer.writeheader()
for x in process_query:
p_1 = '{}'.format(x.id)
p_2 = '{}'.format(x.childproc_count)
p_3 = '{}'.format(x.cmdline)
p_4 = '{}'.format(x.comms_ip)
p_5 = '{}'.format(x.crossproc_count)
p_6 = '{}'.format(x.filemod_count)
p_7 = '{}'.format(x.filtering_known_dlls)
p_8 = '{}'.format(x.group)
p_9 = '{}'.format(x.host_type)
# Put them in a dictionary to write to csv file
dd={'id': p_1, 'child':p_2 , 'cmdline':p_3}
#Write rows to csv file
writer.writerow(dd)
It works this way, but is there a way to automatically gather all of the p_ variables (p_1, p_2, etc.) into a dictionary like the one shown in dd? I am new to Python and anything would help.
So, essentially dd would be:
dd = {'id': p_1, 'child': p_2, 'cmdline': p_3, 'comms_ip': p_4, 'crossproc_count': p_5, 'filemod_count': p_6, 'filtering_known_dlls': p_7, 'group': p_8, 'host_type': p_9}
Here's a little example that creates a couple of objects with attributes, then queries the attributes to write to a file.
import csv
class MyObject:
def __init__(self,id,cmd,p1,p2,p3):
self.id = id
self.cmdline = cmd
self.param1 = p1
self.param2 = p2
self.param3 = p3
objects = [MyObject(111,'string','param 1','param 2',123),
MyObject(222,'string2','param 1','param 2',456)]
headers = 'id cmdline param1 param2 param3'.split()
with open('output.csv', 'w', newline='') as file:
writer = csv.DictWriter(file, fieldnames=headers)
writer.writeheader()
for obj in objects:
# "dictionary comprehension" to build the key/value pairs
row = {item:getattr(obj,item) for item in headers}
writer.writerow(row)
Output:
id,cmdline,param1,param2,param3
111,string,param 1,param 2,123
222,string2,param 1,param 2,456
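Applied to the code in the question (a sketch, assuming each Process result exposes every name in objects as an attribute, as the p_ lines suggest), the loop collapses to:

with open(my_path, 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=objects)
    writer.writeheader()
    for x in process_query:
        # build each row straight from the attribute names
        writer.writerow({name: getattr(x, name) for name in objects})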
I am trying to generate a CSV file for each query's output. I have multiple SELECT queries in a single SQL file (queries.sql), and I am looping through them to execute each in the database and write each query's output to its own CSV file. When I execute the code, all the queries run in the database, but only the last query's result set is written to a CSV file; all the other CSV files end up with no records. Any help is appreciated.
col_pattern = "(^|[_-])SSN#?($|[_-])|^SS#?$|(^|[_-])(SSN|SOC.*SEC.*).?(ID|NO|NUMBERS?|NUM|NBR|#)($|[_-])|^SOCIAL.?SEC(URITY)?#?$"
SQL = "select OWNER||'_'||TABLE_NAME||'_'||column_name from ALL_TAB_COLS where REGEXP_LIKE (column_name, :1) and owner NOT IN ('SYS','SYSMAN') order by table_name,column_name"
cursor.execute(SQL,(col_pattern,))
for row_data in cursor:
if not row_data[0].startswith('BIN$'):
fileName = row_data[0]
csv_file_dest = "/u01/exp/test/identity/csvdata/"+ fileName + ".csv"
outputFile = open(csv_file_dest,'w') # 'wb'
output = csv.writer(outputFile, dialect='excel')
f = open('/u01/exp/test/identity/queries.sql')
full_sql = f.read()
sql_commands = full_sql.replace('\n', "").split(';')[:-1]
#print(sql_commands)
for sql_command in sql_commands:
curs2 = cursor.execute(sql_command)
if printHeader: # add column headers if requested
cols = []
for col in curs2.description:
cols.append(col[0])
output.writerow(cols)
for row_data in curs2: # add table rows
output.writerow(row_data)
outputFile.close()
It looks like all the SQL data is being written to whatever file the variable output was last set to: each pass through the outer loop opens a new file, so every file except the last is created and left empty, while the last one receives the results of all the queries.
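A sketch of one restructure (assuming cursor is an open cx_Oracle-style cursor, and numbering the output files per statement, since the original filename scheme came from a different query) that opens one file per statement:

import csv

with open('/u01/exp/test/identity/queries.sql') as f:
    sql_commands = f.read().replace('\n', ' ').split(';')[:-1]

for i, sql_command in enumerate(sql_commands):
    curs2 = cursor.execute(sql_command)
    csv_file_dest = '/u01/exp/test/identity/csvdata/query_{}.csv'.format(i)
    with open(csv_file_dest, 'w', newline='') as outputFile:
        output = csv.writer(outputFile, dialect='excel')
        output.writerow([col[0] for col in curs2.description])  # column headers
        for row_data in curs2:
            output.writerow(row_data)

This way output always points at the file that belongs to the statement just executed.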
I'm trying to export the attributes of multiple shapefiles, all contained in one folder, to a text file. I wrote the code below to do so, but it only exports the file names to the text file. Any ideas on what I may be doing wrong? I've been troubleshooting and researching for a while.
import arcpy
from arcpy import env
arcpy.env.workspace = "C:\\user\\rainfiles"
table = "C:\\user\\rain_files"
outWorkspace = "C:\\user"
fclist = arcpy.ListFields(table)
field_names = [field.name for field in fclist]
for field in fclist:
with open(r'C:\\user\\Exports.txt', 'w') as f:
for field in fclist:
f.write(field + '\n')
with open(r'C:\\user\\Exports.txt', 'r') as f:
w = csv.writer(f)
w.writerow(field_names)
for row in arcpy.SearchCursor(table):
field_vals = [row.getValue(field.name) for field in fclist]
w.writerow(field_vals)
del row
Here's one way:
import arcpy
import csv
f = open(r'C:\\user\\Exports.txt', 'w')
w = csv.writer(f, lineterminator='\n')
arcpy.env.workspace = "C:\\user\\rainfiles"
shapefileList = arcpy.ListFeatureClasses("*.shp")
for table in shapefileList:
f.write("Shapefile:\n")
f.write(table + "\n")
fieldList = arcpy.ListFields(table)
field_names = [field.name for field in fieldList]
w.writerow(field_names)
for row in arcpy.SearchCursor(table):
field_vals = []
for field in fieldList:
val = row.getValue(field.name)
# See if it's a geometry field; if so, use WKT
try:
val = val.WKT
except AttributeError:
# It's not a geometry, and that's okay
pass
field_vals.append(val)
        w.writerow(field_vals)
f.close()  # close the file so the buffered rows are flushed to disk