I'm trying to export the attributes of multiple shapefiles all contained in one folder to a text file. I wrote the code below to do so but it is only exporting the file names to a text file. Any ideas on what I may be doing wrong? I've been troubleshooting and researching for a while.
import arcpy
from arcpy import env
arcpy.env.workspace = "C:\\user\\rainfiles"
table = "C:\\user\\rain_files"
outWorkspace = "C:\\user"
fclist = arcpy.ListFields(table)
field_names = [field.name for field in fclist]
for field in fclist:
    with open(r'C:\\user\\Exports.txt', 'w') as f:
        for field in fclist:
            f.write(field + '\n')
    with open(r'C:\\user\\Exports.txt', 'r') as f:
        w = csv.writer(f)
        w.writerow(field_names)
        for row in arcpy.SearchCursor(table):
            field_vals = [row.getValue(field.name) for field in fclist]
            w.writerow(field_vals)
        del row
Here's one way:
import arcpy
import csv
f = open(r'C:\\user\\Exports.txt', 'w')
w = csv.writer(f, lineterminator='\n')
arcpy.env.workspace = "C:\\user\\rainfiles"
shapefileList = arcpy.ListFeatureClasses("*.shp")
for table in shapefileList:
    f.write("Shapefile:\n")
    f.write(table + "\n")
    fieldList = arcpy.ListFields(table)
    field_names = [field.name for field in fieldList]
    w.writerow(field_names)
    for row in arcpy.SearchCursor(table):
        field_vals = []
        for field in fieldList:
            val = row.getValue(field.name)
            # See if it's a geometry field; if so, use WKT
            try:
                val = val.WKT
            except AttributeError:
                # It's not a geometry, and that's okay
                pass
            field_vals.append(val)
        w.writerow(field_vals)
f.close()
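If you are on ArcGIS 10.1 or newer, arcpy.da.SearchCursor is the faster option and returns plain tuples, so there is no per-field getValue call. A sketch of the same export, assuming the geometry field is named Shape (the SHAPE@WKT token returns the geometry as well-known text):

import arcpy
import csv

arcpy.env.workspace = "C:\\user\\rainfiles"
with open(r'C:\\user\\Exports.txt', 'w', newline='') as f:
    w = csv.writer(f)
    for table in arcpy.ListFeatureClasses("*.shp"):
        w.writerow([table])
        field_names = [field.name for field in arcpy.ListFields(table)]
        w.writerow(field_names)
        # Swap the geometry field for SHAPE@WKT so it exports as text
        tokens = ['SHAPE@WKT' if name.lower() == 'shape' else name
                  for name in field_names]
        with arcpy.da.SearchCursor(table, tokens) as cursor:
            for row in cursor:
                w.writerow(row)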
I am new to Python and trying to create a polyline feature from lat/longs in a text file. Using answers to similar questions, below is the code I came up with. It doesn't throw an error, but it doesn't create a polyline feature either. LatLongs.txt contains three columns: 1) unique identifier, 2) latitude, 3) longitude. Any help is greatly appreciated.
import arcpy
arcpy.env.overwriteOutput=1
array = arcpy.Array()
outFolder=r'C:\Temp'
fc = 'PolylineCSV.shp'
spatRef = arcpy.SpatialReference(4326)
arcpy.CreateFeatureclass_management(out_path=outFolder, out_name=fc,
                                    geometry_type='POLYLINE',
                                    spatial_reference=spatRef)
featureList = []
cursor = arcpy.InsertCursor("C:\\Temp\\PolylineCSV.shp")
feat = cursor.newRow()
coordinateList = "C:\\Temp\\LatLongs.txt"
with open(coordinateList) as f:
    for line in f:
        SplitLine = line.split(',')
        x = float(SplitLine[2])  # Get longitude
        y = float(SplitLine[1])  # Get latitude
        point = arcpy.Point(x, y)
        array.add(point)
        try:
            nextline = next(f)
            SplitnextLine = nextline.split(',')
            x = float(SplitnextLine[2])
            y = float(SplitnextLine[1])
            point = arcpy.Point(x, y)
            array.add(point)
            polyline = arcpy.Polyline(array)
            array.removeAll()
            # Append to the list of Polyline objects
            featureList.append(polyline)
            feat.shape = polyline
            # Insert the feature
            cursor.insertRow(feat)
        except StopIteration as e:
            print("error handled")
del feat
del cursor
ArcGIS for Desktop
Your issue is most likely this line: feat.shape = polyline. feat has no shape property. You may use feat.setValue("shape", polyline). Here is an example based on your code (but without looping through a CSV file):
import os
import arcpy
output_folder = r"c:\temp"
output_file = "trips.shp"
sr = arcpy.SpatialReference(4326)
arcpy.CreateFeatureclass_management(
    output_folder, output_file, "POLYLINE", spatial_reference=sr)
cursor = arcpy.InsertCursor(os.path.join(output_folder, output_file))
array = arcpy.Array()
array.add(arcpy.Point(8.541, 47.374))
array.add(arcpy.Point(-63.716, 44.867))
polyline = arcpy.Polyline(array, spatial_reference=sr)
feature = cursor.newRow()
# set the geometry
feature.setValue("shape", polyline)
cursor.insertRow(feature)
del cursor
It looks like you are using ArcGIS for Desktop. If you are using 10.1 or newer, then you should use arcpy.da.InsertCursor instead of arcpy.InsertCursor.
[arcpy.InsertCursor] was superseded by arcpy.da.InsertCursor at ArcGIS 10.1. For faster performance, use arcpy.da.InsertCursor.
https://desktop.arcgis.com/en/arcmap/latest/analyze/python/data-access-using-cursors.htm
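For reference, a minimal sketch of the same insert rewritten with the newer cursor (SHAPE@ is the geometry token, and arcpy.da.InsertCursor works as a context manager, so the manual del goes away):

import os
import arcpy

output_folder = r"c:\temp"
output_file = "trips.shp"
sr = arcpy.SpatialReference(4326)
arcpy.CreateFeatureclass_management(
    output_folder, output_file, "POLYLINE", spatial_reference=sr)
array = arcpy.Array()
array.add(arcpy.Point(8.541, 47.374))
array.add(arcpy.Point(-63.716, 44.867))
polyline = arcpy.Polyline(array, spatial_reference=sr)
# SHAPE@ accepts a full geometry object directly
with arcpy.da.InsertCursor(
        os.path.join(output_folder, output_file), ["SHAPE@"]) as cursor:
    cursor.insertRow([polyline])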
ArcGIS Pro
If I understand your code correctly, every two consecutive rows (points) in your CSV file should form one line. Here are two working examples:
import os
import csv
import arcpy, arcpy.da, arcpy.management
Use arcpy.da.InsertCursor:
arcpy.env.overwriteOutput = True
output_folder = r"c:\temp"
output_file_name = "trips.shp"
input_file = r"d:\projects\playground\python\stackgis\data\trips.csv"
sr = arcpy.SpatialReference(4326)
output_feature_class = arcpy.management.CreateFeatureclass(
    output_folder, output_file_name, geometry_type="POLYLINE", spatial_reference=sr)
with open(input_file) as csv_file, \
        arcpy.da.InsertCursor(output_feature_class, ["SHAPE@"]) as cursor:
    reader = csv.reader(csv_file)
    next(reader)  # skip header line
    for row in reader:
        first_point = arcpy.Point(row[2], row[3])
        next_row = next(reader, None)
        if next_row:
            second_point = arcpy.Point(next_row[2], next_row[3])
            line = arcpy.Polyline(
                arcpy.Array([first_point, second_point]), spatial_reference=sr)
            cursor.insertRow([line])
Or, use arcpy.management.CopyFeatures:
arcpy.env.overwriteOutput = True
input_file = r"d:\projects\playground\python\stackgis\data\trips.csv"
output_file = r"c:\temp\trips.shp"
features = []
with open(input_file) as csv_file:
    reader = csv.reader(csv_file)
    next(reader)  # skip header line
    for row in reader:
        first_point = arcpy.Point(row[2], row[3])
        next_row = next(reader, None)
        if next_row:
            second_point = arcpy.Point(next_row[2], next_row[3])
            line = arcpy.Polyline(
                arcpy.Array([first_point, second_point]), spatial_reference=sr)
            features.append(line)
arcpy.management.CopyFeatures(features, output_file)
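A note on the trade-off between the two: CopyFeatures accepts the whole list of geometry objects in a single call, which is concise but keeps every feature in memory until the end, while the cursor version writes one row at a time and should scale better for large inputs.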
My task is to find the IfcQuantityArea values of all IfcWall elements in project.ifc and export those values to .csv along with other attributes such as GlobalId and Name.
The question is how I can return the result from the function, so I can store it as a variable or list and insert it into a column in my new .csv file.
I tried several ways; when I print the values they look fine, but I have no idea how to collect them into my .csv file. Maybe there is another approach to computing the IfcWall areas using some API functions? Any ideas, for either the Python or the ifcopenshell side?
import ifcopenshell
def print_quantities(property_definition):
    if 'IfcElementQuantity' == property_definition.is_a():
        for quantity in property_definition.Quantities:
            if 'IfcQuantityArea' == quantity.is_a():
                print('Area value: ' + str(quantity.AreaValue))

model = ifcopenshell.open('D:/.../project-modified.ifc')
products = model.by_type('IfcWall')
for product in products:
    if product.IsDefinedBy:
        definitions = product.IsDefinedBy
        for definition in definitions:
            if 'IfcRelDefinesByProperties' == definition.is_a():
                property_definition = definition.RelatingPropertyDefinition
                print_quantities(property_definition)
            if 'IfcRelDefinesByType' == definition.is_a():
                type = definition.RelatingType
                if type.HasPropertySets:
                    for property_definition in type.HasPropertySets:
                        print_quantities(property_definition)

import csv

header = ['GlobalId', 'Name', 'TotalArea']
data = []
for wall in model.by_type('IfcWall'):
    row = [wall.GlobalId, wall.Name, AreaValue]
    data.append(row)

with open('D:/.../quantities.csv', 'w', encoding='UTF8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(header)
    writer.writerows(data)
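Not a definitive answer, but a minimal sketch of one way to restructure it: have the function return the area instead of printing it, then build the CSV rows in a single pass over the walls. The get_area helper name is just for illustration, and, like your code, it assumes the value you want is the first IfcQuantityArea found on a wall's IfcElementQuantity:

import csv
import ifcopenshell

def get_area(product):
    # Illustrative helper: walk the same relations as print_quantities,
    # but return the value instead of printing it
    for definition in product.IsDefinedBy or []:
        if definition.is_a('IfcRelDefinesByProperties'):
            property_definition = definition.RelatingPropertyDefinition
            if property_definition.is_a('IfcElementQuantity'):
                for quantity in property_definition.Quantities:
                    if quantity.is_a('IfcQuantityArea'):
                        return quantity.AreaValue
    return None  # no area quantity found on this wall

model = ifcopenshell.open('D:/.../project-modified.ifc')

with open('D:/.../quantities.csv', 'w', encoding='UTF8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['GlobalId', 'Name', 'TotalArea'])
    for wall in model.by_type('IfcWall'):
        writer.writerow([wall.GlobalId, wall.Name, get_area(wall)])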
I am trying to export data into a CSV file using Python 3's csv writer. I need a way to automatically create a dictionary out of the object and its respective properties.
Here is my code:
#Import the appropriate models and functions needed for our script
from cbapi.response import *
import logging
import csv
#Connect to our CB Server
conn = CbResponseAPI()
#Sample Query
q = "ipAddress:000.00.0.0"
#Initialize our query
process_query = conn.select(Process).where(q).group_by("id")
#Set your path
my_path='/Users/path/tt_123.csv'
#all object properties for event
objects = ['childproc_count',
           'cmdline',
           'comms_ip',
           'crossproc_count',
           'filemod_count',
           'filtering_known_dlls',
           'group',
           'host_type',
           'id']
with open(my_path, 'w', newline='') as file:
    header = objects  # add column headers
    writer = csv.DictWriter(file, fieldnames=header)
    writer.writeheader()
    for x in process_query:
        p_1 = '{}'.format(x.id)
        p_2 = '{}'.format(x.childproc_count)
        p_3 = '{}'.format(x.cmdline)
        p_4 = '{}'.format(x.comms_ip)
        p_5 = '{}'.format(x.crossproc_count)
        p_6 = '{}'.format(x.filemod_count)
        p_7 = '{}'.format(x.filtering_known_dlls)
        p_8 = '{}'.format(x.group)
        p_9 = '{}'.format(x.host_type)
        # Put them in a dictionary to write to csv file
        dd = {'id': p_1, 'child': p_2, 'cmdline': p_3}
        # Write rows to csv file
        writer.writerow(dd)
It works this way, but is there a way to automatically gather all of the p_ variables (p_1, p_2, etc.) into a dictionary like the one shown in dd? I am new to Python and anything would help.
So, essentially dd would be:
dd={'id': p_1, 'child':p_2 , 'cmdline':p_3, 'comms_ip':p_4, 'crossproc_count':p_5, 'filemod_count':p_6, 'filtering_known_dlls':p_7, 'group':p_8, 'host_type':p_9 }
Here's a little example that creates a couple objects with attributes, then queries the attributes to write to a file.
import csv

class MyObject:
    def __init__(self, id, cmd, p1, p2, p3):
        self.id = id
        self.cmdline = cmd
        self.param1 = p1
        self.param2 = p2
        self.param3 = p3

objects = [MyObject(111, 'string', 'param 1', 'param 2', 123),
           MyObject(222, 'string2', 'param 1', 'param 2', 456)]

headers = 'id cmdline param1 param2 param3'.split()

with open('output.csv', 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=headers)
    writer.writeheader()
    for obj in objects:
        # "dictionary comprehension" to build the key/value pairs
        row = {item: getattr(obj, item) for item in headers}
        writer.writerow(row)
Output:
id,cmdline,param1,param2,param3
111,string,param 1,param 2,123
222,string2,param 1,param 2,456
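Applied to the cbapi query from the question, the same pattern would look roughly like this (a sketch: it assumes every name in objects is a real attribute of the Process object, with getattr's third argument supplying a default for any that are missing):

with open(my_path, 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=objects)
    writer.writeheader()
    for x in process_query:
        # Look up each column name on the object; '' if absent
        writer.writerow({name: getattr(x, name, '') for name in objects})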
I am trying to extract values from JSON-LD to CSV as they are in the file. There are a couple of issues I am facing.
1. The values being read for different fields are getting truncated in most of the cases. In the remaining cases the value of one field is appearing under another field.
2. I am also getting an error, 'Additional data', after some 4,000 lines.
The file is quite big (half a GB). I am attaching a shortened version of my code. Please tell me where I am going wrong.
The input file: I have shortened it and put it here, since there was no way to include the whole thing:
https://github.com/Architsi/json-ld-issue
I tried writing this script, and I also tried multiple online converters.
import csv, sys, math, operator, re, os, json, ijson
from pprint import pprint
filelist = []
for file in os.listdir("."):
    if file.endswith(".json"):
        filelist.append(file)

for input in filelist:
    newCsv = []
    splitlist = input.split(".")
    output = splitlist[0] + '.csv'
    newFile = open(output, 'w', newline='')  # wb for windows, else you'll see newlines added to csv
    # initialize csv writer
    writer = csv.writer(newFile)
    # Name of the columns
    header_row = ('Format', 'Description', 'Object', 'DataProvider')
    writer.writerow(header_row)
    with open(input, encoding="utf8") as json_file:
        data = ijson.items(json_file, 'item')
        # passing all the values through try except
        for s in data:
            source = s['_source']
            try:
                source_resource = source['sourceResource']
            except:
                print("Warning: No source resource in record ID: " + id)
            try:
                data_provider = source['dataProvider'].encode()
            except:
                data_provider = "N/A"
            try:
                _object = source['object'].encode()
            except:
                _object = "N/A"
            try:
                descriptions = source_resource['description']
                string = ""
                for item in descriptions:
                    if len(descriptions) > 1:
                        description = item.encode()  # + " | "
                    else:
                        description = item.encode()
                    string = string + description
                description = string.encode()
            except:
                description = "N/A"
            created = ""
            # writing it to csv
            write_tuple = ('format', description, _object, data_provider)
            writer.writerow(write_tuple)
    print("File written to " + output)
    newFile.close()
The error that I am getting is this: raise common.JSONError('Additional data')
The expected result is a CSV file with all the columns and correct values.
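For what it's worth, ijson raises 'Additional data' when a file contains more than one top-level JSON value, for example several JSON documents concatenated together. Recent ijson releases expose a multiple_values flag for exactly that case; a hedged sketch, assuming your records are still reachable under the 'item' prefix:

import ijson

with open(input, encoding="utf8") as json_file:
    # multiple_values=True lets the parser continue past the end of
    # the first top-level JSON value instead of raising "Additional data"
    for s in ijson.items(json_file, 'item', multiple_values=True):
        source = s['_source']
        # ... process the record as before ...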
I want to write the results of the for loop (PMID = ID of the literature, Date = date of publication, title = title of the article, Abstract = abstract of the article) to a CSV file, but it is saving only one element of the output, not all of them.
import numpy as np
from Bio import Entrez
from Bio import Medline
import csv
import pandas as pd
Entrez.email = "shayezkarimcide#gmail.com"
handle = Entrez.esearch(db="pmc",
                        term="Antimicrobial resistance Drug Resistance",
                        rettype="medline", retmode="txt",
                        retmax="200", sort="pub date")
result = Entrez.read(handle)
handle.close()

Id = result['IdList']
print(Id)

handle2 = Entrez.efetch(db="pmc",
                        id=Id, rettype="medline",
                        retmode="text")
records = Medline.parse(handle2)

header = ['ID', 'Date', 'Title', 'Abstract']

for result in records:
    PMID = result['PMID']
    Abstract = result['AB']
    title = result['TI']
    Date = result['DP']
    print(PMID, Date, title, Abstract)
    fields = [PMID, title, Date, Abstract]
    rows = [PMID, Date, title, Abstract]
    with open('/home/shayez/Desktop/karim.csv', 'wt') as csvfile:
        writer = csv.writer(csvfile, delimiter="\t")
        writer.writerow(header)
        writer.writerow(rows)
handle2.close()
You are opening the file, writing to it, and closing it inside the loop (the with statement makes sure the file is closed when its block ends), so each iteration replaces the file written by the previous one.
Try opening the file only once, before the loop:
with open('/home/shayez/Desktop/karim.csv', 'wt') as csvfile:
    writer = csv.writer(csvfile, delimiter="\t")
    writer.writerow(header)
    for result in records:
        PMID = result['PMID']
        Abstract = result['AB']
        title = result['TI']
        Date = result['DP']
        print(PMID, Date, title, Abstract)
        rows = [PMID, Date, title, Abstract]
        writer.writerow(rows)
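One caveat worth adding: Medline records behave like dictionaries but don't always carry every field, so result['AB'] raises a KeyError on records without an abstract. Using .get() with a default sidesteps that; a small sketch of the inner loop:

for result in records:
    # .get() returns '' instead of raising when a field is missing
    rows = [result.get('PMID', ''), result.get('DP', ''),
            result.get('TI', ''), result.get('AB', '')]
    writer.writerow(rows)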