The purpose of this code is to make a PDF map book that displays all of the large lakes in North America. When I run it, it produces a blank PDF. How can I fix this?
## Import arcpy module
import arcpy
import math
import os
from arcpy import env
arcpy.env.overwriteOutput = True
# Define inputs and outputs - Script arguments
arcpy.env.workspace = r"F:\Geog173\Lab7\Lab7_Data"
Lakes = "NA_Big_Lakes.shp"
Cities = "NA_Cities.shp"
NA = "North_America.shp"
##Python arguments
## Arguments = NA_Big_Lakes.shp NA_Cities.shp New_Lakes.shp Center_Lakes.shp
Lakes= 'NA_Big_Lakes.shp'
NA = 'North_America.shp'
Cities = 'NA_Cities.shp'
##New_Lakes = 'New_Lakes.shp'
##Center_Lakes = 'Center_Lakes.shp'
# Identify the geometry field
desc = arcpy.Describe(Lakes)
shapeName = desc.ShapeFieldName
# Identify the geometry field in Cities shapefile
##desc = arcpy.Describe(Cities)
##shapefieldnameCity = desc.ShapeFieldName
#Get lake cursor
inrows = arcpy.SearchCursor(Lakes)
# Set up variables for output path and PDF file name
outDir = r"F:\Geog173\Lab7\Lab7_Data"
finalMapPDF_filename = outDir + r"\NA_Big_Lake_Mapbook.pdf"
# Check whether the mapbook PDF exists. If it does, delete it.
if os.path.exists(finalMapPDF_filename):
    os.remove(finalMapPDF_filename)
# Create map book PDF
finalMapPDF = arcpy.mapping.PDFDocumentCreate(finalMapPDF_filename)
# Create MapDocument object pointing to specified mxd
mxd = arcpy.mapping.MapDocument(outDir + r"\OriginalMap.mxd")
# Get dataframe
df = arcpy.mapping.ListDataFrames(mxd)[0]
# ----------------------------------------------------------------------------#
# Start appending pages. Title page first.
# ----------------------------------------------------------------------------#
# Find text element with value "test", and replace it with other value
mapText = "A Map Book for North American Large Lakes " + '\n\r' + "Kishore, A., Geog173, Geography, UCLA" + '\n\r' + " Lake number: 18" + '\n\r' + " Total area: 362117 km2" + '\n\r' + " Mean area: 20118 km2"
print mapText
for elm in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
    if elm.text == "test":
        elm.text = mapText
arcpy.RefreshTOC()
arcpy.RefreshActiveView()
#df.extent = feature.extent
arcpy.mapping.ExportToPDF(mxd, outDir + r"\TempMapPages.pdf")
# Append multi-page PDF to finalMapPDF
finalMapPDF.appendPages(outDir + r"\TempMapPages.pdf")
#initialize text value, so it can be reused in next iteration
for elm in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
    if elm.text == mapText:
        elm.text = "test"
# ----------------------------------------------------------------------------#
# Loop through each lake
# ----------------------------------------------------------------------------#
# Loop through each row/feature
lakecount = 0
for row in inrows:
    lakecount = lakecount + 1
    CITY_NAME = ""
    CNTRY_NAME = ""
    ADMIN_NAME = ""
    POP_CLASS = ""
    DISTANCE = 0
    XY = ""
    #print "shapeName" , shapeName
    # Create the geometry object
    feature = row.getValue(shapeName)
    mapText = "Lake FID: " + str(row.FID) + ", Area (km2): " + str(row.Area_km2)
    print mapText
    # Find text element with value "test", and replace it with other value
    for elm in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
        if elm.text == "test":
            elm.text = mapText
    arcpy.RefreshTOC()
    arcpy.RefreshActiveView()
    df.extent = feature.extent
    arcpy.mapping.ExportToPDF(mxd, outDir + r"\TempMapPages.pdf")
    # Append multi-page PDF to finalMapPDF
    finalMapPDF.appendPages(outDir + r"\TempMapPages.pdf")
# Set up properties for Adobe Reader and save PDF.
finalMapPDF.updateDocProperties(pdf_open_view = "USE_THUMBS",
                                pdf_layout = "SINGLE_PAGE")
finalMapPDF.saveAndClose()
# Done. Clean up and let user know the process has finished.
del row, inrows
del mxd, finalMapPDF
print "Map book for lakes in North America is complete!"
First off, remove the last lines of your code where you delete the mxd object, then run the script again and inspect the MXD: are the data layers drawing properly? I recommend getting the code completely working before adding file cleanup, so you can identify potential errors.
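For example, a minimal debugging sketch (assumes ArcGIS 10.x with arcpy.mapping; the debug file name is just a placeholder):

# Comment out the cleanup while debugging and save a copy of the map document,
# so you can open it in ArcMap and check that the layers and text elements draw.
# del mxd, finalMapPDF
mxd.saveACopy(outDir + r"\DebugMap.mxd")
print "Wrote DebugMap.mxd - open it in ArcMap and check that the layers draw"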
Currently I am using the following code to find and replace placeholders (text data) in existing PowerPoint presentations.
import os
from pptx import Presentation

current_dir = os.path.dirname(os.path.realpath(__file__))
prs = Presentation(current_dir + '/test2.pptx')
slides = prs.slides
title_slide_layout = prs.slide_layouts[0]
slide = slides[0]
for shape in slide.placeholders:
    print('%d %s' % (shape.placeholder_format.idx, shape.name))
title = slide.shapes.title
subtitle1 = slide.shapes.placeholders[0]
subtitle2 = slide.shapes.placeholders[10]
subtitle10 = slide.shapes.placeholders[11]
subtitle11 = slide.shapes.placeholders[12]
subtitle1.text = "1"
subtitle2.text = "2"
subtitle10.text = "3"
subtitle11.text = "4"
slide2 = slides[1]
for shape in slide2.placeholders:
    print('%d %s' % (shape.placeholder_format.idx, shape.name))
subtitle3 = slide2.shapes.placeholders[10]
subtitle4 = slide2.shapes.placeholders[11]
subtitle5 = slide2.shapes.placeholders[12]
subtitle6 = slide2.shapes.placeholders[13]
subtitle12 = slide2.shapes.placeholders[16]
companydate = slide2.shapes.placeholders[14]
subtitle3.text = "1"
subtitle4.text = "2"
subtitle5.text = "3"
subtitle6.text = "4"
subtitle12.text = "40%"
companydate.text = "Insert company"
slide3 = slides[2]
for shape in slide3.placeholders:
    print('%d %s' % (shape.placeholder_format.idx, shape.name))
subtitle7 = slide3.shapes.placeholders[10]
subtitle8 = slide3.shapes.placeholders[11]
subtitle9 = slide3.shapes.placeholders[12]
subtitle13 = slide3.shapes.placeholders[16]
companydate2 = slide3.shapes.placeholders[14]
subtitle7.text = "1"
subtitle8.text = "2"
subtitle9.text = "3"
subtitle13.text = "5x"
companydate2.text = "Insert Company"
slide4 = slides[3]
# for shape in slide4.placeholders:
#print('%d %s' % (shape.placeholder_format.idx, shape.name))
companydate3 = slide4.shapes.placeholders[14]
companydate3.text = "Insert Company"
"'Adapting Charts'"
from pptx.chart.data import ChartData
from pptx.enum.chart import XL_CHART_TYPE
from pptx.util import Pt
"Adapting Chart 1"
prs1 = Presentation(current_dir + '/output4.pptx')
slides1 = prs1.slides
chart1 = prs1.slides[0].chart
However, I am also running analytics in the background, and I was wondering if it is possible to recognize (find) the charts in the same presentation and to extract and replace the data in those charts. These charts are not embedded in the template.
Since plotting charts with plotly or matplotlib does not render a compliant image, I am not able to use those unless they are fully modified into the following format: Graph budget Click Correl
If yes, would it be possible to give concrete coding examples?
Thanks in advance!
Yes, it's possible to do that. The documentation will be your best source.
This will find the chart shapes:
for shape in slide.shapes:
    if shape.has_chart:
        chart = shape.chart
        print('found a chart')
Data is extracted from the chart's series:
for series in chart.series:
    for value in series.values:
        print(value)
Data is replaced by creating a new ChartData object and calling .replace_data() on the chart using that chart data object:
chart_data = ChartData(...)
... # add categories, series with values, etc.
chart.replace_data(chart_data)
http://python-pptx.readthedocs.io/en/latest/api/chart.html#pptx.chart.chart.Chart.replace_data
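Putting those pieces together, a minimal end-to-end sketch could look like the following (assumes category-style charts such as bar/line/pie; the file names, series name and dummy values are placeholders, not part of your deck):

from pptx import Presentation
from pptx.chart.data import CategoryChartData

prs = Presentation('output4.pptx')  # placeholder file name
for slide in prs.slides:
    for shape in slide.shapes:
        if not shape.has_chart:
            continue
        chart = shape.chart
        # keep the existing categories, swap in new series values
        categories = list(chart.plots[0].categories)
        chart_data = CategoryChartData()
        chart_data.categories = categories
        chart_data.add_series('Series 1', [i * 10 for i in range(len(categories))])
        chart.replace_data(chart_data)
prs.save('output4_updated.pptx')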
Adding to the answer by @scanny above, this worked for me:
if shape.name == 'Chart1':
    chart = shape.chart
    print(shape.name)
    for series in chart.plots:
        print(list(series.categories))
        cat = list(series.categories)
    for series in chart.series:
        ser = series.values
        print(series.values)
    try:
        # ---define new chart data---
        chart_data = CategoryChartData()
        chart_data.categories = cat
        chart_data.add_series('category', df['column'])
        # ---replace chart data---
        chart.replace_data(chart_data)
    except KeyError:
        continue
Using the code above, you can print the categories and the series values, then replace them with your new values (while keeping the categories the same).
I added the KeyError exception because without it you get an "rId3" error; from the forums it seems there is some XML-writing issue when saving to PPTX.
I'm trying to use the latitude and longitude returned by geopy to create a shapefile. The shapefile-creation part works fine if I give it a pair of numbers (44.977753, -93.2650108), but it will not work with the returned data lat_long. My thought is that it needs a ",", but I don't know.
from geopy.geocoders import GoogleV3
import csv
import ogr, osr, os
def geopy():
    loc = raw_input("What location? ")
    geolocator = GoogleV3()
    location = geolocator.geocode(loc, exactly_one=True)
    if location != None:
        Address = location.address
        lat_long = location.latitude, location.longitude
        latitude = str(location.latitude)
        longitude = str(location.longitude)
        print Address, latitude, longitude
        print ""
    else:
        print "There is no geographic information to return for the word in input. \n"
    # Input data
    pointCoord = lat_long
    fieldName = 'test'
    fieldType = ogr.OFTString
    fieldValue = 'test'
    outSHPfn = "output file"
    # create the spatial reference, WGS84
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    # Create the output shapefile
    shpDriver = ogr.GetDriverByName("ESRI Shapefile")
    if os.path.exists(outSHPfn):
        shpDriver.DeleteDataSource(outSHPfn)
    outDataSource = shpDriver.CreateDataSource(outSHPfn)
    outLayer = outDataSource.CreateLayer(outSHPfn, srs, geom_type = ogr.wkbPoint )
    #create point geometry
    point = ogr.Geometry(ogr.wkbPoint)
    point.AddPoint(pointCoord[0],pointCoord[1])
    # create a field
    idField = ogr.FieldDefn(fieldName, fieldType)
    outLayer.CreateField(idField)
    # Create the feature and set values
    featureDefn = outLayer.GetLayerDefn()
    outFeature = ogr.Feature(featureDefn)
    outFeature.SetGeometry(point)
    outFeature.SetField(fieldName, fieldValue)
    outLayer.CreateFeature(outFeature)
geopy()
You need to add a loop to put the latitude and longitude into a list. This code will create a point shapefile for any location you give it.
from geopy.geocoders import GoogleV3
import csv
import ogr, osr, os
def geopy():
    """This function takes the word given above
    and uses GoogleV3 to search for a location. If a
    location is found it then returns the Address, latitude and longitude.
    It then prints that information to a .CSV"""
    geolocator = GoogleV3()
    loc_input = raw_input("Add the location you would like data back for: ")
    location = geolocator.geocode(loc_input, exactly_one=True)
    if location != None:
        Address = location.address
        lat_lon = location.latitude, location.longitude
        latitude = str(location.latitude)
        longitude = str(location.longitude)
        print Address, latitude, longitude
        print ""
        # Converts lat_lon to a list for use in making the shapefile.
        list_lat = []
        for i in range(1):
            list_lat.append(lat_lon)
        for list_of_lat_lon in list_lat:
            print ""
            # Calls list_of_lat_lon for the shapefile function
            shapefile(list_of_lat_lon)
    # If there is no location data to return it prints the below line and does not create a shapefile
    else:
        print "There is no geographic information to return for the word in input. \n"

def shapefile(list_of_lat_lon):
    """This function uses GDAL to return an ESRI shapefile;
    it uses the latitude and longitude in the list_of_lat_lon list.
    """
    # Input data
    pointCoord = list_of_lat_lon
    fieldName = 'Lat'
    fieldType = ogr.OFTString
    fieldValue = 'test'
    outSHPfn = "Input file location"
    # create the spatial reference, WGS84
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    # Create the output shapefile
    shpDriver = ogr.GetDriverByName("ESRI Shapefile")
    if os.path.exists(outSHPfn):
        shpDriver.DeleteDataSource(outSHPfn)
    outDataSource = shpDriver.CreateDataSource(outSHPfn)
    outLayer = outDataSource.CreateLayer(outSHPfn, srs, geom_type = ogr.wkbPoint )
    # create point geometry, longitude first then latitude
    point = ogr.Geometry(ogr.wkbPoint)
    point.AddPoint(pointCoord[1],pointCoord[0])
    # create a field
    idField = ogr.FieldDefn(fieldName, fieldType)
    outLayer.CreateField(idField)
    # Create the feature and set values
    featureDefn = outLayer.GetLayerDefn()
    outFeature = ogr.Feature(featureDefn)
    outFeature.SetGeometry(point)
    outFeature.SetField(fieldName, fieldValue)
    outLayer.CreateFeature(outFeature)

geopy()
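One extra note, separate from the answer above: with the old-style ogr bindings the shapefile is often not fully flushed to disk until the feature and data source are released, so it is worth doing that explicitly at the end of shapefile(). A small sketch:

# At the end of shapefile(): release the OGR objects so the file is
# flushed and closed on disk before the script exits.
outFeature.Destroy()
outDataSource.Destroy()  # equivalently: outFeature = None; outDataSource = None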
I'm writing a Python script that will color in each of my city's Census Block Groups (of which there are 18) with a different color according to its median household income, on a map that's in the SVG format.
Sounds simple enough, right? Well, I can't figure out how, though I'm making slight progress. What I've tried so far is making a list of the block group paths in the order the SVG references them, making a list of the median household incomes, and then passing in the code that colors them. However, this just doesn't seem to be working, for whatever reason. Can any of you wonderful people help me figure out where I'm misfiring?
import csv
from bs4 import BeautifulSoup
icbg = []
reader = csv.reader(open('censusdata.csv'),delimiter=",")
#read and get income
for row in reader:
    income = row[6]
    income = int(income)
    icbg.append(income)
svg = open('NM2.svg','r')
soup = BeautifulSoup(svg,"lxml")
#find CBGs and incomes
path1 = soup.find('path')
path2 = path1.find_next('path')
path3 = path2.find_next('path')
path4 = path3.find_next('path')
path5 = path4.find_next('path')
path6 = path5.find_next('path')
path7 = path6.find_next('path')
path8 = path7.find_next('path')
path9 = path8.find_next('path')
path10 = path9.find_next('path')
path11 = path10.find_next('path')
path12 = path11.find_next('path')
path13 = path12.find_next('path')
path14 = path13.find_next('path')
path15 = path14.find_next('path')
path16 = path15.find_next('path')
path17 = path16.find_next('path')
path18 = path17.find_next('path')
incomep1 = icbg[0]
incomep2 = icbg[1]
incomep3 = icbg[2]
incomep4 = icbg[3]
incomep5 = icbg[4]
incomep6 = icbg[5]
incomep7 = icbg[6]
incomep8 = icbg[7]
incomep9 = icbg[8]
incomep10 = icbg[9]
incomep11 = icbg[10]
incomep12 = icbg[11]
incomep13 = icbg[12]
incomep14 = icbg[13]
incomep15 = icbg[14]
incomep16 = icbg[15]
incomep17 = icbg[16]
incomep18 = icbg[17]
paths = (path1, path2, path3, path4, path5, path6, path7, path8, path9, path10,
path11, path12, path13, path14, path15, path16, path17, path18)
incomes = (incomep1,incomep2,incomep3,incomep4,incomep5,incomep6,incomep7,incomep8,
incomep9,incomep10,incomep11,incomep12,incomep13,incomep14,incomep15,incomep16,incomep17,incomep18)
#set colors
colors = ['fee5d9','fcae91','fb6a4a','de2d26','a50f15']
for p in paths:
    for i in range(0,17):
        it = incomes[i]
        if it > 20000:
            color_class = 2
        elif it > 25000:
            color_class = 1
        elif it > 30000:
            color_class = 3
        elif it > 35000:
            color_class = 4
        color = colors[color_class]
        path_style = "font-size:12px;fill:#%s;fill-rule:nonzero;stroke:#000000;stroke-opacity:1;stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel" % color
        p['style'] = path_style
print(soup.prettify())
Running this gives me an SVG file in which the same style string (fill:#fb6a4a;fill-rule:nonzero;stroke:#000000;stroke-opacity:1;stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel) comes up 18 times, once for every path, even though these paths have different incomes.
Could the problem be with the way I wrote my comparisons?
From my understanding of what you are trying to do, the problem is that you have two for loops instead of one: you should loop through the paths and incomes at the same time. The way you are doing it now, you loop through all the incomes for each path, so every path ends up styled by the last income checked. The following code simply moves the path lookup into the same loop as the incomes so they are stepped through together.
for i in range(0,17):
    it = incomes[i]
    p = paths[i]
    if it > 20000:
        color_class = 2
    elif it > 25000:
        color_class = 1
    elif it > 30000:
        color_class = 3
    elif it > 35000:
        color_class = 4
    color = colors[color_class]
    path_style = "font-size:12px;fill:#%s;fill-rule:nonzero;stroke:#000000;stroke-opacity:1;stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel" % color
    p['style'] = path_style
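Two further points worth checking, since you asked about the comparisons: range(0,17) stops at index 16, so the 18th path is never styled, and because if it > 20000 is tested first, every income above 20000 falls into color class 2 and the later elif branches never fire. Here is a sketch of the same loop using zip() and testing the thresholds in descending order (the class numbers are kept from your code; the else bucket for incomes of 20000 or less is my assumption):

for p, it in zip(paths, incomes):
    if it > 35000:
        color_class = 4
    elif it > 30000:
        color_class = 3
    elif it > 25000:
        color_class = 1
    elif it > 20000:
        color_class = 2
    else:
        color_class = 0  # assumed bucket for incomes of 20000 or less
    color = colors[color_class]
    path_style = "font-size:12px;fill:#%s;fill-rule:nonzero;stroke:#000000;stroke-opacity:1;stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel" % color
    p['style'] = path_style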
I'm using a script I found online to convert some files by parsing some XML. The script was built for Python 2.6, and it uses a module that, from what I've read on the web, doesn't come with 2.6. I'm wondering if there's a workaround. The error I am getting is:
No Module name EXT
In the following script, I think it's getting hung up on import xml.dom.ext, and it only seems to use that module at the very end to PrettyPrint (see the very last try statement). Is there a workaround for this in 2.6? I can't seem to find a module that contains the EXT object which I can import.
The script is:
from xml.dom.minidom import Document
import xml.dom.ext
import string
import os
import arcpy
#Read input parameters from GP dialog
output = arcpy.GetParameterAsText(0)
#Create an output qgs file
f = open(output, "w")
# Create the minidom
doc = Document()
# Create the <qgis> base element
qgis = doc.createElement("qgis")
qgis.setAttribute("projectname", " ")
qgis.setAttribute("version", "1.6.0-Capiapo")
doc.appendChild(qgis)
# Create the <title> element
title = doc.createElement("title")
qgis.appendChild(title)
# Assign current document
mxd = arcpy.mapping.MapDocument("CURRENT")
print 'Converting mxd........'
# Dataframe elements
df = arcpy.mapping.ListDataFrames(mxd)[0]
unit = doc.createTextNode(df.mapUnits)
xmin1 = doc.createTextNode(str(df.extent.XMin))
ymin1 = doc.createTextNode(str(df.extent.YMin))
xmax1 = doc.createTextNode(str(df.extent.XMax))
ymax1 = doc.createTextNode(str(df.extent.YMax))
# srsid = doc.createTextNode
srid1 = doc.createTextNode(str(df.spatialReference.factoryCode))
srid2 = doc.createTextNode(str(df.spatialReference.factoryCode))
epsg1 = doc.createTextNode(str(df.spatialReference.factoryCode))
epsg2 = doc.createTextNode(str(df.spatialReference.factoryCode))
description1 = doc.createTextNode(str(df.spatialReference.name))
description2 = doc.createTextNode(str(df.spatialReference.name))
ellipsoidacronym1 = doc.createTextNode(str(df.spatialReference.name))
ellipsoidacronym2 = doc.createTextNode(str(df.spatialReference.name))
geographicflag1 = doc.createTextNode("true")
geographicflag2 = doc.createTextNode("true")
authid2 = doc.createTextNode("EPSG:"+str(df.spatialReference.factoryCode))
authid3 = doc.createTextNode("EPSG:"+str(df.spatialReference.factoryCode))
# Layerlist elements
lyrlist = arcpy.mapping.ListLayers(df)
count1 = str(len(lyrlist))
# mapcanvas
def map_canvas():
    # Create the <mapcanvas> element
    mapcanvas = doc.createElement("mapcanvas")
    qgis.appendChild(mapcanvas)
    # Create the <units> element
    units = doc.createElement("units")
    units.appendChild(unit)
    mapcanvas.appendChild(units)
    # Create the <extent> element
    extent = doc.createElement("extent")
    mapcanvas.appendChild(extent)
    # Create the <xmin> element
    xmin = doc.createElement("xmin")
    xmin.appendChild(xmin1)
    extent.appendChild(xmin)
    # Create the <ymin> element
    ymin = doc.createElement("ymin")
    ymin.appendChild(ymin1)
    extent.appendChild(ymin)
    # Create the <xmax> element
    xmax = doc.createElement("xmax")
    xmax.appendChild(xmax1)
    extent.appendChild(xmax)
    # Create the <ymax> element
    ymax = doc.createElement("ymax")
    ymax.appendChild(ymax1)
    extent.appendChild(ymax)
    # Create the <projections> element
    projections = doc.createElement("projections")
    mapcanvas.appendChild(projections)
    # Create the <destinationsrs> element
    destinationsrs = doc.createElement("destinationsrs")
    mapcanvas.appendChild(destinationsrs)
    # Create the <spatialrefsys> element
    spatialrefsys = doc.createElement("spatialrefsys")
    destinationsrs.appendChild(spatialrefsys)
    # Create the <proj4> element
    proj4 = doc.createElement("proj4")
    spatialrefsys.appendChild(proj4)
    # Create the <srsid> element
    srsid = doc.createElement("srsid")
    spatialrefsys.appendChild(srsid)
    # Create the <srid> element
    srid = doc.createElement("srid")
    srid.appendChild(srid1)
    spatialrefsys.appendChild(srid)
    # Create the <authid> element
    authid = doc.createElement("authid")
    authid.appendChild(authid2)
    spatialrefsys.appendChild(authid)
    # Create the <description> element
    description = doc.createElement("description")
    description.appendChild(description1)
    spatialrefsys.appendChild(description)
    # Create the <projectionacronym> element
    projectionacronym = doc.createElement("projectionacronym")
    spatialrefsys.appendChild(projectionacronym)
    # Create the <ellipsoidacronym element
    ellipsoidacronym = doc.createElement("ellipsoidacronym")
    ellipsoidacronym.appendChild(ellipsoidacronym1)
    spatialrefsys.appendChild(ellipsoidacronym)
    # Create the <geographicflag> element
    geographicflag = doc.createElement("geographicflag")
    geographicflag.appendChild(geographicflag1)
    spatialrefsys.appendChild(geographicflag)
# Legend
def legend_func():
    # Create the <legend> element
    legend = doc.createElement("legend")
    qgis.appendChild(legend)
    for lyr in lyrlist:
        if(lyr.isGroupLayer == False):
            # Create the <legendlayer> element
            legendlayer = doc.createElement("legendlayer")
            legendlayer.setAttribute("open", "true")
            legendlayer.setAttribute("checked", "Qt::Checked")
            legendlayer.setAttribute("name",str(lyr.name))
            legend.appendChild(legendlayer)
            # Create the <filegroup> element
            filegroup = doc.createElement("filegroup")
            filegroup.setAttribute("open", "true")
            filegroup.setAttribute("hidden", "false")
            legendlayer.appendChild(filegroup)
            # Create the <legendlayerfile> element
            legendlayerfile = doc.createElement("legendlayerfile")
            legendlayerfile.setAttribute("isInOverview", "0")
            legendlayerfile.setAttribute("layerid", str(lyr.name)+str(20110427170816078))
            legendlayerfile.setAttribute("visible", "1")
            filegroup.appendChild(legendlayerfile)
# Project Layers
def project_layers():
    # Create the <projectlayers> element
    projectlayers = doc.createElement("projectlayers")
    projectlayers.setAttribute("layercount", count1)
    qgis.appendChild(projectlayers)
    for lyr in lyrlist:
        if(lyr.isGroupLayer == False and lyr.isRasterLayer == False):
            geometry1 = arcpy.Describe(lyr)
            geometry2 = str(geometry1.shapeType)
            ds = doc.createTextNode(str(lyr.dataSource))
            name1 = doc.createTextNode(str(lyr.name)+str(20110427170816078))
            name2 = doc.createTextNode(str(lyr.name))
            # Create the <maplayer> element
            maplayer = doc.createElement("maplayer")
            maplayer.setAttribute("minimumScale", "0")
            maplayer.setAttribute("maximumScale", "1e+08")
            maplayer.setAttribute("minLabelScale", "0")
            maplayer.setAttribute("maxLabelScale", "1e+08")
            maplayer.setAttribute("geometry", geometry2)
            if(lyr.isRasterLayer == True):
                maplayer.setAttribute("type", "raster")
            else:
                maplayer.setAttribute("type", "vector")
            maplayer.setAttribute("hasScaleBasedVisibilityFlag", "0")
            maplayer.setAttribute("scaleBasedLabelVisibilityFlag", "0")
            projectlayers.appendChild(maplayer)
            # Create the <id> element
            id = doc.createElement("id")
            id.appendChild(name1)
            maplayer.appendChild(id)
            # Create the <datasource> element
            datasource = doc.createElement("datasource")
            datasource.appendChild(ds)
            maplayer.appendChild(datasource)
            # Create the <layername> element
            layername = doc.createElement("layername")
            layername.appendChild(name2)
            maplayer.appendChild(layername)
            # Create the <srs> element
            srs = doc.createElement("srs")
            maplayer.appendChild(srs)
            # Create the <spatialrefsys> element
            spatialrefsys = doc.createElement("spatialrefsys")
            srs.appendChild(spatialrefsys)
            # Create the <proj4> element
            proj4 = doc.createElement("proj4")
            spatialrefsys.appendChild(proj4)
            # Create the <srsid> element
            srsid = doc.createElement("srsid")
            spatialrefsys.appendChild(srsid)
            # Create the <srid> element
            srid = doc.createElement("srid")
            srid.appendChild(srid2)
            spatialrefsys.appendChild(srid)
            # Create the <authid> element
            authid = doc.createElement("authid")
            authid.appendChild(authid3)
            spatialrefsys.appendChild(authid)
            # Create the <description> element
            description = doc.createElement("description")
            description.appendChild(description2)
            spatialrefsys.appendChild(description)
            # Create the <projectionacronym> element
            projectionacronym = doc.createElement("projectionacronym")
            spatialrefsys.appendChild(projectionacronym)
            # Create the <ellipsoidacronym element
            ellipsoidacronym = doc.createElement("ellipsoidacronym")
            ellipsoidacronym.appendChild(ellipsoidacronym2)
            spatialrefsys.appendChild(ellipsoidacronym)
            # Create the <geographicflag> element
            geographicflag = doc.createElement("geographicflag")
            geographicflag.appendChild(geographicflag2)
            spatialrefsys.appendChild(geographicflag)
            # Create the <transparencyLevelInt> element
            transparencyLevelInt = doc.createElement("transparencyLevelInt")
            transparency2 = doc.createTextNode("255")
            transparencyLevelInt.appendChild(transparency2)
            maplayer.appendChild(transparencyLevelInt)
            # Create the <customproperties> element
            customproperties = doc.createElement("customproperties")
            maplayer.appendChild(customproperties)
            # Create the <provider> element
            provider = doc.createElement("provider")
            provider.setAttribute("encoding", "System")
            ogr = doc.createTextNode("ogr")
            provider.appendChild(ogr)
            maplayer.appendChild(provider)
            # Create the <singlesymbol> element
            singlesymbol = doc.createElement("singlesymbol")
            maplayer.appendChild(singlesymbol)
            # Create the <symbol> element
            symbol = doc.createElement("symbol")
            singlesymbol.appendChild(symbol)
            # Create the <lowervalue> element
            lowervalue = doc.createElement("lowervalue")
            symbol.appendChild(lowervalue)
            # Create the <uppervalue> element
            uppervalue = doc.createElement("uppervalue")
            symbol.appendChild(uppervalue)
            # Create the <label> element
            label = doc.createElement("label")
            symbol.appendChild(label)
            # Create the <rotationclassificationfieldname> element
            rotationclassificationfieldname = doc.createElement("rotationclassificationfieldname")
            symbol.appendChild(rotationclassificationfieldname)
            # Create the <scaleclassificationfieldname> element
            scaleclassificationfieldname = doc.createElement("scaleclassificationfieldname")
            symbol.appendChild(scaleclassificationfieldname)
            # Create the <symbolfieldname> element
            symbolfieldname = doc.createElement("symbolfieldname")
            symbol.appendChild(symbolfieldname)
            # Create the <outlinecolor> element
            outlinecolor = doc.createElement("outlinecolor")
            outlinecolor.setAttribute("red", "88")
            outlinecolor.setAttribute("blue", "99")
            outlinecolor.setAttribute("green", "37")
            symbol.appendChild(outlinecolor)
            # Create the <outlinestyle> element
            outlinestyle = doc.createElement("outlinestyle")
            outline = doc.createTextNode("SolidLine")
            outlinestyle.appendChild(outline)
            symbol.appendChild(outlinestyle)
            # Create the <outlinewidth> element
            outlinewidth = doc.createElement("outlinewidth")
            width = doc.createTextNode("0.26")
            outlinewidth.appendChild(width)
            symbol.appendChild(outlinewidth)
            # Create the <fillcolor> element
            fillcolor = doc.createElement("fillcolor")
            fillcolor.setAttribute("red", "90")
            fillcolor.setAttribute("blue", "210")
            fillcolor.setAttribute("green", "229")
            symbol.appendChild(fillcolor)
            # Create the <fillpattern> element
            fillpattern = doc.createElement("fillpattern")
            fill = doc.createTextNode("SolidPattern")
            fillpattern.appendChild(fill)
            symbol.appendChild(fillpattern)
            # Create the <texturepath> element
            texturepath = doc.createElement("texturepath")
            texturepath.setAttribute("null", "1")
            symbol.appendChild(texturepath)
map_canvas()
legend_func()
project_layers()
# Write to qgis file
try:
    xml.dom.ext.PrettyPrint(doc, f)
finally:
    f.close()
print 'Done'
The xml.dom.ext module was never added to the Python standard library.
It was only ever part of the PyXML distribution, but that has not seen any updates in years and I doubt it'll still work on Python 2.6.
Instead, just call the minidom .toprettyxml() method on your document to pretty print the output, then write that data out to the file:
f.write(doc.toprettyxml())
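In the script above, that means dropping the import xml.dom.ext line and replacing the final try/finally with something like this (a sketch; the two-space indent string is just a choice):

# Write to qgis file using minidom's built-in pretty printer
try:
    f.write(doc.toprettyxml(indent="  "))
finally:
    f.close()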
I am new to Python and trying to get Kauffman's NK model working in it. I found the code online and was hoping I could make some changes over time, but I am not able to run it. It gives an error on line 31, 'f = open(options.in_filenames)'. I am sure I am missing something really small. Any help would be appreciated.
import csv
from numpy import *
import Gnuplot
import time
from optparse import OptionParser
from pylab import *
# set up and read command line options
parser = OptionParser()
parser.add_option("-f", "--file", dest="in_filenames",
help="read data from FILE - enclose comma-separated file list in quotes e.g. \"FILE1, FILE2\"", metavar="FILE")
(options, args) = parser.parse_args()
# set up constants
# column titles
columnvar_titles = (["A", "N", "K"])
series_titles = (["Average Fitness", "Maximum Fitness","Minimum Fitness",
"Average Wait Before Move","Maximum Wait Before Move",
"Minimum Wait Before Move","Average Number of Fitter Neighbours",
"Maximum Number of Fitter Neighbours",
"Minimum Number of Fitter Neighbours"])
f = open(options.in_filenames)
reader = csv.reader(f)
floats = []
options_dict={}
# start from the first line in the file
# read lines until we hit a blank
# lines will be in form "Key: Value"
# so split them and build a dictionary
while (1):
    readstring = reader.next()
    if len(readstring)==0:
        break
    dict_entry = readstring[0].split(': ', 1)
    options_dict[dict_entry[0]] = dict_entry[1]
#print readstring
#print reader.line_num
#print options_dict
#print len(options_dict)
#print options_dict['Fitness_method']
# after the model parameters, we have blank line(s) before the data headers
# keep skipping blanks, then grab the first non-blank line
# then read the first line into a list of strings.
while (1):
    readstring = reader.next()
    if len(readstring) > 0:
        column_headers = readstring
        break
#print column_headers
# need to check if we have a 'run' column
# single run gui output doesn't produce one, so need to add to column headers
# First six cols are "run, tick, A_size_of, RNGseed, [N & K]_size_of"
# "tick is already contained in the data, but is overwritten with K_size_of
# as it needs to be moved
# set add_run_data - flag to insert corresponding columns into the numeric data
add_run_data = 0
if column_headers[0]!="run":
    column_headers[0] = "K_size_of"
    column_headers = ["run", "tick", "A_size_of", "RngSeed", "N_size_of"] + column_headers
    add_run_data = 1
    print "Processing one run GUI output format..."
else:
    print "Processing batch mode output..."
#print column_headers
# read lines from the data until we hit a blank.
# if data is numeric, put it into out 2d list of floats
countlines=0
while (1):
    try:
        readstring=reader.next()
        countlines = countlines + 1
        if len(readstring)==0:
            print "Stopped reading"
            break
        try:
            floats.append(map(float, readstring))
        except:
            print "Bad data - not adding"
    except StopIteration:
        #print "Read:", countlines
        break
print "Read", countlines, "lines of data from file"
#print floats
xdata=array(floats)
#print xdata
# if we needed to add column headers before, we are dealing with single run
# output. If so, we need to add columns at the left of the data.
# First six cols are "run, tick, A_size_of, RNGseed, [N & K]_size_of"
# We add five (not six) cols, since "tick" is currently in the data already
# Other values are run = 1 (by def), RNGSeed (doesn't matter)
# A, N, K values are taken from the dictionary made from the header data in the file
if add_run_data == 1:
    newcol = ones((xdata.shape[0],5), dtype="float")
    xdata = concatenate((newcol, xdata), axis=1)
    A_val = float(options_dict['A_size_of'])
    N_val = float(options_dict['N_size_of'])
    K_val = float(options_dict['K_size_of'])
    #print A_val, N_val, K_val
    xdata[:,1] = xdata[:,5] # ticks - already there but needs to move
    xdata[:,2] = A_val
    xdata[:,3] = 0 #RNG seed doesn't matter
    xdata[:,4] = N_val
    xdata[:,5] = K_val #overwrites original tick column
#print column_headers
#print xdata[1]
A_uniques = unique(xdata[:,2])
N_uniques = unique(xdata[:,4])
K_uniques = unique(xdata[:,5])
series_total = len(A_uniques) * len(N_uniques) * len(K_uniques)
# set up an array to hold averages (no columns for run number or rng seed)
# needs to move to handle multiple variables
maxticks = xdata[:,1].max()
#print series_total
#print maxticks
#print xdata.shape[1]-2
averages = zeros((series_total, maxticks, xdata.shape[1]-2), float)
# three loop setup for varying A / N / K values
series_counter = 0
Aseries_name=""
Nseries_name=""
Kseries_name=""
series_keys=[]
for A_value in A_uniques:
    if len(A_uniques)>1:
        Aseries_name = "A=" + str(A_value) + ", "
    dataA = compress(xdata[:,2]==A_value, xdata, axis=0)
    for N_value in N_uniques:
        #if len(N_uniques)>1:
        Nseries_name = "N=" + str(N_value)
        dataAN = compress(dataA[:,4]==N_value, dataA, axis = 0)
        for K_value in K_uniques:
            #if len(K_uniques)>1:
            Kseries_name = ", " + "K=" + str(K_value)
            dataANK = compress(dataAN[:,5]==K_value, dataAN, axis = 0)
            series_keys.append(Aseries_name + Nseries_name + Kseries_name)
            # when multiple variables are used, run values continue to count from the
            # previous variable value (e.g. A=2 (runs 1-100) A=3 (runs 101-200))
            # we need to number the runs in ascending order from 1.
            firstrun=dataANK[:,0].min()
            lastrun=dataANK[:,0].max()
            totalruns = 1 + lastrun - firstrun
            #print firstrun, lastrun, totalruns
            # for each run, find the last actual tick data
            last_tick_array = zeros((totalruns, dataANK.shape[1]), float)
            #print last_tick_array.shape
            for k in arange(totalruns): # for each run get the data for the last tick
                this_run=compress(dataANK[:,0]==k+firstrun, dataANK, axis=0)
                last_tick_array[k]=this_run[-1]
            #print "Last tick array"
            #print last_tick_array[-1]
            print "Processing simulation " + str(series_counter+1) + "/" + str(series_total)
            for i in arange(maxticks): # for each tick value up to the maximum
                # array to hold one tick from each run for averaging
                # will contain either actual or extrapolated data
                selected_ticks = zeros((totalruns, dataANK.shape[1]), float)
                #print selected_ticks
                # get dataANK for this tick from all runs. May be empty.
                this_tick = compress(dataANK[:,1]==i+1, dataANK, axis=0)
                #print "this tick"
                #print this_tick
                for j in arange(totalruns): # for each run
                    if (i+1) < last_tick_array[j,1]:# do we have actual data?
                        #print "Using real data"
                        # if so, get it
                        selected_ticks[j] = compress(this_tick[:,0]==j+firstrun, this_tick, axis=0)
                        #print selected_ticks
                    else:
                        # if not, use the last tick we do have
                        #print "Using last tick"
                        selected_ticks[j] = last_tick_array[j]
                #print "selected_ticks"
                #print selected_ticks[0]
                averages[series_counter][i][0]=i+1 # tick number
                averages[series_counter][i][1]=selected_ticks[:,2].max() #A_size_of
                averages[series_counter][i][2]=selected_ticks[:,4].min() #N_size_of
                averages[series_counter][i][3]=selected_ticks[:,5].min() #K_size_of
                for m in xrange(6,16):
                    #print m
                    averages[series_counter][i][m-2]=selected_ticks[:,m].mean()
            # increment to fill next index
            series_counter = series_counter + 1
# matplotlib plots
print "Plotting graphs..."
matplotlib.use('Agg')
for graph_num in (4,5,6,7,8,9,10,11,12,13):
    ylabel(column_headers[graph_num+2])
    for ser in range(series_total):
        plot(averages[ser][:,0], averages[ser][:,graph_num], label=series_keys[ser])
    legend(loc='center right').draw_frame(0)
    show()
    savefig('nk' + column_headers[graph_num+2] + '.png')
    clf()
print "Writing CSV files"
f1 = open("nk_allticks.csv","wt")
csv1 = csv.writer(f1)
f2 = open("nk_finaltick.csv","wt")
csv2 = csv.writer(f2)
column_headers.remove('RngSeed')
# replace spaces for underscores in column headers for better file compatability
for i in range(len(column_headers)):
    column_headers[i]=column_headers[i].replace(' ','_')
try:
    csv1.writerow(column_headers[1:])
    csv2.writerow(column_headers[1:])
    for series in range(series_total):
        csv1.writerows(averages[series])
        csv2.writerow(averages[series][-1])
finally:
    f1.close()
    f2.close()
f3 = open("nk_allticks_crosstab.csv","wt")
csv3 = csv.writer(f3)
out_array = zeros((averages[0].shape[0], 1 +(series_total * 10)), float)
out_array[:,0] = averages[0][:,0]
headers=[1000]#[8 * series_total]
headers[0]="tick"
datacol = 1
for column_num in (3, 4,5,6,7,8,9,10,11,12):
    for ser in range (series_total):
        headers.append((column_headers[column_num+2] + " " + series_keys[ser]).replace(' ','_'))
        out_array[:,datacol] = averages[ser][:,column_num+1]
        #print averages[ser][0]
        datacol = datacol + 1
#print headers
#print out_array.shape
try:
    csv3.writerow(headers)
    csv3.writerows(out_array)
finally:
    f3.close()
You need to tell it which file to open. When you run the script you'd need to do something like -
python myscript.py -f myfile
Otherwise, you have not given it a file. Use the interpreter; it's great for catching these things.
>>> from optparse import OptionParser
>>> parser = OptionParser()
>>> parser.add_option("-f", "--file", dest="in_filenames",
... help="read data from FILE - enclose comma-separated file list in quotes e.g. \"FILE1, FILE2\"", metavar="FILE")
<Option at 0x29f47c8: -f/--file>
>>> (options, args) = parser.parse_args()
>>> options # Notice that in_filenames is None here
<Values at 0x29f6648: {'in_filenames': None}>
>>> import sys
>>> sys.argv
['']
>>> sys.argv = ['','-f','myfile'] # Let's explicitly set the argument
>>> (options, args) = parser.parse_args()
>>> options # Now it works...
<Values at 0x29fd848: {'in_filenames': 'myfile'}>
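If you also want the script itself to fail with a clearer message when -f is missing, optparse's parser.error() is a reasonable guard; a small sketch of my own, not part of the original script:

# After parsing, check that -f/--file was actually supplied before opening it.
(options, args) = parser.parse_args()
if options.in_filenames is None:
    parser.error("no input file given - use -f FILE (or --file FILE)")
f = open(options.in_filenames)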