How to efficiently extract time-series data from a netCDF file? - python

I want to extract time series of data from a single netCDF file.
I have to extract three time series of daily temperatures for more than 500 cities from 2004 to 2016 (more precisely, for each city I extract one time series at each of three coordinate points).
The following program works, but it is very slow (more than 8 hours to obtain the time series for one location). I have already tried splitting the coordinates into several CSV files and running the program separately on each of them, but that is not very efficient.
Maybe I should chunk the netCDF file (5 GB) into smaller files to reduce the reading overhead, but I don't know how to do that.
from netCDF4 import Dataset
from datetime import datetime
import pandas as pd
import os
import numpy as np

os.chdir('D:PATH/tmp/')

date_range = pd.date_range(start="2004-01-01", end="2016-12-31", freq='D')
df = pd.DataFrame(0.0, columns=['Temp1', 'Temp2', 'Temp3'], index=date_range)
cities = pd.read_csv(r'D:\PATH\cities_coordinates.csv', sep=',')
cities['NUTS_ID'] = cities['NUTS_ID'].map(str)

for index, row in cities.iterrows():
    location = row['NUTS_ID']
    location_latitude1 = row['lat1']
    location_longitude1 = row['lon1']
    location_latitude2 = row['lat2']
    location_longitude2 = row['lon2']
    location_latitude3 = row['lat3']
    location_longitude3 = row['lon3']

    for day in date_range:
        # the netCDF file is re-opened, and the grid re-read, once per day
        data = Dataset("D:/PATH/temperature.nc", 'r')
        # Storing the lat and lon data of the netCDF file into variables
        lat = data.variables['latitude'][:]
        lon = data.variables['longitude'][:]
        # Squared difference between the specified lat/lon and the lat/lon of the netCDF
        sq_diff_lat1 = (lat - location_latitude1)**2
        sq_diff_lon1 = (lon - location_longitude1)**2
        sq_diff_lat2 = (lat - location_latitude2)**2
        sq_diff_lon2 = (lon - location_longitude2)**2
        sq_diff_lat3 = (lat - location_latitude3)**2
        sq_diff_lon3 = (lon - location_longitude3)**2
        # Identify the index of the min value for lat and lon
        min_index_lat1 = sq_diff_lat1.argmin()
        min_index_lon1 = sq_diff_lon1.argmin()
        min_index_lat2 = sq_diff_lat2.argmin()
        min_index_lon2 = sq_diff_lon2.argmin()
        min_index_lat3 = sq_diff_lat3.argmin()
        min_index_lon3 = sq_diff_lon3.argmin()
        # Accessing the temperature data
        tx = data.variables['tx']
        start = '2004-01-01'
        end = '2016-12-31'
        d_range = pd.date_range(start=start, end=end, freq='D')
        for t_index in np.arange(0, len(d_range)):
            print('Recording the value for: ' + str(d_range[t_index]))
            df.loc[d_range[t_index], 'Temp1'] = tx[t_index, min_index_lat1, min_index_lon1]
            df.loc[d_range[t_index], 'Temp2'] = tx[t_index, min_index_lat2, min_index_lon2]
            df.loc[d_range[t_index], 'Temp3'] = tx[t_index, min_index_lat3, min_index_lon3]
    df.to_csv(location + '.csv')
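One way to remove most of that overhead is to open the file once, locate each nearest grid index once per city, and slice the whole time axis in a single read. Below is a minimal sketch with xarray; it assumes the file's coordinates and variable are named latitude, longitude, and tx, as in the code above:

import pandas as pd
import xarray as xr

# open once; xarray reads lazily, so no data is loaded yet
ds = xr.open_dataset("D:/PATH/temperature.nc")

cities = pd.read_csv(r'D:\PATH\cities_coordinates.csv', sep=',')
cities['NUTS_ID'] = cities['NUTS_ID'].map(str)

for _, row in cities.iterrows():
    series = {}
    for i in (1, 2, 3):
        # nearest-neighbour selection replaces the manual argmin search,
        # and one .sel call pulls the full 2004-2016 series in a single read
        point = ds['tx'].sel(latitude=row['lat%d' % i],
                             longitude=row['lon%d' % i],
                             method='nearest')
        series['Temp%d' % i] = point.sel(time=slice('2004-01-01', '2016-12-31')).to_series()
    pd.DataFrame(series).to_csv(row['NUTS_ID'] + '.csv')

If you still want to rechunk the 5 GB file itself, the nccopy utility that ships with netCDF has a -c option for rewriting a file with a different chunk layout, but opening the file once as above usually removes most of the cost.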

Related

Python: Extract CSVs of time series data from high-resolution netCDF data using xarray

I am trying to load several high-resolution netCDF files into a single dataset using OPeNDAP, xarray, and dask, and then extract time-series data by latitude/longitude coordinate into individual CSV files.
I can write a single CSV, but looping through the entire dataset will take >1200 hours.
Is it better to select individual points one at a time, or to select all points at once and then convert them all to CSV files?
import collections
import os

import xarray as xr

Variables = collections.namedtuple('Variables', ['new_name', 'conversion', 'units'])

toMJm2 = 3.6e-3
toDegC = 273.15
tomm = 1.0

VARS_DICT = {"rsds": Variables('SRAD', toMJm2, "MJ m**-2"),
             "tasmax": Variables("TMax", toDegC, "C"),
             "tasmin": Variables("TMin", toDegC, "C"),
             "pr": Variables("Rain", tomm, "mm")}

def open_rename_ds(file):
    ds = xr.open_dataset(file, chunks={"lat": 5, "lon": 10, "time": -1})
    ds = ds.squeeze('crs').drop('crs')
    ds = ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180))
    var_name = os.path.basename(file).split("_")[2]
    v = VARS_DICT[var_name]
    for k in ds.data_vars:
        if v.conversion == 273.15:
            ds[v.new_name] = ds[k] - v.conversion
        else:
            ds[v.new_name] = ds[k] * v.conversion
        ds[v.new_name].attrs["units"] = v.units
        _ds = ds.drop([k])
    return _ds.sel(time=slice("2020-01-01", "2099-12-30"))

fileslist = ['http://thredds.northwestknowledge.net:8080/thredds/dodsC/agg_macav2metdata_tasmax_bcc-csm1-1_r1i1p1_rcp85_2006_2099_CONUS_daily.nc',
             'http://thredds.northwestknowledge.net:8080/thredds/dodsC/agg_macav2metdata_tasmin_bcc-csm1-1_r1i1p1_rcp85_2006_2099_CONUS_daily.nc',
             'http://thredds.northwestknowledge.net:8080/thredds/dodsC/agg_macav2metdata_pr_bcc-csm1-1_r1i1p1_rcp85_2006_2099_CONUS_daily.nc',
             'http://thredds.northwestknowledge.net:8080/thredds/dodsC/agg_macav2metdata_rsds_bcc-csm1-1_r1i1p1_rcp85_2006_2099_CONUS_daily.nc']

dss = [open_rename_ds(file) for file in fileslist]

# Merge the four opened datasets into a single dataset using `.update`
ds = dss[0]
for d in dss[1:]:
    ds.update(d)

# drop the attributes for brevity
ds.attrs = {}
Here is the size and information on the dataset:
print("ds size in GB {:0.2f}\n".format(ds.nbytes / 1e9))
print(ds.info())
ds size in GB 379.06
xarray.Dataset {
dimensions:
        lat = 585 ;
        lon = 1386 ;
        time = 29219 ;

variables:
        float64 lat(lat) ;
                lat:long_name = latitude ;
                lat:standard_name = latitude ;
                lat:units = degrees_north ;
                lat:axis = Y ;
                lat:description = Latitude of the center of the grid cell ;
        float64 lon(lon) ;
        datetime64[ns] time(time) ;
                time:description = days since 1900-01-01 ;
        float32 TMax(time, lat, lon) ;
                TMax:units = C ;
        float32 TMin(time, lat, lon) ;
                TMin:units = C ;
        float32 Rain(time, lat, lon) ;
                Rain:units = mm ;
        float32 SRAD(time, lat, lon) ;
                SRAD:units = MJ m**-2 ;

// global attributes:
}
I need to drop all the NaN lat/lon points, since this should reduce the file sizes.
I am trying something like:
dd = ds.isel(lat=list_of_valid_lats, lon=list_of_valid_lons).to_dataframe()
dd.to_csv("testing_filename.csv.gz", index=False, compression="gzip")
This takes forever and often crashes my computer. I have also tried using to_dask_dataframe().
I have also tried:
df = ds.isel(lat=0, lon=1049).to_dataframe().reset_index()
df.to_csv("testing_filename_csv.gz", index=False, compression="gzip")
This works but is really slow when there are over ~400,000 grid points, and I also need to repeat this for ~40 similar files.
Any thoughts or ideas to improve the performance would be appreciated.
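One approach worth noting here (a sketch, reusing the list_of_valid_lats and list_of_valid_lons from the attempt above, not tested on this dataset): xarray supports vectorized (pointwise) indexing, so all valid grid points can be selected in a single operation by passing DataArray indexers that share a new points dimension, instead of calling .isel once per point:

import xarray as xr

# both indexers share the "points" dimension, so xarray pairs them
# element-wise instead of taking the outer product of lats and lons
lat_idx = xr.DataArray(list_of_valid_lats, dims="points")
lon_idx = xr.DataArray(list_of_valid_lons, dims="points")

subset = ds.isel(lat=lat_idx, lon=lon_idx)  # dims: (time, points)

# one pass over the data, then one CSV per point written from memory
df = subset.to_dataframe()
for point, point_df in df.groupby(level="points"):
    point_df.to_csv("point_{}.csv.gz".format(point), compression="gzip")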

Convert time series data from csv to netCDF python

The main problem during this process is the line below:
precip[:] = orig
which produces the error:
ValueError: cannot reshape array of size 5732784 into shape (39811,144,144)
I have two CSV files: one contains all the actual data for a variable (precipitation), with each column being a station, and the corresponding coordinates are in a second, separate CSV file.
My sample data is in Google Drive here, if you want to have a look at the data itself. My first CSV file has the shape (39811, 144) and my second CSV file has the shape (171, 10), but note that I am only using the sliced dataframe (144, 2).
This is the code:
stations = pd.read_csv(stn_precip)
stncoords = stations.iloc[:,[0,1]][:144]
orig = pd.read_csv(orig_precip, skiprows = 1, names = stations['Code'][:144])
lons = stncoords['X']
lats = stncoords['Y']
ncout = netCDF4.Dataset('Precip_1910-2018_homomod.nc', 'w')
ncout.createDimension('longitude',lons.shape[0])
ncout.createDimension('latitude',lats.shape[0])
ncout.createDimension('precip',orig.shape[1])
ncout.createDimension('time',orig.shape[0])
lons_out = lons.tolist()
lats_out = lats.tolist()
time_out = orig.index.tolist()
lats = ncout.createVariable('latitude',np.dtype('float32').char,('latitude',))
lons = ncout.createVariable('longitude',np.dtype('float32').char,('longitude',))
time = ncout.createVariable('time',np.dtype('float32').char,('time',))
precip = ncout.createVariable('precip',np.dtype('float32').char,('time', 'longitude','latitude'))
lats[:] = lats_out
lons[:] = lons_out
time[:] = time_out
precip[:] = orig
ncout.close()
I'm mostly basing my code on this post: convert-csv-to-netcdf, but it does not include the variable 'TIME' as a third dimension, so that's where I'm failing.
I was expecting the precipitation variable to have the shape (39811, 144, 144), but the error suggests otherwise: the dataframe holds only 39811 × 144 = 5,732,784 values, which cannot fill a (39811, 144, 144) array.
Not exactly sure how to deal with this; any input is appreciated.
As you have data from different stations, I would suggest using a station dimension for your netCDF file rather than separate lon and lat dimensions. Of course, you can still save the longitude and latitude of each station to separate variables.
Here is one possible solution, using your code as an example:
#!/usr/bin/env ipython
import pandas as pd
import numpy as np
import netCDF4
stn_precip='Precip_1910-2018_stations.csv'
orig_precip='Precip_1910-2018_origvals.csv'
stations = pd.read_csv(stn_precip)
stncoords = stations.iloc[:,[0,1]][:144]
orig = pd.read_csv(orig_precip, skiprows = 1, names = stations['Code'][:144])
lons = stncoords['X']
lats = stncoords['Y']
nstations = np.size(lons)
ncout = netCDF4.Dataset('Precip_1910-2018_homomod.nc', 'w')
ncout.createDimension('station',nstations)
ncout.createDimension('time',orig.shape[0])
lons_out = lons.tolist()
lats_out = lats.tolist()
time_out = orig.index.tolist()
lats = ncout.createVariable('latitude',np.dtype('float32').char,('station',))
lons = ncout.createVariable('longitude',np.dtype('float32').char,('station',))
time = ncout.createVariable('time',np.dtype('float32').char,('time',))
precip = ncout.createVariable('precip',np.dtype('float32').char,('time', 'station'))
lats[:] = lats_out
lons[:] = lons_out
time[:] = time_out
precip[:] = orig
ncout.close()
So the information about the output file (ncdump -h Precip_1910-2018_homomod.nc) looks like this:
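(The header itself did not survive in this copy; reconstructed from the code above, it should look roughly like this:)

netcdf Precip_1910-2018_homomod {
dimensions:
        station = 144 ;
        time = 39811 ;
variables:
        float latitude(station) ;
        float longitude(station) ;
        float time(time) ;
        float precip(time, station) ;
}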

How to convert a netCDF4 file to a geoTiff

I'm currently trying to get Tropomi data into geoTIFF format. I downloaded some data in netCDF4 format, which gives me three numpy arrays: one with latitude coordinates, one with longitude coordinates, and one with carbon monoxide values.
So I have a matrix of values for my raster, and for each value I know its longitude and latitude.
With this information, how can I construct a georeferenced raster?
I read in the data as follows
import netCDF4
from netCDF4 import Dataset
import numpy as np
file = '/home/daniel/Downloads/S5P_NRTI_L2__CO_____20190430T171319_20190430T171819_08006_01_010301_20190430T175151.nc'
rootgrp = Dataset(file, "r",format="NETCDF4")
lat = rootgrp.groups['PRODUCT']['latitude'][:]
lon = rootgrp.groups['PRODUCT']['longitude'][:]
carbon = rootgrp.groups['PRODUCT']['carbonmonoxide_total_column'][:]
obtaining three matrices, each with shape (1, 290, 215).
Now I would like to convert this to a Mercator projected geoTIFF, but I do not know how to go about it.
The gdal_translate option seems to work, but here is an alternative, explicit way I did it.
#importing packages
import numpy as np
from scipy import interpolate
from netCDF4 import Dataset
from shapely.geometry import Point
import geopandas as gpd
from geopy.distance import geodesic
import rasterio
import matplotlib.pyplot as plt

#load data
file = '/home/daniel/Ellipsis/db/downloaded/rawtropomi/S5P_NRTI_L2__CO_____20190430T171319_20190430T171819_08006_01_010301_20190430T175151.nc'
rootgrp = Dataset(file, "r", format="NETCDF4")
lat = rootgrp.groups['PRODUCT']['latitude'][:]
lon = rootgrp.groups['PRODUCT']['longitude'][:]
carbon = rootgrp.groups['PRODUCT']['carbonmonoxide_total_column'][:]
carbon = carbon.filled(0)
lat = lat.filled(-1000)
lon = lon.filled(-1000)
carbon = carbon.flatten()
lat = lat.flatten()
lon = lon.flatten()

#calculate the real distance between corners and get the width and height in pixels,
#assuming you want a pixel resolution of at least 7 by 7 kilometers
w = max(geodesic((min(lat), max(lon)), (min(lat), min(lon))).meters/7000, geodesic((max(lat), max(lon)), (max(lat), min(lon))).meters/14000)
h = geodesic((min(lat), max(lon)), (max(lat), max(lon))).meters/14000

#create a GeoDataFrame whose rows are the latitude, longitude and the measurement values,
#then transform it to the web-mercator projection (or a projection of your choosing)
points = [Point(xy) for xy in zip(lon, lat)]
crs = {'init': 'epsg:4326'}
data = gpd.GeoDataFrame({'value': carbon}, crs=crs, geometry=points)
data = data.to_crs({'init': 'epsg:3395'})
data['lon'] = data.bounds['maxx'].values
data['lat'] = data.bounds['maxy'].values

#make a grid of coordinates; you need to calculate the coordinate of each pixel in the desired raster
minlon = min(data['lon'])
maxlon = max(data['lon'])
minlat = min(data['lat'])
maxlat = max(data['lat'])
lon_list = np.arange(minlon, maxlon, (maxlon - minlon)/w)
lat_list = np.arange(minlat, maxlat, (maxlat - minlat)/h)
lon_2d, lat_2d = np.meshgrid(lon_list, lat_list)

#use the values in the GeoDataFrame to interpolate values into the coordinate raster
r = interpolate.griddata(points=(data['lon'].values, data['lat'].values), values=data['value'].values, xi=(lon_2d, lat_2d))
r = np.flip(r, axis=0)

#check result
plt.imshow(r)

#rasterio expects a (bands, rows, cols) array, so add a band axis
r = np.expand_dims(r, axis=0)

#save raster
transform = rasterio.transform.from_bounds(south=minlat, east=maxlon, north=maxlat, west=minlon, width=r.shape[2], height=r.shape[1])
file_out = 'test.tiff'
new_dataset = rasterio.open(file_out, 'w', driver='GTiff', compress='lzw',
                            height=r.shape[1], width=r.shape[2],
                            count=r.shape[0], dtype=str(r.dtype),
                            crs=data.crs,
                            transform=transform)
new_dataset.write(r)
new_dataset.close()
I would suggest looking at this answer here using gdal_translate:
Convert NetCDF (.nc) to GEOTIFF
gdal_translate -of GTiff file.nc test.tiff
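One caveat worth adding (my note, not part of the linked answer): for Tropomi products the variable sits inside the PRODUCT group, so you may need GDAL's netCDF subdataset syntax rather than the bare filename; exact group support depends on your GDAL version, and gdalinfo lists the subdataset names your build exposes:

# list the available subdatasets first
gdalinfo S5P_NRTI_L2__CO_____20190430T171319_20190430T171819_08006_01_010301_20190430T175151.nc
# then translate the one you want, e.g.
gdal_translate -of GTiff NETCDF:"S5P_NRTI_L2__CO_____20190430T171319_20190430T171819_08006_01_010301_20190430T175151.nc":/PRODUCT/carbonmonoxide_total_column test.tiff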

Adding a 45 degree line to a time series stock data plot

I guess this is supposed to be simple, but I can't seem to make it work.
I have some stock data
import pandas as pd
import numpy as np
df = pd.DataFrame(index=pd.date_range(start="06/01/2018", end="08/01/2018"),
                  data=np.random.rand(62)*100)
I am doing some analysis on it, which results in my drawing some lines on the graph.
I want to plot a 45 degree line somewhere on the graph as a reference for the lines I drew.
What I have tried is
x = df.tail(len(df)/20).index
x = x.reset_index()
x_first_val = df.loc[x.loc[0].date].adj_close
in order to get a starting point and then use slope = 1 to calculate the y values... but this all sounds wrong.
Any ideas?
Here is a possibility:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.DataFrame(index=pd.date_range(start="06/01/2018", end="08/01/2018"),
                  data=np.random.rand(62)*100,
                  columns=['data'])

# Get values for the time window:
index_range = df.index[('2018-06-18' < df.index) & (df.index < '2018-07-21')]
# get the timestamps in nanoseconds (since epoch)
timestamps_ns = index_range.astype(np.int64)
# convert to a relative number of days (could also be seconds, for example)
time_day = (timestamps_ns - timestamps_ns[0]) / 1e9 / 60 / 60 / 24

# Define y-data for a line:
slope = 3  # unit: "something" per day
something = time_day * slope
trendline = pd.Series(something, index=index_range)

# Graph:
df.plot(label='data', alpha=0.8)
trendline.plot(label='some trend')
plt.legend(); plt.ylabel('something');
which gives:
edit - first answer, using dayofyear instead of the timestamps:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.DataFrame(index=pd.date_range(start="06/01/2018", end="08/01/2018"),
                  data=np.random.rand(62)*100,
                  columns=['data'])

# Define data for a line:
slope = 3  # unit: "something" per day
index_range = df.index[('2018-06-18' < df.index) & (df.index < '2018-07-21')]
dayofyear = index_range.dayofyear  # it will not work around the new year...
dayofyear = dayofyear - dayofyear[0]
something = dayofyear * slope
trendline = pd.Series(something, index=index_range)

# Graph:
df.plot(label='data', alpha=0.8)
trendline.plot(label='some trend')
plt.legend(); plt.ylabel('something');

Python: passing coordinates from list to function

I am using some code from a workshop to extract data from netCDF files at the grid point closest to my specified coordinates. When using just one set of coordinates, I am able to extract the values I need without trouble, as below:
import numpy as np
import netCDF4
from math import pi
from numpy import cos, sin

def tunnel_fast(latvar, lonvar, lat0, lon0):
    '''
    Find closest point in a set of (lat,lon) points to specified point
    latvar - 2D latitude variable from an open netCDF dataset
    lonvar - 2D longitude variable from an open netCDF dataset
    lat0,lon0 - query point
    Returns iy,ix such that the square of the tunnel distance
    between (latval[iy,ix],lonval[iy,ix]) and (lat0,lon0)
    is minimum.
    '''
    rad_factor = pi/180.0  # for trigonometry, need angles in radians
    # Read latitude and longitude from file into numpy arrays
    latvals = latvar[:] * rad_factor
    lonvals = lonvar[:] * rad_factor
    ny, nx = latvals.shape
    lat0_rad = lat0 * rad_factor
    lon0_rad = lon0 * rad_factor
    # Compute numpy arrays for all values, no loops
    clat, clon = cos(latvals), cos(lonvals)
    slat, slon = sin(latvals), sin(lonvals)
    delX = cos(lat0_rad)*cos(lon0_rad) - clat*clon
    delY = cos(lat0_rad)*sin(lon0_rad) - clat*slon
    delZ = sin(lat0_rad) - slat
    dist_sq = delX**2 + delY**2 + delZ**2
    minindex_1d = dist_sq.argmin()  # 1D index of minimum element
    iy_min, ix_min = np.unravel_index(minindex_1d, latvals.shape)
    return iy_min, ix_min

ncfile = netCDF4.Dataset('E:\wind_level2_1.nc', 'r')
latvar = ncfile.variables['latitude']
lonvar = ncfile.variables['longitude']

#_________GG turbine_________GAD10 Latitude 51.735516, GAD10 Longitude 1.942656
iy, ix = tunnel_fast(latvar, lonvar, 51.735516, 1.942656)
print('Closest lat lon:', latvar[iy, ix], lonvar[iy, ix])
refLAT = latvar[iy, ix]
refLON = lonvar[iy, ix]
#try to find the data for this location
SARwind = ncfile.variables['sar_wind'][:, :]
ModelWind = ncfile.variables['model_speed'][:, :]
print 'iy,ix'  #appears to be the index of the value of lat,lon
print SARwind[iy, ix]
ncfile.close()
Now I am trying to loop through a text file (coord_list) containing coordinates, extract each set of coordinates, find the data, then move on to the next set of coordinates in the list. This code works on its own, as below:
import csv
from decimal import Decimal

with open('Turbine_locs_no_header.csv','rb') as f:
    reader = csv.reader(f)
    #coord_list = list(reader)
    coord_list = [reader]
end_row = len(coord_list)
lon_ind = 1
lat_ind = 2

for row in range(0, end_row-1):  #end_row - 1 due to the 0 index
    turbine_lat = coord_list[row][lat_ind]
    turbine_lon = coord_list[row][lon_ind]
    turbine_lat = [Decimal(turbine_lat)]
    print 'lat', turbine_lat, 'lon', turbine_lon, row
However, I want to pass coordinates from the text file to this part of the original code: iy,ix = tunnel_fast(latvar, lonvar, 51.94341, 1.922094888), replacing the numbers with variables: iy, ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon). When I try to combine the two pieces of code by creating a function get_coordinates, I get the following errors:
File "C:/Users/mm/test_nc_bycoords_GG_turbines_AGW.py", line 65, in <module>
get_coordinates(coord_list, latvar, lonvar)
File "C:/Users/mm/test_nc_bycoords_GG_turbines_AGW.py", line 51, in get_coordinates
iy, ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon)
File "C:/Users/mm/test_nc_bycoords_GG_turbines_AGW.py", line 27, in tunnel_fast
lat0_rad = lat0 * rad_factor
TypeError: can't multiply sequence by non-int of type 'float'
I thought this was because turbine_lat and turbine_lon are list items and so cannot be used, but that doesn't seem to be connected to the errors. I know this code needs more work anyway, but if anyone could help me spot where I am going wrong, that would be very helpful. My attempt to combine the two pieces of code is below.
import numpy as np
import netCDF4
from math import pi
from numpy import cos, sin
import csv

# edited from https://github.com/Unidata/unidata-python-workshop/blob/a56daa50d7b343c7debe93968683613642d6b9f7/notebooks/netcdf-by-coordinates.ipynb
def tunnel_fast(latvar, lonvar, lat0, lon0):
    '''
    Find closest point in a set of (lat,lon) points to specified point
    latvar - 2D latitude variable from an open netCDF dataset
    lonvar - 2D longitude variable from an open netCDF dataset
    lat0,lon0 - query point
    Returns iy,ix such that the square of the tunnel distance
    between (latval[iy,ix],lonval[iy,ix]) and (lat0,lon0)
    is minimum.
    '''
    rad_factor = pi/180.0  # for trigonometry, need angles in radians
    # Read latitude and longitude from file into numpy arrays
    latvals = latvar[:] * rad_factor
    lonvals = lonvar[:] * rad_factor
    ny, nx = latvals.shape
    lat0_rad = lat0 * rad_factor
    lon0_rad = lon0 * rad_factor
    # Compute numpy arrays for all values, no loops
    clat, clon = cos(latvals), cos(lonvals)
    slat, slon = sin(latvals), sin(lonvals)
    delX = cos(lat0_rad)*cos(lon0_rad) - clat*clon
    delY = cos(lat0_rad)*sin(lon0_rad) - clat*slon
    delZ = sin(lat0_rad) - slat
    dist_sq = delX**2 + delY**2 + delZ**2
    minindex_1d = dist_sq.argmin()  # 1D index of minimum element
    iy_min, ix_min = np.unravel_index(minindex_1d, latvals.shape)
    return iy_min, ix_min

#________________my edits___________________________________________________
def get_coordinates(coord_list, latvar, lonvar):
    "this takes coordinates from a .csv and assigns them to variables"
    end_row = len(coord_list)
    lon_ind = 1
    lat_ind = 2
    for row in range(0, end_row-1):  #end_row - 1 due to the 0 index
        turbine_lat = coord_list[row][lat_ind]
        turbine_lon = coord_list[row][lon_ind]
        iy, ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon)
        print('Closest lat lon:', latvar[iy, ix], lonvar[iy, ix])
#________________________________________________________________________________________________________________________

ncfile = netCDF4.Dataset('NOGAPS_wind_level2_1.nc', 'r')
latvar = ncfile.variables['latitude']
lonvar = ncfile.variables['longitude']

#____added in to pass to get_coordinates function
with open('Turbine_locs_no_header.csv','rb') as f:
    reader = csv.reader(f)
    coord_list = list(reader)

#_________take latitude from coordinates function
get_coordinates(coord_list, latvar, lonvar)
#iy,ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon)  #get these from assign_coordinates_fromlist.py
#print('Closest lat lon:', latvar[iy,ix], lonvar[iy,ix])
SARwind = ncfile.variables['sar_wind'][:, :]
ModelWind = ncfile.variables['model_speed'][:, :]
print 'iy,ix'  #appears to be the index of the value of lat,lon
print SARwind[iy, ix]
ncfile.close()
You can unpack an argument list using *args (see the docs). In your case you could do tunnel_fast(latvar, lonvar, *coord_list[row]). You need to make sure that the order of arguments in coord_list[row] is correct, and if coord_list[row] contains more than two values then you need to slice it appropriately.
Thanks to help from a_guest, it turned out to be a simple problem: lat0 and lon0 were being passed to tunnel_fast as <type 'str'> when it requires <type 'float'>. This appears to come from loading coord_list with csv.reader, which yields rows of strings.
with open('Turbine_locs_no_header.csv','rb') as f:
reader = csv.reader(f)
coord_list = list(reader)
The workaround I used was to convert lat0 and lon0 to floats at the beginning of tunnel_fast:
lat0 = float(lat0)
lon0 = float(lon0)
I am sure there is a more elegant way to do this, but it works.
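For what it's worth, a slightly tidier variant (my sketch, not the original poster's code) is to do the cast where the CSV row is read, so tunnel_fast keeps a purely numeric interface:

for row in range(0, end_row - 1):
    # cast the CSV strings to floats once, at the point of reading
    turbine_lat = float(coord_list[row][lat_ind])
    turbine_lon = float(coord_list[row][lon_ind])
    iy, ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon)
    print('Closest lat lon:', latvar[iy, ix], lonvar[iy, ix])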
