GPS downsampling - python

My goal is to downsample my input data to one point every 100 m and to keep the first and last line of each survey line.
My problem is that I get far fewer lines than I should when I downsample, and I don't know how to get the last line.
I hope this is clear enough for someone to understand.
I want to make this
Line 20130904_0848.nmea
$GPGGA,111936.00,5849.37538,N,01739.88263,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*42
$GPGGA,111936.00,5849.37548,N,01739.88240,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*44
$GPGGA,111936.00,5849.37556,N,01739.88216,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*48
$GPGGA,111936.00,5849.37569,N,01739.88193,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*4a
$GPGGA,111936.00,5849.37581,N,01739.88171,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*40
$GPGGA,111936.00,5849.69118,N,01739.89674,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*4c
EOL
Line 20130904_0926.nmea
$GPGGA,111936.00,5849.67569,N,01739.98426,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*45
$GPGGA,111936.00,5849.67593,N,01739.98453,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*42
$GPGGA,111936.00,5849.67616,N,01739.98479,,E,2,09,00.9,00004.43,M,0024.87,M,007,0734*44
....
look like this
Line 20081002-1119.nmea
58.853952 13.309779 0.00
58.853907 13.310688 101.15
58.853858 13.311593 100.72
58.853811 13.312498 100.62
58.853764 13.313402 100.59
58.853752 13.313660 28.70
EOL
Line 20081002-1119.nmea
58.853952 13.309779 0.00
58.853907 13.310688 101.15
58.853858 13.311593 100.72
58.853811 13.312498 100.62
58.853764 13.313402 100.59
...
This is my code so far
from math import sin, cos, sqrt, atan2, radians

coord = []
coord1 = None

def distance(coord1, coord2):  # Haversine
    lat1, lon1 = coord1
    lat2, lon2 = coord2
    dlat = radians(lat2 - lat1)
    dlon = radians(lon2 - lon1)
    a = (sin(dlat/2) * sin(dlat/2)
         + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon/2) * sin(dlon/2))
    c = 2 * atan2(sqrt(a), sqrt(1-a))
    s = (6367*c) * 1000  # metres
    return s

# "with open" closes the file automatically, so you don't need to close it yourself
with open('asko_nav_2013.nmea', 'r') as indata:        # 'r' opens the input file for reading
    with open('asko_nav_out.txt', 'w') as outdata:     # 'w' opens a new output file for writing
        while True:
            line = indata.readline()
            if not line:
                break
            if line.startswith('EOL'):  # if the line starts with EOL (end of line), write it to the output
                outdata.writelines("EOL")
                coord1 = None
            elif line.startswith('Line'):
                LineID = line
                outdata.writelines('\n%s' % LineID)
            elif line.startswith('$GPGGA'):  # when the line starts with $GPGGA, split it into columns
                data = line.split(",")
                # Import only the coordinates from the asko input file (fields 2 and 4)
                # and convert them from DDMM.MMMM to DD.DDDDDD
                LAT = data[2]
                LAT_D = LAT[0:2]
                LATID = float(LAT_D)
                LAT_M = LAT[2:]
                LATM = float(LAT_M)
                LATIM = float(LATM) / 60.0
                latitude = LATID + LATIM
                LON = data[4]
                LON_D = LON[1:3]
                LONGD = float(LON_D)
                LON_M = LON[3:]
                LONM = float(LON_M)
                LONGM = float(LONM) / 60.0
                longitude = LONGD + LONGM
                if coord1 is None:
                    # The first time through the loop "coord1" is None
                    outdata.writelines('%0.6f\t%0.6f\t%s \n' % (latitude, longitude, 0))
                    coord1 = (latitude, longitude)
                else:
                    coord2 = (latitude, longitude)
                    dist = distance(coord1, coord2)
                    if dist < 100:
                        continue
                    outdata.writelines('%0.6f\t%0.6f\t%f\n' % (latitude, longitude, dist))
                    coord1 = coord2

Your code could do with a little reorganising to make it clearer. You need to add an additional write whenever EOL is seen, to cover the case where the remaining distance is under 100 m:
from math import sin, cos, sqrt, atan2, radians

def distance(coord1, coord2):  # Haversine
    lat1, lon1 = coord1
    lat2, lon2 = coord2
    dlat = radians(lat2 - lat1)
    dlon = radians(lon2 - lon1)
    a = (sin(dlat/2) * sin(dlat/2)
         + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon/2) * sin(dlon/2))
    c = 2 * atan2(sqrt(a), sqrt(1-a))
    s = (6367*c) * 1000  # metres
    return s

def get_coordinates(data):
    # Import only the coordinates from the asko input file (fields 2 and 4)
    # and convert them from DDMM.MMMM to DD.DDDDDD
    LAT = data[2]
    LAT_D = LAT[0:2]
    LATID = float(LAT_D)
    LAT_M = LAT[2:]
    LATM = float(LAT_M)
    LATIM = float(LATM) / 60.0
    latitude = LATID + LATIM
    LON = data[4]
    LON_D = LON[1:3]
    LONGD = float(LON_D)
    LON_M = LON[3:]
    LONM = float(LON_M)
    LONGM = float(LONM) / 60.0
    longitude = LONGD + LONGM
    return (latitude, longitude)

coord1 = None

# "with open" closes the files automatically, so you don't need to close them yourself
with open('asko_nav_2013.nmea', 'r') as indata, open('asko_nav_out.txt', 'w') as outdata:
    for line in indata:
        if line.startswith('EOL'):  # if the line starts with EOL (end of line), write it to the output
            if dist < 100:
                outdata.write('%0.6f\t%0.6f\t%f\n' % (latitude, longitude, dist))
            outdata.write("\nEOL\n")
            coord1 = None  # Reset the first coordinate
        elif line.startswith('Line'):
            outdata.write('\n%s' % line)
        elif line.startswith('$GPGGA'):  # when the line starts with $GPGGA, split it into columns
            data = line.split(",")
            latitude, longitude = get_coordinates(data)
            if coord1:
                coord2 = (latitude, longitude)
                dist = distance(coord1, coord2)
                if dist >= 100:
                    outdata.write('%0.6f\t%0.6f\t%f\n' % (latitude, longitude, dist))
                    coord1 = coord2
            else:
                # The first time through the loop "coord1" is None
                outdata.write('%0.6f\t%0.6f\t0.0 \n' % (latitude, longitude))
                coord1 = (latitude, longitude)
For your given input, this produces the following output file:
Line 20130904_0848.nmea
58.822923 17.664710 0.0
58.828186 17.664946 584.888514
EOL
Line 20130904_0926.nmea
58.827928 17.666404 0.0
58.827936 17.666413 0.870480
EOL
You also need to reset coord1 whenever EOL is detected to make sure 0 is displayed again for the first entry.
It is a bit difficult to see if this completely solves matters as your sample data does not seem to tally with your expected output.

Addressing the second issue concerning fewer result lines than expected: You are providing too little information about the nature of your problem and the input data you are processing. Sampling your input "for every 100m" could mean something different if your input data is sampled from a trajectory travelled by a moving object, especially if the motion is not purely linear.
Imagine that your input describes coordinates obtained by measuring GPS positions at regular intervals while moving along a circle with a radius smaller than, say, 15 m. Then no matter how many data points your input provides, the output of your proposed solution will never be longer than two lines, because no two points along that curve can have an absolute distance greater than 100 m. This might explain why you are seeing fewer lines in the output than expected.
If you mean to sample the input at every 100 m travelled, you would have to sum over all distances between input samples since the last point sampled for output, and use that instead of dist. Modifying Martin's reorganised code, it could be done like this (some lines omitted for brevity):
coord1 = None
coord_last = None  # holds the coordinate of the last input sample
dist = 0.0         # total distance travelled since coord1
# [...]
with open('asko_nav_2013.nmea', 'r') as indata, open('asko_nav_out.txt', 'w') as outdata:
    for line in indata:
        # [...]
            if coord1:
                coord2 = (latitude, longitude)
                delta = distance(coord_last, coord2)
                dist += delta
                coord_last = coord2
                if dist >= 100:
                    outdata.write('%0.6f\t%0.6f\t%f\n' % (latitude, longitude, dist))
                    coord1 = coord2
                    dist = 0.0
            else:
                # The first time through the loop "coord1" is None
                outdata.write('%0.6f\t%0.6f\t0.0 \n' % (latitude, longitude))
                coord1 = (latitude, longitude)
                coord_last = coord1
                dist = 0.0
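To also get the last line of each survey line (the second part of the question), the EOL branch from Martin's version can flush the point accumulated since the last written sample before resetting. A minimal sketch, using the same variable names as above:

        elif line.startswith('EOL'):
            # flush the point accumulated since the last 100 m sample, if any distance remains
            if coord1 and dist > 0.0:
                outdata.write('%0.6f\t%0.6f\t%f\n' % (latitude, longitude, dist))
            outdata.write("\nEOL\n")
            coord1 = None
            coord_last = None
            dist = 0.0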


How to extract time domain information when recreating the signal by taking the inverse FFT

I'm trying to recreate the original signal from the FFT of a signal sample. When taking the inverse FFT, I'm only getting amplitude information (only one column). How can I get the corresponding time coordinates?
This is a screen shot of my original signal, recorded from 0 to 10s with step 0.001s. When I take the IFFT, I'm getting the same number of data points as my signal, but can't find the corresponding time information.
How can I get the correct time information?
I'm including the Python code I used and a plot of the two signals.
# generating the signal here
import numpy as np

k = float(3.1416*2)
f1 = 100
f2 = 150
f3 = 250
ds = max(f1, f2, f3)
ds = float(4*ds)
dt = 1.000/ds
lf = min(f1, f2, f3)
lT = 1.00/lf
N = 10  # cycles
totaltime = N*lT
data = []
tt = []
mf = 1/dt
print "TotalTime =", totaltime
for t in np.arange(0.0, totaltime, dt/100):
    #t = tk/mf
    print t
    wave1 = np.sin(k*f1*t)
    wave2 = np.sin(k*f2*t)
    wave3 = np.sin(k*f3*t)
    summ = wave1 + wave2 + wave3
    print t, " ", summ
    tt.append(t)
    data.append(summ)
print tt
print data
np.savetxt("data.txt", np.c_[tt, data])
#######################
# taking the FFT here
import numpy

fourier = []
tt = []
yy = []
logname = str("data.txt")
with open(logname, "rb") as wdata:
    for line in wdata:
        if not line.startswith("#"):
            sl = line.split()
            c11 = float(sl[0])
            #c11 = c1*10**(-12)
            c2 = float(sl[1])
            tt.append(c11)
            yy.append(c2)
n = len(yy)
n1 = len(tt)
print "n=", n, "(", n1, ")"
# to calculate the time step, find the difference between 2 time values
t0 = float(tt[0])
print "t0=", t0
t1 = float(tt[1])
print "t1=", t1
ts = t1 - t0
print "ts=", ts
yf = numpy.fft.fft(yy)
yf_abso = numpy.abs(yf)
freq = numpy.fft.fftfreq(n, d=ts)
numpy.savetxt('fft-data.txt', numpy.c_[freq, yf_abso])
######################
# taking the inverse FFT
filename = str("fft-data.txt")
FFTdata = []
FREQdata = []
with open(filename, 'r') as fftfile:
    for line in fftfile:
        if not line.startswith("#"):
            split_line = line.split()
            fpoint = float(split_line[1])
            freqz = float(split_line[0])
            FFTdata.append(fpoint)
            FREQdata.append(freqz)
ireverse = np.fft.ifft(FFTdata)
reverse = np.abs(ireverse)
print type(reverse)
np.savetxt("ireverse.txt", ireverse)
np.savetxt("reverse.txt", reverse)
The sample locations for the output of the IFFT are the same as those for the input to the FFT. You are doing that part right.
The output of the IFFT looks shifted, but it is not. What happens is that you threw away the phase information of the frequency spectrum when you saved it. You do
yf_abso = numpy.abs(yf)
and then save yf_abso. By taking the absolute value, you have thrown away important information. There is a reason that the FFT produces complex values. Throwing away half that information means you cannot reconstruct the original signal any more.
If you save the complex values, and use those in the last part of your code to compute the IFFT, then the real component of the output of the IFFT will match your input signal. The imaginary component there should be close to zero, different just due to numerical precision issues in floating-point computations.
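As a minimal sketch of that idea (assuming the samples are already in the list yy and the time step is ts, as in the code above), saving the complex spectrum instead of its magnitude keeps the phase, so the IFFT reproduces the input on the same time axis:

import numpy as np

yf = np.fft.fft(yy)                     # complex spectrum; keep real and imaginary parts
np.save('fft-data.npy', yf)             # binary save preserves the complex values

yf_loaded = np.load('fft-data.npy')
reconstructed = np.fft.ifft(yf_loaded)  # complex result
signal = reconstructed.real             # matches yy up to floating-point error
# the time axis is unchanged: t = tt[0] + np.arange(len(yy)) * ts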

Python fast Fourier transform for very noisy data

I have a file with velocity magnitude data and vorticity magnitude data from a fluid simulation.
I want to find out what the frequency is for these two data sets.
my code:
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import re
import math
import matplotlib.pyplot as plt
import numpy as np

probeU1 = []
probeV1 = []
# this creates an array containing all the timesteps, cutting off the first 180, because the system has to stabilize.
number = [round(x * 0.1, 1) for x in range(180, 301)]

# this loop goes over the different time directories and reads the velocity file.
for i in range(len(number)):
    filenamepath = "/Refinement/Vorticity4/probes/" + str(number[i]) + "/U"
    data = open(filenamepath, "r")
    temparray = []
    # removes all the formatting around the data
    for line in data:
        if line.startswith('#'):
            continue
        else:
            line = re.sub('[()]', "", line)
            values = line.split()
            #print values[1], values[2]
            xco = values[1::3]
            yco = values[2::3]
            # here it extracts all the velocity data from all the different probes
            for i in range(len(xco)):
                floatx = float(xco[i])
                floaty = float(yco[i])
                temp1 = math.pow(floatx, 2)
                temp2 = math.pow(floaty, 2)
                #print temp2, temp1
                temp3 = temp1 + temp2
                temp4 = math.sqrt(temp3)
                # takes the magnitude of the velocity
                #print temp4
                temparray.append(temp4)
    probeU1.append(temparray)
#
#print probeU1[0]
#print len(probeU1[0])
#
# this loop goes over the different time directories and reads the vorticity file.
for i in range(len(number)):
    filenamepath = "/Refinement/Vorticity4/probes/" + str(number[i]) + "/vorticity"
    data = open(filenamepath, "r")
    # print data.read()
    temparray1 = []
    for line in data:
        if line.startswith('#'):
            continue
        else:
            line = re.sub('[()]', "", line)
            values = line.split()
            zco = values[3::3]
            # because of the two-dimensionality, the z-component of the vorticity is already the magnitude
            for i in range(len(zco)):
                abso = float(zco[i])
                add = np.abs(abso)
                temparray1.append(add)
    probeV1.append(temparray1)

# Old code block to display the data and check that it made a wave pattern (which it did)
## Printing all probe data from 180-300 in one graph (unintelligible)
#for i in range(len(probeU1[1])):
#    B = []
#    for l in probeU1:
#        B.append(l[i])
#    # print 'B=', B
#    # print i
#    plt.plot(number, B)
#
#
#plt.ylabel('magnitude of velocity')
#plt.show()
#
## Printing all probe data from 180-300 in one graph (unintelligible)
#for i in range(len(probeV1[1])):
#    R = []
#    for l in probeV1:
#        R.append(l[i])
#    # print 'R=', R
#    # print i
#    plt.plot(number, R)
#
#
#plt.ylabel('magnitude of vorticity')
#plt.show()

# Here is where the magic happens (I hope)
ans = []
for i in range(len(probeU1[1])):
    b = []
    # probeU1 is a nested list, because there are 117 different probes, which all have the data from timestep 180-301
    for l in probeU1:
        b.append(l[i])
    # the signal was not oscillating around 0, so moved it there by subtracting the mean
    B = b - np.mean(b)
    # here the fft happens
    u = np.fft.fft(B)
    # This should calculate the frequencies?
    freq = np.fft.fftfreq(len(B), d=(number[1] - number[0]))
    # If I'm not mistaken this finds the peak frequency for 1 probe and passes it to another list
    val = np.argmax(np.abs(u))
    ans.append(np.abs(freq[val]))
    plt.plot(freq, np.abs(u))

#print np.mean(ans)
plt.xlabel('frequency?')
plt.savefig('velocitiy frequency')
plt.show()

# just a duplicate of the one above
ans1 = []
for i in range(len(probeV1[1])):
    c = []
    for l in probeU1:
        c.append(l[i])
    C = c - np.mean(c)
    y = np.fft.fft(C)
    freq1 = np.fft.fftfreq(len(C), d=(number[1] - number[0]))
    val = np.argmax(np.abs(y))
    ans1.append(np.abs(freq1[val]))
    plt.plot(freq1, np.abs(y))

#print np.mean(ans1)
plt.ylabel('frequency?')
plt.savefig('vorticity frequency')
plt.show()
data.close()
My data contains 117 probes, each having its own 121 points of velocity magnitude data.
My aim is to find the dominant frequency for each probe, then collect all of those and plot them in a histogram.
My question is about the part where it says "this is where the magic happens". I believe the FFT itself is already working correctly:
y = np.fft.fft(C)
freq1 = np.fft.fftfreq(len(C), d= (number[1] - number[0]))
And if I'm not mistaken, the freq1 list should contain all the frequencies for a given probe. I've checked this list visually and the number of different frequencies is very high (20+), so the signal is probably very noisy.
# If I'm not mistaken this finds the peak frequency for 1 probe and passes it to another list
val = np.argmax(np.abs(u))
ans.append(np.abs(freq1[val]))
This part should in theory take the biggest signal from one probe and then put it in the "ans" list. But I'm a bit confused as to how I can correctly identify the right frequency, as in theory there should be one main frequency. How can I correctly estimate the "main" frequency from all this data, given all the noise?
For reference I'm modeling an Von Karmann vortex street and I'm looking for the frequency of vortex shedding. https://en.wikipedia.org/wiki/K%C3%A1rm%C3%A1n_vortex_street
Can anyone help me on how to solve this?
The line
freq1 = np.fft.fftfreq(len(C), d= (number[1] - number[0]))
only generates an index of the form
freq1 = [0, 1, ..., len(C)/2-1, -len(C)/2, ..., -1] / (d*len(C))
which is useful to compute your frequency array as
freq[i] = freq1[i]*alpha
where alpha is your basic wavenumber, computed as
alpha = 1/Ts
with Ts being your sampling period. I think that because freq1 is not scaled, your array of frequencies comes out so high.
Note that if you are sampling your data using different time steps, you will need to interpolate it onto an evenly spaced domain, using numpy.interp for example.
To estimate the main frequency, just find the index where the FFT-transformed variable is highest and relate that index to freq[i].
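As a minimal sketch of that last step (assuming B holds one probe's mean-subtracted samples and dt is the uniform sampling step, as in the question):

import numpy as np

spectrum = np.fft.fft(B)
freqs = np.fft.fftfreq(len(B), d=dt)  # in Hz when dt is in seconds

pos = freqs > 0                       # ignore the DC bin and the negative-frequency mirror
main_freq = freqs[pos][np.argmax(np.abs(spectrum[pos]))]
print(main_freq)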

Python: passing coordinates from list to function

I am using some code from a workshop to extract data from netCDF files by the coordinates closest to my specified coordinates. When using just one set of coordinates I am able to extract the values I need without trouble as below:
import numpy as np
import netCDF4
from math import pi
from numpy import cos, sin

def tunnel_fast(latvar, lonvar, lat0, lon0):
    '''
    Find closest point in a set of (lat,lon) points to specified point
    latvar - 2D latitude variable from an open netCDF dataset
    lonvar - 2D longitude variable from an open netCDF dataset
    lat0,lon0 - query point
    Returns iy,ix such that the square of the tunnel distance
    between (latval[it,ix],lonval[iy,ix]) and (lat0,lon0)
    is minimum.
    '''
    rad_factor = pi/180.0  # for trigonometry, need angles in radians
    # Read latitude and longitude from file into numpy arrays
    latvals = latvar[:] * rad_factor
    lonvals = lonvar[:] * rad_factor
    ny, nx = latvals.shape
    lat0_rad = lat0 * rad_factor
    lon0_rad = lon0 * rad_factor
    # Compute numpy arrays for all values, no loops
    clat, clon = cos(latvals), cos(lonvals)
    slat, slon = sin(latvals), sin(lonvals)
    delX = cos(lat0_rad)*cos(lon0_rad) - clat*clon
    delY = cos(lat0_rad)*sin(lon0_rad) - clat*slon
    delZ = sin(lat0_rad) - slat
    dist_sq = delX**2 + delY**2 + delZ**2
    minindex_1d = dist_sq.argmin()  # 1D index of minimum element
    iy_min, ix_min = np.unravel_index(minindex_1d, latvals.shape)
    return iy_min, ix_min

ncfile = netCDF4.Dataset('E:\wind_level2_1.nc', 'r')
latvar = ncfile.variables['latitude']
lonvar = ncfile.variables['longitude']

#_________GG turbine_________GAD10 Latitude 51.735516, GAD10 Longitude 1.942656
iy, ix = tunnel_fast(latvar, lonvar, 51.735516, 1.942656)
print('Closest lat lon:', latvar[iy, ix], lonvar[iy, ix])
refLAT = latvar[iy, ix]
refLON = lonvar[iy, ix]
# try to find the data for this location
SARwind = ncfile.variables['sar_wind'][:, :]
ModelWind = ncfile.variables['model_speed'][:, :]
print 'iy,ix'  # appears to be the index of the value of Lat,lon
print SARwind[iy, ix]
ncfile.close()
Now I am trying to loop through a text file containing coordinates, coord_list, to extract each set of coordinates, find the data, and then move to the next set of coordinates in the list. This code works on its own, as below:
import csv
from decimal import Decimal

with open('Turbine_locs_no_header.csv', 'rb') as f:
    reader = csv.reader(f)
    #coord_list = list(reader)
    coord_list = [reader]
    end_row = len(coord_list)
    lon_ind = 1
    lat_ind = 2
    for row in range(0, end_row-1):  # end_row - 1 due to the 0 index
        turbine_lat = coord_list[row][lat_ind]
        turbine_lon = coord_list[row][lon_ind]
        turbine_lat = [Decimal(turbine_lat)]
        print 'lat', turbine_lat, 'lon', turbine_lon, row
However, I want to pass coordinates from the text file to this part of the original code, iy,ix = tunnel_fast(latvar, lonvar, 51.94341, 1.922094888), replacing the numbers with variables: iy, ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon). When I try to combine the two pieces of code by creating a function get_coordinates, I get the following errors:
File "C:/Users/mm/test_nc_bycoords_GG_turbines_AGW.py", line 65, in <module>
get_coordinates(coord_list, latvar, lonvar)
File "C:/Users/mm/test_nc_bycoords_GG_turbines_AGW.py", line 51, in get_coordinates
iy, ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon)
File "C:/Users/mm/test_nc_bycoords_GG_turbines_AGW.py", line 27, in tunnel_fast
lat0_rad = lat0 * rad_factor
TypeError: can't multiply sequence by non-int of type 'float'
I thought this was because turbine_lat and turbine_lon are list items and so cannot be used, but that doesn't seem to be connected to the errors. I know this code needs more work anyway, but if anyone could help me spot where I am going wrong, that would be very helpful. My attempt to combine the two pieces of code is below.
import numpy as np
import netCDF4
from math import pi
from numpy import cos, sin
import csv

# edited from https://github.com/Unidata/unidata-python-workshop/blob/a56daa50d7b343c7debe93968683613642d6b9f7/notebooks/netcdf-by-coordinates.ipynb
def tunnel_fast(latvar, lonvar, lat0, lon0):
    '''
    Find closest point in a set of (lat,lon) points to specified point
    latvar - 2D latitude variable from an open netCDF dataset
    lonvar - 2D longitude variable from an open netCDF dataset
    lat0,lon0 - query point
    Returns iy,ix such that the square of the tunnel distance
    between (latval[it,ix],lonval[iy,ix]) and (lat0,lon0)
    is minimum.
    '''
    rad_factor = pi/180.0  # for trigonometry, need angles in radians
    # Read latitude and longitude from file into numpy arrays
    latvals = latvar[:] * rad_factor
    lonvals = lonvar[:] * rad_factor
    ny, nx = latvals.shape
    lat0_rad = lat0 * rad_factor
    lon0_rad = lon0 * rad_factor
    # Compute numpy arrays for all values, no loops
    clat, clon = cos(latvals), cos(lonvals)
    slat, slon = sin(latvals), sin(lonvals)
    delX = cos(lat0_rad)*cos(lon0_rad) - clat*clon
    delY = cos(lat0_rad)*sin(lon0_rad) - clat*slon
    delZ = sin(lat0_rad) - slat
    dist_sq = delX**2 + delY**2 + delZ**2
    minindex_1d = dist_sq.argmin()  # 1D index of minimum element
    iy_min, ix_min = np.unravel_index(minindex_1d, latvals.shape)
    return iy_min, ix_min

#________________my edits___________________________________________________
def get_coordinates(coord_list, latvar, lonvar):
    "this takes coordinates from a .csv and assigns them to variables"
    end_row = len(coord_list)
    lon_ind = 1
    lat_ind = 2
    for row in range(0, end_row-1):  # end_row - 1 due to the 0 index
        turbine_lat = coord_list[row][lat_ind]
        turbine_lon = coord_list[row][lon_ind]
        iy, ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon)
        print('Closest lat lon:', latvar[iy, ix], lonvar[iy, ix])
#________________________________________________________________________________________________________________________

ncfile = netCDF4.Dataset('NOGAPS_wind_level2_1.nc', 'r')
latvar = ncfile.variables['latitude']
lonvar = ncfile.variables['longitude']

#____added in to pass to the get_coordinates function
with open('Turbine_locs_no_header.csv', 'rb') as f:
    reader = csv.reader(f)
    coord_list = list(reader)

#_________take latitude from the get_coordinates function
get_coordinates(coord_list, latvar, lonvar)
#iy,ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon)  # get these from assign_coordinates_fromlist.py
#print('Closest lat lon:', latvar[iy,ix], lonvar[iy,ix])

SARwind = ncfile.variables['sar_wind'][:, :]
ModelWind = ncfile.variables['model_speed'][:, :]
print 'iy,ix'  # appears to be the index of the value of Lat,lon
print SARwind[iy, ix]
ncfile.close()
When I try to convert
You can unpack an argument list using *args (see the docs). In your case you could do tunnel_fast(latvar, lonvar, *coord_list[row]). You need to make sure that the order of arguments in coord_list[row] is correct and if coord_list[row] contains more than the two values then you need to slice it appropriately.
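For example, with the column order used in the question (id in column 0, lon in column 1, lat in column 2), a sketch of the reordering and slicing might look like this:

row = coord_list[0]                        # e.g. ['GAD10', '1.942656', '51.735516']
lat_lon = (float(row[2]), float(row[1]))   # reorder to (lat, lon) and convert to float
iy, ix = tunnel_fast(latvar, lonvar, *lat_lon)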
Thanks to help from a_guest, it turned out to be a simple problem: lat0 and lon0 were being passed as <type 'str'> to tunnel_fast, when it requires <type 'float'>. This appears to come from loading coord_list as a list.
with open('Turbine_locs_no_header.csv', 'rb') as f:
    reader = csv.reader(f)
    coord_list = list(reader)
The workaround I used was to convert lat0 and lon0 to floats at the beginning of tunnel_fast
lat0 = float(lat0)
lon0 = float(lon0)
I am sure there is a more elegant way to do this, but it works.
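An alternative sketch that avoids touching tunnel_fast is to convert inside the loop over the CSV rows (same column indices as above):

for row in coord_list:
    turbine_lat = float(row[lat_ind])
    turbine_lon = float(row[lon_ind])
    iy, ix = tunnel_fast(latvar, lonvar, turbine_lat, turbine_lon)
    print('Closest lat lon:', latvar[iy, ix], lonvar[iy, ix])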

Python calculate lots of distances quickly

I have an input of 36,742 points, which means if I wanted to calculate the lower triangle of a distance matrix (using the Vincenty approximation) I would need to generate 36,742*36,741*0.5 = 674,968,911 distances.
I want to keep the pair combinations which are within 50km of each other. My current set-up is as follows
shops = [[id, lat, lon], ...]

def lower_triangle_mat(points):
    for i in range(len(shops)-1):
        for j in range(i+1, len(shops)):
            yield [shops[i], shops[j]]

def return_stores_cutoff(points, cutoff_km=0):
    below_cut = []
    counter = 0
    for x in lower_triangle_mat(points):
        dist_km = vincenty(x[0][1:3], x[1][1:3]).km
        counter += 1
        if counter % 1000000 == 0:
            print("%d out of %d" % (counter, (len(shops)*(len(shops)-1)*0.5)))
        if dist_km <= cutoff_km:
            below_cut.append([x[0][0], x[1][0], dist_km])
    return below_cut

start = time.clock()
stores = return_stores_cutoff(points=shops, cutoff_km=50)
print(time.clock() - start)
This will obviously take hours and hours. Some possibilities I was thinking of:
Use numpy to vectorise these calculations rather than looping through
Use some kind of hashing to get a quick rough-cut off (all stores within 100km) and then only calculate accurate distances between those stores
Instead of storing the points in a list use something like a quad-tree but I think that only helps with the ranking of close points rather than actual distance -> so I guess some kind of geodatabase
I can obviously try the haversine or project and use euclidean distances, however I am interested in using the most accurate measure possible
Make use of parallel processing (however I was having a bit of difficulty coming up how to cut the list to still get all the relevant pairs).
Edit: I think geohashing is definitely needed here - an example from:
import random
from geoindex import GeoGridIndex, GeoPoint

geo_index = GeoGridIndex()
for _ in range(10000):
    lat = random.random()*180 - 90
    lng = random.random()*360 - 180
    geo_index.add_point(GeoPoint(lat, lng))

center_point = GeoPoint(37.7772448, -122.3955118)
for distance, point in geo_index.get_nearest_points(center_point, 10, 'km'):
    print("We found {0} in {1} km".format(point, distance))
However, I would also like to vectorise (instead of loop) the distance calculations for the stores returned by the geo-hash.
Edit2: Pouria Hadjibagheri - I tried using lambda and map:
# [B]: Mapping approach
lwr_tr_mat = ((shops[i],shops[j]) for i in range(len(shops)-1) for j in range(i+1,len(shops)))
func = lambda x: (x[0][0],x[1][0],vincenty(x[0],x[1]).km)
# Trying to see if conditional statements slow this down
func_cond = lambda x: (x[0][0],x[1][0],vincenty(x[0],x[1]).km) if vincenty(x[0],x[1]).km <= 50 else None
start = time.clock()
out_dist = list(map(func,lwr_tr_mat))
print(time.clock() - start)
start = time.clock()
out_dist = list(map(func_cond,lwr_tr_mat))
print(time.clock() - start)
And they both took around 61 seconds (I restricted the number of stores to 2,000 from 32,000). Perhaps I used map incorrectly?
This sounds like a classic use case for k-D trees.
If you first transform your points into Euclidean space then you can use the query_pairs method of scipy.spatial.cKDTree:
from scipy.spatial import cKDTree
tree = cKDTree(data)
# where data is (nshops, ndim) containing the Euclidean coordinates of each shop
# in units of km
pairs = tree.query_pairs(50, p=2) # 50km radius, L2 (Euclidean) norm
pairs will be a set of (i, j) tuples corresponding to the row indices of pairs of shops that are ≤50km from each other.
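As a sketch, those index pairs can be mapped back onto the original records (assuming shops is the [id, lat, lon] list from the question and data is the matching array of projected coordinates in km):

import numpy as np

below_cut = []
for i, j in pairs:
    dist_km = np.linalg.norm(data[i] - data[j])  # Euclidean distance in km
    below_cut.append([shops[i][0], shops[j][0], dist_km])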
The output of tree.sparse_distance_matrix is a scipy.sparse.dok_matrix. Since the matrix will be symmetric and you're only interested in unique row/column pairs, you could use scipy.sparse.tril to zero out the upper triangle, giving you a scipy.sparse.coo_matrix. From there you can access the nonzero row and column indices and their corresponding distance values via the .row, .col and .data attributes:
from scipy import sparse
tree_dist = tree.sparse_distance_matrix(tree, max_distance=10000, p=2)
udist = sparse.tril(tree_dist, k=-1) # zero the main diagonal
ridx = udist.row # row indices
cidx = udist.col # column indices
dist = udist.data # distance values
Have you tried mapping entire arrays and functions instead of iterating through them? An example would be as follows:
from numpy.random import rand
my_array = rand(int(5e7), 1) # An array of 50,000,000 random numbers in double.
Now what is normally done is:
squared_list_iter = [value**2 for value in my_array]
Which of course works, but is far from optimal.
The alternative would be to map the array with a function. This is done as follows:
func = lambda x: x**2 # Here is what I want to do on my array.
squared_list_map = map(func, my_array) # Here I am doing it!
Now, one might ask, how is this any different, or even better for that matter? Since now we have added a call to a function, too! Here is your answer:
For the former solution (via iteration):
1 loop: 1.11 minutes.
Compared to the latter solution (mapping):
500 loop, on average 560 ns.
Converting the map() to a list at the same time, via list(map(func, my_array)), would increase the time by a factor of 10, to approximately 500 ms.
You choose!
Thanks for everyone's help. I think I have solved this by incorporating all of the suggestions.
I use numpy to import the geographic co-ordinates and then project them using "France Lambert - 93". This lets me fill scipy.spatial.cKDTree with the points and then calculate a sparse_distance_matrix by specifying a cut-off of 50 km (my projected points are in metres). I then extract the lower triangle to a CSV.
import numpy as np
import csv
import time
from pyproj import Proj, transform

#http://epsg.io/2154 (accuracy: 1.0m)
fr = '+proj=lcc +lat_1=49 +lat_2=44 +lat_0=46.5 +lon_0=3 \
+x_0=700000 +y_0=6600000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 \
+units=m +no_defs'
#http://epsg.io/27700-5339 (accuracy: 1.0m)
uk = '+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 \
+x_0=400000 +y_0=-100000 +ellps=airy \
+towgs84=446.448,-125.157,542.06,0.15,0.247,0.842,-20.489 +units=m +no_defs'

path_to_csv = '.../raw_in.csv'
out_csv = '.../out.csv'

def proj_arr(points):
    inproj = Proj(init='epsg:4326')
    outproj = Proj(uk)
    # origin|destination|lon|lat
    func = lambda x: transform(inproj, outproj, x[2], x[1])
    return np.array(list(map(func, points)))

tstart = time.time()

# Import points as geographic coordinates
# ID|lat|lon
#Sample to try and replicate
#points = np.array([
#    [39007,46.585012,5.5857829],
#    [88086,48.192370,6.7296289],
#    [62627,50.309155,3.0218611],
#    [14020,49.133972,-0.15851507],
#    [1091, 42.981765,2.0104902]])
#
points = np.genfromtxt(path_to_csv,
                       delimiter=',',
                       skip_header=1)
print("Total points: %d" % len(points))
print("Triangular matrix contains: %d" % (len(points)*((len(points))-1)*0.5))

# Get projected co-ordinates
proj_pnts = proj_arr(points)

# Fill quad-tree
from scipy.spatial import cKDTree
tree = cKDTree(proj_pnts)
cut_off_metres = 1600
tree_dist = tree.sparse_distance_matrix(tree,
                                        max_distance=cut_off_metres,
                                        p=2)

# Extract triangle
from scipy import sparse
udist = sparse.tril(tree_dist, k=-1)  # zero the main diagonal
print("Distances after quad-tree cut-off: %d " % len(udist.data))

# Export CSV
import csv
f = open(out_csv, 'w', newline='')
w = csv.writer(f, delimiter=",", )
w.writerow(['id_a', 'lat_a', 'lon_a', 'id_b', 'lat_b', 'lon_b', 'metres'])
w.writerows(np.column_stack((points[udist.row],
                             points[udist.col],
                             udist.data)))
f.close()

"""
Get ID labels
"""
id_to_csv = '...id.csv'
id_labels = np.genfromtxt(id_to_csv,
                          delimiter=',',
                          skip_header=1,
                          dtype='U')

"""
Try vincenty on the un-projected co-ordinates
"""
from geopy.distance import vincenty
vout_csv = '.../out_vin.csv'
test_vin = np.column_stack((points[udist.row].T[1:3].T,
                            points[udist.col].T[1:3].T))
func = lambda x: vincenty(x[0:2], x[2:4]).m
output = list(map(func, test_vin))

# Export CSV
f = open(vout_csv, 'w', newline='')
w = csv.writer(f, delimiter=",", )
w.writerow(['id_a', 'id_a2', 'lat_a', 'lon_a',
            'id_b', 'id_b2', 'lat_b', 'lon_b',
            'proj_metres', 'vincenty_metres'])
w.writerows(np.column_stack((list(id_labels[udist.row]),
                             points[udist.row],
                             list(id_labels[udist.col]),
                             points[udist.col],
                             udist.data,
                             output,
                             )))
f.close()
print("Finished in %.0f seconds" % (time.time()-tstart))
This approach took 164 seconds to generate (for 5,306,434 distances) - compared to 9 - and also around 90 seconds to save to disk.
I then compared the difference in the vincenty distance and the hypotenuse distance (on the projected co-ordinates).
The mean difference in metres was 2.7 and the mean difference/metres was 0.0073% - which looks great.
"Use some kind of hashing to get a quick rough-cut off (all stores within 100km) and then only calculate accurate distances between those stores"
I think this might be better called gridding. So first make a dict with a set of coords as the key, and put each shop in a 50 km bucket near that point. Then, when you are calculating distances, you only look in nearby buckets rather than iterating through every shop in the whole universe.
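A rough sketch of that idea (hypothetical helper names; the cell size is only a coarse rough-cut, since longitude cells shrink towards the poles):

from collections import defaultdict

CELL_DEG = 0.5  # roughly 55 km in latitude

def cell(lat, lon):
    return (int(lat // CELL_DEG), int(lon // CELL_DEG))

buckets = defaultdict(list)
for shop in shops:  # shops = [[id, lat, lon], ...]
    buckets[cell(shop[1], shop[2])].append(shop)

def candidates(shop):
    """Shops in the same or neighbouring cells -- only these need an exact distance."""
    cy, cx = cell(shop[1], shop[2])
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            for other in buckets.get((cy + dy, cx + dx), []):
                if other[0] != shop[0]:
                    yield other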
You can use vectorization with the haversine formula discussed in this thread Haversine Formula in Python (Bearing and Distance between two GPS points)
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6371 * c
Here is the %%timeit result for 7,451,653 distances:
642 ms ± 20.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
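Wrapped into a function, the same formula can be broadcast across arrays of points; a sketch (inputs assumed to be NumPy arrays of degrees):

import numpy as np

def haversine_km(lat1, lon1, lat2, lon2):
    """Element-wise great-circle distance in km between arrays of points in degrees."""
    lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
    return 6371 * 2 * np.arcsin(np.sqrt(a))

# e.g. all pairwise distances for a modest number of points, via broadcasting:
# d = haversine_km(lat[:, None], lon[:, None], lat[None, :], lon[None, :])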

SciPy RectSphereBivariateSpline interpolation over sphere returning ValueError

I have 3D measurement data on a sphere that is very coarse and I want to interpolate.
I found that RectSphereBivariateSpline from scipy.interpolate should be most suitable.
I used the example in the RectSphereBivariateSpline documentation as a starting point and now have the following code:
""" read csv input file, post process and plot 3D data """
import csv
import numpy as np
from mayavi import mlab
from scipy.interpolate import RectSphereBivariateSpline
# user input
nElevationPoints = 17 # needs to correspond with csv file
nAzimuthPoints = 40 # needs to correspond with csv file
threshold = - 40 # needs to correspond with how measurement data was captured
turnTableStepSize = 72 # needs to correspond with measurement settings
resolution = 0.125 # needs to correspond with measurement settings
# read data from file
patternData = np.empty([nElevationPoints, nAzimuthPoints]) # empty buffer
ifile = open('ttest.csv') # need the 'b' suffix to prevent blank rows being inserted
reader = csv.reader(ifile,delimiter=',')
reader.next() # skip first line in csv file as this is only text
for nElevation in range (0,nElevationPoints):
# azimuth
for nAzimuth in range(0,nAzimuthPoints):
patternData[nElevation,nAzimuth] = reader.next()[2]
ifile.close()
# post process
def r(thetaIndex,phiIndex):
"""r(thetaIndex,phiIndex): function in 3D plotting to return positive vector length from patternData[theta,phi]"""
radius = -threshold + patternData[thetaIndex,phiIndex]
return radius
#phi,theta = np.mgrid[0:nAzimuthPoints,0:nElevationPoints]
theta = np.arange(0,nElevationPoints)
phi = np.arange(0,nAzimuthPoints)
thetaMesh, phiMesh = np.meshgrid(theta,phi)
stepSizeRad = turnTableStepSize * resolution * np.pi / 180
theta = theta * stepSizeRad
phi = phi * stepSizeRad
# create new grid to interpolate on
phiIndex = np.linspace(1,360,360)
phiNew = phiIndex*np.pi/180
thetaIndex = np.linspace(1,180,180)
thetaNew = thetaIndex*np.pi/180
thetaNew,phiNew = np.meshgrid(thetaNew,phiNew)
# create interpolator object and interpolate
data = r(thetaMesh,phiMesh)
lut = RectSphereBivariateSpline(theta,phi,data.T)
data_interp = lut.ev(thetaNew.ravel(),phiNew.ravel()).reshape((360,180)).T
x = (data_interp(thetaIndex,phiIndex)*np.cos(phiNew)*np.sin(thetaNew))
y = (-data_interp(thetaIndex,phiIndex)*np.sin(phiNew)*np.sin(thetaNew))
z = (data_interp(thetaIndex,phiIndex)*np.cos(thetaNew))
# plot 3D data
obj = mlab.mesh(x, y, z, colormap='jet')
obj.enable_contours = True
obj.contour.filled_contours = True
obj.contour.number_of_contours = 20
mlab.show()
The example from the documentation works, but when I try to run the above code with the following test data: testdata I get a ValueError at the code position where the RectSphereBivariateSpline interpolator object is declared:
ValueError:
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi, i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned.
I have tried and tried, but I am absolutely clueless as to what I should change in order to satisfy the RectSphereBivariateSpline object.
Does anyone have any hint as to what I may be doing wrong?
-- EDIT --
With the suggestions from @HYRY, I now have the following code that runs without runtime errors:
""" read csv input file, post process and plot 3D data """
import csv
import numpy as np
from mayavi import mlab
from scipy.interpolate import RectSphereBivariateSpline
# user input
nElevationPoints = 17 # needs to correspond with csv file
nAzimuthPoints = 40 # needs to correspond with csv file
threshold = - 40 # needs to correspond with how measurement data was captured
turnTableStepSize = 72 # needs to correspond with measurement settings
resolution = 0.125 # needs to correspond with measurement settings
# read data from file
patternData = np.empty([nElevationPoints, nAzimuthPoints]) # empty buffer
ifile = open('ttest.csv') # need the 'b' suffix to prevent blank rows being inserted
reader = csv.reader(ifile,delimiter=',')
reader.next() # skip first line in csv file as this is only text
for nElevation in range (0,nElevationPoints):
# azimuth
for nAzimuth in range(0,nAzimuthPoints):
patternData[nElevation,nAzimuth] = reader.next()[2]
ifile.close()
# post process
def r(thetaIndex,phiIndex):
"""r(thetaIndex,phiIndex): function in 3D plotting to return positive vector length from patternData[theta,phi]"""
radius = -threshold + patternData[thetaIndex,phiIndex]
return radius
#phi,theta = np.mgrid[0:nAzimuthPoints,0:nElevationPoints]
theta = np.arange(0,nElevationPoints)
phi = np.arange(0,nAzimuthPoints)
thetaMesh, phiMesh = np.meshgrid(theta,phi)
stepSizeRad = turnTableStepSize * resolution * np.pi / 180
theta = theta * stepSizeRad
phi = phi * stepSizeRad
# create new grid to interpolate on
phiIndex = np.arange(1,361)
phiNew = phiIndex*np.pi/180
thetaIndex = np.arange(1,181)
thetaNew = thetaIndex*np.pi/180
thetaNew,phiNew = np.meshgrid(thetaNew,phiNew)
# create interpolator object and interpolate
data = r(thetaMesh,phiMesh)
theta[0] += 1e-6 # zero values for theta cause program to halt; phi makes no sense at theta=0
lut = RectSphereBivariateSpline(theta,phi,data.T)
data_interp = lut.ev(thetaNew.ravel(),phiNew.ravel()).reshape((360,180)).T
def rInterp(theta,phi):
"""rInterp(theta,phi): function in 3D plotting to return positive vector length from interpolated patternData[theta,phi]"""
thetaIndex = theta/(np.pi/180)
thetaIndex = thetaIndex.astype(int)
phiIndex = phi/(np.pi/180)
phiIndex = phiIndex.astype(int)
radius = data_interp[thetaIndex,phiIndex]
return radius
# recreate mesh minus one, needed otherwise the below gives index error, but why??
phiIndex = np.arange(0,360)
phiNew = phiIndex*np.pi/180
thetaIndex = np.arange(0,180)
thetaNew = thetaIndex*np.pi/180
thetaNew,phiNew = np.meshgrid(thetaNew,phiNew)
x = (rInterp(thetaNew,phiNew)*np.cos(phiNew)*np.sin(thetaNew))
y = (-rInterp(thetaNew,phiNew)*np.sin(phiNew)*np.sin(thetaNew))
z = (rInterp(thetaNew,phiNew)*np.cos(thetaNew))
# plot 3D data
obj = mlab.mesh(x, y, z, colormap='jet')
obj.enable_contours = True
obj.contour.filled_contours = True
obj.contour.number_of_contours = 20
mlab.show()
However, the plot is very different from the non-interpolated data; see the picture here as a reference.
Also, when running the interactive session, data_interp is much larger in value (>3e5) than the original data (which is around 20 max).
Any further tips?
It looks like theta[0] can't be 0. If you change it a little before calling RectSphereBivariateSpline:
theta[0] += 1e-6
