I have 5 different trajectories for my project. First I read each one from its file, then parse the file and save the values to a list. After that I digitize these values. I want to align the x and y coordinates separately and plot the x and y coordinates together on a grid.
This is what I have done so far. I use the dtw package in Python, but it takes two lists and returns the path as an array. How can I convert that path into an aligned trajectory across the 5 different lists?
# imports implied by the snippet (dtw/euclidean assumed from the dtw package and scipy)
import re
import numpy
import matplotlib.pyplot as plt
from matplotlib import cm
from scipy.spatial.distance import euclidean
from dtw import dtw

x_dict = {}
y_dict = {}
for x in ["1", "2", "3", "4", "5"]:
    file = open("data-" + x + ".txt", encoding="latin-1")
    data = file.read()
    pos_list = re.findall(r'position:(.*?)orientation:', data, re.DOTALL)
    or_list = re.findall(r'orientation:(.*?)scale:', data, re.DOTALL)
    for i in range(len(pos_list)):
        pos_list[i] = pos_list[i].replace('\n', '')
    regexx = re.compile(r'x: (.*?) y:')
    regexy = re.compile(r'y: (.*?) z:')
    posx_list = [m.group(1) for l in pos_list for m in [regexx.search(l)] if m]
    posx_list = list(map(float, posx_list))
    posy_list = [m.group(1) for l in pos_list for m in [regexy.search(l)] if m]
    posy_list = list(map(float, posy_list))
    bins = numpy.linspace(-1, 1, 100)
    digitized_x = numpy.digitize(posx_list, bins)
    digitized_y = numpy.digitize(posy_list, bins)
    x_dict[x] = digitized_x
    y_dict[x] = digitized_y

dist, cost, acc, path = dtw(y_dict["5"], y_dict["4"], dist=euclidean)

plt.imshow(acc.T, origin='lower', cmap=cm.gray, interpolation='nearest')
plt.plot(path[0], path[1], 'w')
plt.xlim((-0.5, acc.shape[0] - 0.5))
plt.ylim((-0.5, acc.shape[1] - 0.5))
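A minimal sketch of how the warping path could be turned into aligned sequences, assuming path is a pair of index arrays (path[0] indexing the first series, path[1] the second), which matches how it is plotted above; align_with_path is a hypothetical helper, not part of the dtw package:

# Hypothetical helper: expand two series along a DTW warping path so they
# have the same length and matched indices line up.
def align_with_path(series_a, series_b, path):
    aligned_a = [series_a[i] for i in path[0]]
    aligned_b = [series_b[j] for j in path[1]]
    return aligned_a, aligned_b

# e.g. align trajectory "4" onto trajectory "5" and plot both y-series on one grid
aligned_5, aligned_4 = align_with_path(y_dict["5"], y_dict["4"], path)
plt.figure()
plt.plot(aligned_5, label="trajectory 5 (y)")
plt.plot(aligned_4, label="trajectory 4 (y)")
plt.legend()
plt.grid(True)
plt.show()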
# imports implied by the names used below (np, pd, sp, plt)
import numpy as np
import pandas as pd
import scipy as sp
import scipy.optimize
import matplotlib.pyplot as plt

def Data_to_array(file):
    r = int
    x, y = [], []
    data = []
    line_num = 0
    #call data
    P = open(file, 'r')
    data = P.readlines()
    #Get it to ignore strings
    for line in data:
        line_num += 1
        if line.find("[data]") >= 0:
            r = (line_num + 1)
            # Data = P.readlines()[:r]
            # print (Data)
        if "Sampling Rate" in line:
            SR = float(line[15:])
        if "temperature=" in line:
            T = float(line[12:18])
            print(str("Temperature = "))
            print(T)
    Data = data[r:line_num]
    #assign data into dataframe
    df = pd.DataFrame(Data)
    #rename column in data
    df = df.rename(columns={0: 'volts'})
    #get it to recognise the index
    df.index.name = 'Index'
    #get it to recognise the data as number
    df = df.astype({'volts': float})
    #get index to start at 1
    df.index += 1
    #assign data to lists
    I = df.index.to_list()
    t = df['volts'].to_list()
    #get it to invert data
    y = [element * -1 for element in t]
    #multiply by sampling rate
    x = [element / SR for element in I]
    return x, y
#This is to create the exponential function
def Exponential_func(file):
    temp_array = Data_to_array(file)
    X = np.asarray(temp_array[0])
    a, b = float()
    #Y = temp_array[1]
    f = np.exp(a*X) + b
    return f
#This is to get the optimize function to work
def Exponential_model(file):
    temp_array = Data_to_array(file)
    X = np.asarray(temp_array[0])
    Y = np.asarray(temp_array[1])
    #f = np.exp(X)
    #exp_mod = lf.ExponentialModel(X,Y)
    #pars = exp_mod.guess(Y, X)
    r = sp.optimize.curve_fit(X, Y, Exponential_func.f)
    return r
#This is to plot the data
def Plot_Data(file):
    temp_array = Data_to_array(file)
    X = np.asarray(temp_array[0])
    Y = np.asarray(temp_array[1])
    #p_0 = np.exp(X)
    #sp.optimize.curve_fit(X,Y,p_0)
    plt.scatter(X, Y)
    #plt.plot(Exponential_model.r)
    plt.xlabel("Time (s)")
    plt.ylabel("Capacitance (μF)")  # labels set before show() so they appear on the plot
    plt.show()

# print(Data_to_array('Cz-Si-T-1.txt')[1])
Plot_Data("Cz-Si-T-82.txt")
Exponential_func("Cz-Si-T-82.txt")
Exponential_model("Cz-Si-T-82.txt")
When I try to use the sp.optimize function, I get the error "'function' object has no attribute 'f'", but from what I can find about this problem, my function and variables are already in the correct order.
I need this piece of code to fit an exponential curve to my data, which does follow an exponential trend. Can anyone help? It would also be helpful for the code to print the function of the fitted curve, as I will be integrating under it later.
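For what it is worth, here is a minimal sketch of the usual curve_fit pattern, assuming the intended model is y = exp(a*x) + b as in Exponential_func above and reusing Data_to_array and the file name from the question; curve_fit expects a callable model whose first argument is x followed by the free parameters, then the x and y data:

import numpy as np
import scipy.optimize

# Model with free parameters a and b (assumed from the question's Exponential_func)
def exp_model(x, a, b):
    return np.exp(a * x) + b

x_data, y_data = Data_to_array("Cz-Si-T-82.txt")
popt, pcov = scipy.optimize.curve_fit(exp_model, np.asarray(x_data), np.asarray(y_data))
a_fit, b_fit = popt
print("fitted curve: y = exp(%.6g * x) + %.6g" % (a_fit, b_fit))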
I have a set of lists (i.e. list x, list y, list z), e.g.
x = ['41.95915452', '41.96333025', '41.98135503', '41.95096716', '41.96504172', '41.96526867', '41.98068483', '41.98117072', '41.98059828', '41.95915452', '41.96333025', '41.98135503', '41.95096716']
y = ['12.60718918', '12.62725589', '12.6201431', '12.60017199', '12.62774075', '12.62800706', '12.62812394', '12.6278259', '12.62810614', '12.60718918', '12.62725589', '12.6201431', '12.60017199']
z = ['9.215398066', '8.249650758', '8.791595671', '8.246394455', '9.27132698', '5.667547722', '7.783268126', '9.471492129', '9.668210684', '9.215398066', '8.249650758', '8.791595671', '8.246394455']
There are around 800 such lists. I have to create a 3x3 matrix from the lists x, y and z, taking elements column-wise as [x1, y1, z1], so one of its rows should look like ['41.95915452', '12.60718918', '9.215398066'], and each list must contain at least 4 entries.
My code:
import csv
import numpy as np

for i in np.arange(41.70, 42.10, 0.05):
    #print(round(i,2), end=', ')
    for j in np.arange(12.30, 12.80, 0.05):
        # print(round(j,2), end=', ')
        for k in np.arange(0, 26, 5):
            #print("\n")
            #print(round(i,2),round(j,2),k, end=', ')
            xmax = round(i + 0.05, 2)
            ymax = round(j + 0.05, 2)
            zmax = round(k + 5, 2)
            #print("Voxel",xmax,ymax,zmax)
            v = []
            x1 = []
            y1 = []
            z1 = []
            count = 0
            with open('a.csv') as csvfile:
                plots = csv.reader(csvfile, delimiter=',')
                for rows in plots:
                    if (float(rows[0]) >= i and float(rows[0]) <= xmax and
                            float(rows[1]) >= j and float(rows[1]) <= ymax and
                            float(rows[2]) >= k and float(rows[2]) <= zmax):
                        #print("points", float(rows[0]),float(rows[1]),float(rows[2]))
                        x1.append(rows[0])
                        y1.append(rows[1])
                        z1.append(rows[2])
                        count = count + 1
                        #f = open("demofile2.txt", "a")
                        #f.write(str(i)+","+str(j)+","+str(k)+","+str(count)+"\n")
                        #f.write(text)
                        #f.close()
            #print(count)
            if count > 3:
                v1 = [i, j, k]
                v.append(v1)
                print(v)
                print(x1)
                print(y1)
                print(z1)
                print("\n")
Use numpy vstack and transpose.
Try this code.
np.vstack([x, y, z]).T
If you want the output as a list, then use
np.vstack([x, y, z]).T.tolist()
You can use np.c_ to concatenate the lists along the second axis and then slice the matrix.
res = np.c_[x,y,z][:3,:3]
Output:
array([['41.95915452', '12.60718918', '9.215398066'],
['41.96333025', '12.62725589', '8.249650758'],
['41.98135503', '12.6201431', '8.791595671']], dtype='<U11')
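Note that both answers keep the values as strings (hence the '<U11' dtype in the output above); if numeric values are needed for later work, casting before stacking is one option, e.g. (a small sketch using the same x, y, z lists as in the question):

import numpy as np

# Cast the string lists to float, then column-stack and slice as above
mat = np.c_[np.asarray(x, dtype=float),
            np.asarray(y, dtype=float),
            np.asarray(z, dtype=float)][:3, :3]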
# imports implied by the names used below
import MeCab
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.cluster import KMeans
from sklearn.decomposition import TruncatedSVD

def get_nouns(text):
    tagger = MeCab.Tagger()
    words = []
    for c in tagger.parse(text).splitlines()[:-1]:
        if len(c.split('\t')) < 2:
            continue
        surface, feature = c.split('\t')
        pos = feature.split(',')[0]
        if pos == '名詞':  # noun
            words.append(surface)
    return ' '.join(words)

def bio():
    biolist = []
    howmany = 10
    for giin in read:  # `read` holds the loaded biography records (defined elsewhere)
        if len(giin["education"]) < 1:
            continue
        biolist.append(get_nouns(" ".join(giin["education"])))
    ######################################################
    nparray = np.array(biolist)
    cv = CountVectorizer()
    bags = cv.fit_transform(nparray)
    tfidf = TfidfTransformer(norm='l2', sublinear_tf=True).fit_transform(bags)
    km_model = KMeans(n_clusters=howmany, init='k-means++')
    km_model.fit_transform(tfidf)
    lsa2 = TruncatedSVD(2)
    compressed_text_list = lsa2.fit_transform(tfidf)
    compressed_center_list = lsa2.fit_transform(km_model.cluster_centers_)
    X = []
    Y = []
    X_cent = []
    Y_cent = []
    for x, y in compressed_text_list:
        X.append(x)
        Y.append(y)
    for x, y in compressed_center_list:
        X_cent.append(x)
        Y_cent.append(y)
    clus_list = []
    for i in range(howmany):
        clus_list.append([])
        for a in biolist:
            if km_model.labels_[biolist.index(a)] == i:
                clus_list[i].append(a)
    for a in clus_list:
        print(a)
        print(" ")
    plt.scatter(X, Y, c=km_model.labels_)
    plt.scatter(X_cent, Y_cent, c="r", marker="+")
    plt.show()
I have this code that clusters people's educational histories into 10 groups. My scatter plot looks like this.
As you can see, the scatter plot is not really separated into groups, and the different colors are mixed up with one another. How could this code be changed to make the grouping more distinct?
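One detail worth checking in the code above (a sketch of a possible cause, not a guaranteed fix): lsa2.fit_transform is called twice, once on tfidf and once on the cluster centers, so the documents and the centers are projected by two independently fitted SVDs and end up in different 2-D coordinate systems. Reusing the already-fitted projection for the centers keeps both in the same space:

lsa2 = TruncatedSVD(2)
compressed_text_list = lsa2.fit_transform(tfidf)                     # fit the 2-D projection on the documents
compressed_center_list = lsa2.transform(km_model.cluster_centers_)   # reuse the same fitted projection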
I have grid data in .csv format:
# imports implied by the names used below
from numpy import arange, ones, floor, meshgrid, float32
from mpl_toolkits.basemap import Basemap

srcPath = "/data.csv"
f = open(srcPath, "r")
lines = f.readlines()
f.close()

miss = -9999.
ny, nx = 360, 720
dlat = 0.5
dlon = 0.5
lat0 = -90.0
lon0 = -180.0
Lat = arange(-90, 90 + 0.001, dlat)
Lon = arange(-180, 180 + 0.001, dlon)
a2dat0 = ones([ny, nx], float32) * miss
a2dat1 = ones([ny, nx], float32) * miss
a2dat2 = ones([ny, nx], float32) * miss  # used below; appears to be missing from the original listing
a2dat3 = ones([ny, nx], float32) * miss
a2dat4 = ones([ny, nx], float32) * miss
a2dat5 = ones([ny, nx], float32) * miss
a2dat6 = ones([ny, nx], float32) * miss

for line in lines:
    lon, lat, v0, v1, v2, v3, v4, v5, v6 = map(float, line.split(","))
    x = int(floor((lon - lon0) / dlon))
    y = int(floor((lat - lat0) / dlat))
    a2dat0[y, x] = v0
    a2dat1[y, x] = v1
    a2dat2[y, x] = v2
    a2dat3[y, x] = v3
    a2dat4[y, x] = v4
    a2dat5[y, x] = v5
    a2dat6[y, x] = v6

X, Y = meshgrid(Lon, Lat)
m = Basemap(projection='robin', llcrnrlon=-180., llcrnrlat=-90., urcrnrlon=180., urcrnrlat=90., resolution='i', lon_0=0)
xi, yi = m(X, Y)
Now I want to make a loop over the "a2dat" arrays like:
for i in range (0,7,1):
print a2dat+str(i)+":" ,a2dat+str(i).shape
or
for i in range (0,7,1):
cs+str(i) = m.pcolor(xi,yi,a2dat+str(i),cmap=cmap, norm=norm, )
to plot 7 different global maps using Basemap and the "a2dat" data, but it gives back an error. So how could I make the loop over these data names?
I think your dynamic names could be replaced with lists as follows (this isn't runnable code, so may have some bugs):
data = [np.ones([ny, nx], float32) * miss for _ in range(7)]
# data = [a2dat0, a2dat1, ... a2dat6]   # or a list of the arrays
for line in lines:
    lon, lat, *vs = map(float, line.split(","))  # py3 * unpacking
    # vs = alist[2:] in py2?
    x = int(floor((lon - lon0) / dlon))
    y = int(floor((lat - lat0) / dlat))
    for d, v in zip(data, vs):
        d[y, x] = v
(Once the arrays are collected in a list, their names, if any, in the global environment aren't important.)
and
for i,d in enumerate(data):
print("a2dat%d: "%i ,data.shape)
csdata = [m.pcolor(xi, yi, d, ...) for d in data]
While it is possible to use for i in range(...): loops, generating the index with enumerate is more idiomatic. We're encouraged to think about iterating over a list of objects, not over the indices of those elements.
If you want to dynamically interpret Python code from a string, you need to use eval or exec. Also, your print statement is wrong because it should not have the : there, and the first a2dat should be a string, since you just want to get the text (also note I added a ,).
for i in range(0, 7):
    print "a2dat" + str(i) + ":", eval("a2dat" + str(i)).shape
eval("a2dat"+str(i)) will get you the array object when it's evaluated. You can read up on the difference between eval and exec on your own.
I'm running through some tiffs that represent the amount of water being evaporated. My code below loops through the images and builds a polynomial function for each pixel. However, after poly_fn is appended to list z, only the coefficients are being appended. Can anyone recommend a solution so that the functions themselves are stored in list z? List z must have 351 rows and 308 columns. I attached the tif files in a zip folder.
import os
import glob
import arcpy
import numpy as np

arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
arcpy.env.scratchWorkspace = r'E:\Alcalde\Scratch'
Scratchws = r'E:\Alcalde\Scratch\\'

KC_list = glob.glob(r'E:\Alcalde\Kc\*.tif')
KC_array = []
dates = []
for KC_tiff in KC_list:
    name = os.path.basename(KC_tiff).split(".")[0]
    date = os.path.basename(name).split("_")[2]
    date = float(date[-3:])
    KC_arr = arcpy.RasterToNumPyArray(KC_tiff)
    KC_array.append(KC_arr)
    shape = np.shape(KC_arr)
    date_arr = np.full(shape, date)
    dates.append(date_arr)

z = []
for row in range(0, 351):
    for column in range(0, 308):
        x = []
        y = []
        for img in range(0, 43):
            pixel = KC_array[img][row][column]
            date = dates[img][row][column]
            x.append(dates[img][row][column])
            y.append(KC_array[img][row][column])
        print("Pixel (Row, Column): " + str(row) + "," + str(column))
        poly = np.polyfit(x, y, 3)
        poly_fn = np.poly1d(poly)
        z.append(poly_fn)
        print(poly_fn)
        print("====================================")

w = np.array([np.array(xi) for xi in z])
q = np.reshape(w, (351, 308, 4))
print(q[350][307])
print("Complete..")