I'm supposed to be writing a for loop which does the following:
Using the singular vectors (columns of Ur etc. and rows of VrT etc.) corresponding to the largest n singular values, create new R, G, and B matrices of the same size as the original image (500 x 375).
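Since svd returns the singular values in descending order, keeping the largest n means keeping the first n columns of U, the first n singular values, and the first n rows of VT. A minimal sketch of that rank-n reconstruction (stand-in data, not the actual image):
import numpy as np

A = np.random.rand(375, 500)                  # stand-in for one colour channel
U, S, VT = np.linalg.svd(A, full_matrices=False)
n = 10                                        # number of singular values to keep
A_n = U[:, :n] @ np.diag(S[:n]) @ VT[:n, :]   # rank-n approximation, same shape as A
print(A_n.shape)                              # (375, 500)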
Here is what I have so far:
from PIL import Image
import numpy as np
from scipy.linalg import svd
im = Image.open('image.png')  # assumed: the original 500 x 375 RGB image (loading not shown in the post)
r, g, b = im.split()
R = np.reshape(np.array(r.getdata()), (375, 500), order='F')
G = np.reshape(np.array(g.getdata()), (375, 500), order='F')
B = np.reshape(np.array(b.getdata()), (375, 500), order='F')
Ur, Sr, VrT = svd(R.T, full_matrices=False)
Ug, Sg, VgT = svd(G.T, full_matrices=False)
Ub, Sb, VbT = svd(B.T, full_matrices=False)
R1 = np.dot(np.dot(Ur, np.diag(Sr)), VrT)
G1 = np.dot(np.dot(Ug, np.diag(Sg)), VgT)
B1 = np.dot(np.dot(Ub, np.diag(Sb)), VbT)
R1 = np.around(R1)  # np.around([R1]) would add a spurious leading axis
G1 = np.around(G1)
B1 = np.around(B1)
R1 = np.uint8(R1)
G1 = np.uint8(G1)
B1 = np.uint8(B1)
R1 = R1.T
G1 = G1.T
B1 = B1.T
R1 = R1.flatten('F')
G1 = G1.flatten('F')
B1 = B1.flatten('F')
R1 = tuple(R1)
G1 = tuple(G1)
B1 = tuple(B1)
zipped = list(zip(R1, G1, B1))
im.putdata(zipped)  # putdata modifies im in place and returns None
im.show()
N = np.array([200, 100, 50, 10, 1])
for i in range(len(N)):
    # svd returns the singular values in descending order, so the first
    # N[i] columns of U / rows of VT correspond to the largest ones
    newUr = Ur[:, :N[i]]
    newSr = Sr[:N[i]]
    newVrT = VrT[:N[i], :]
    newUg = Ug[:, :N[i]]
    newSg = Sg[:N[i]]
    newVgT = VgT[:N[i], :]
    newUb = Ub[:, :N[i]]
    newSb = Sb[:N[i]]
    newVbT = VbT[:N[i], :]
    newR = np.dot(np.dot(newUr, np.diag(newSr)), newVrT)
    newG = np.dot(np.dot(newUg, np.diag(newSg)), newVgT)
    newB = np.dot(np.dot(newUb, np.diag(newSb)), newVbT)
    # same post-processing as above: round, cast, transpose, flatten
    newR = np.uint8(np.around(newR)).T.flatten('F')
    newG = np.uint8(np.around(newG)).T.flatten('F')
    newB = np.uint8(np.around(newB)).T.flatten('F')
    zipped = list(zip(newR, newG, newB))
    im.putdata(zipped)
    im.show()
You could find the n greatest values in S using np.argsort. For example,
In [31]: S = np.array([1,3,5,2,4,7])
In [32]: np.argsort(S)[-3:]
Out[32]: array([4, 2, 5])
In [33]: idx = np.argsort(S)[-3:]
In [34]: S[idx]
Out[34]: array([4, 5, 7])
from PIL import Image
import numpy as np
linalg = np.linalg
N = 10
def ngreatest(arr, n):
    # indices of the n largest entries of arr
    idx = np.argsort(arr)[-n:]
    return idx
img = Image.open(filename).convert('RGB')
arr = np.asarray(img)
r, g, b = np.rollaxis(arr, axis = -1)
Ur, Sr, VrT = linalg.svd(r, full_matrices=False)
idx = ngreatest(Sr, N)
Sr = np.diag(Sr[idx])
VrT = VrT[idx]
Ur = Ur[:,idx]
print(Ur.shape, Sr.shape, VrT.shape)
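To rebuild the compressed channel from those truncated factors (a sketch continuing the snippet above; Sr is already the diagonal matrix built from the selected singular values):
# Rank-N reconstruction of the red channel from the sliced factors,
# clipped back into valid 8-bit range before rebuilding an image.
r_approx = Ur.dot(Sr).dot(VrT)
r_uint8 = np.clip(np.rint(r_approx), 0, 255).astype(np.uint8)
out = Image.fromarray(r_uint8)   # grayscale view of the compressed channel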
I implemented the conjugate gradient method using TensorFlow to invert a sparse matrix.
The matrix I used to test the method is well conditioned, as it is the sum of a mass matrix and a stiffness matrix obtained with finite elements.
I compared with the same method implemented using scipy, on the same data.
The solutions obtained with either method are the same, but TensorFlow is 5 times slower (I tested in a Colab environment): scipy ran in 0.27 s, while TensorFlow required 1.37 s.
Why is the algorithm so slow under TensorFlow?
I cannot cast to dense matrices, as I want to use the method with matrices of large size (100k x 100k or more).
Thanks,
Cesare
Here is the code I used to test this:
import tensorflow as tf
import numpy as np
from scipy.sparse import coo_matrix,linalg
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from time import time
from scipy.spatial import Delaunay
def create_mesh(Lx=1, Ly=1, Nx=100, Ny=100):
    # structured Delaunay triangulation of the rectangle [0,Lx] x [0,Ly]
    mesh0 = dict()
    dx = Lx/Nx
    dy = Ly/Ny
    XX, YY = np.meshgrid(np.arange(0, Lx+dx, dx), np.arange(0, Ly+dy, dy))
    points = np.vstack((XX.ravel(), YY.ravel())).T
    #np.random.shuffle(points)
    tri = Delaunay(points)
    mesh0['Pts'] = np.copy(points).astype(np.float32)
    mesh0['Tria'] = np.copy(tri.simplices).astype(int)
    return mesh0
def eval_connectivity(mesh0):
    print('computing mesh connectivity')
    npt = mesh0['Pts'].shape[0]
    connectivity = {}
    for jpt in range(npt):
        connectivity[jpt] = []
    for Tria in mesh0['Tria']:
        for ilpt in range(3):
            iglobalPt = Tria[ilpt]
            for jlpt in range(1+ilpt, 3):
                jglobalPt = Tria[jlpt]
                connectivity[iglobalPt].append(jglobalPt)
                connectivity[jglobalPt].append(iglobalPt)
    for key, value in connectivity.items():
        connectivity[key] = np.unique(np.array(value, dtype=int))
    return connectivity
def eval_local_mass(mesh0, iTri):
    # local (consistent) mass matrix of triangle iTri
    lmass = np.zeros(shape=(3, 3), dtype=np.float32)
    Tria = mesh0['Tria'][iTri]
    v10 = mesh0['Pts'][Tria[1], :] - mesh0['Pts'][Tria[0], :]
    v20 = mesh0['Pts'][Tria[2], :] - mesh0['Pts'][Tria[0], :]
    N12 = np.cross(v10, v20)
    Tsurf = 0.5*np.linalg.norm(N12)
    for ipt in range(3):
        lmass[ipt, ipt] = 1.0/12.0
        for jpt in range(1+ipt, 3):
            lmass[ipt, jpt] = 1.0/24.0
            lmass[jpt, ipt] = lmass[ipt, jpt]
    lmass = 2.0*Tsurf*lmass
    return lmass
def eval_local_stiffness(mesh0, iTri):
    # local stiffness matrix of triangle iTri
    Tria = mesh0['Tria'][iTri]
    v10 = mesh0['Pts'][Tria[1], :] - mesh0['Pts'][Tria[0], :]
    v20 = mesh0['Pts'][Tria[2], :] - mesh0['Pts'][Tria[0], :]
    N12 = np.cross(v10, v20)
    Tsurf = 0.5*np.linalg.norm(N12)
    covbT = np.zeros(shape=(3, 3), dtype=np.float32)
    covbT[0, :2] = v10
    covbT[1, :2] = v20
    covbT[2, 2] = N12/(2*Tsurf)
    contrb = np.linalg.inv(covbT)
    v1 = contrb[:, 0]
    v2 = contrb[:, 1]
    a = np.dot(v1, v1)
    b = np.dot(v1, v2)
    c = np.dot(v2, v2)
    gij_c = np.array([[a, b], [b, c]], dtype=np.float32)
    lgrad = np.array([[-1.0, 1.0, 0.0], [-1.0, 0.0, 1.0]], dtype=np.float32)
    lstif = Tsurf*np.matmul(np.matmul(lgrad.T, gij_c), lgrad)
    return lstif
def compute_vectors_sparse_matrices(mesh0):
    # assemble COO-style index and value vectors for mass and stiffness
    npt = mesh0['Pts'].shape[0]
    connect = eval_connectivity(mesh0)
    nzero = 0
    for key, value in connect.items():
        nzero += (1 + value.shape[0])
    I = np.zeros(shape=(nzero), dtype=int)
    J = np.zeros(shape=(nzero), dtype=int)
    VM = np.zeros(shape=(nzero), dtype=np.float32)
    VS = np.zeros(shape=(nzero), dtype=np.float32)
    k0 = np.zeros(shape=(npt+1), dtype=int)
    k0[0] = 0
    k = -1
    for jpt in range(npt):
        loc_con = connect[jpt].tolist()[:]
        loc_con.append(jpt)
        loc_con = np.sort(loc_con)
        k0[jpt+1] = k0[jpt] + loc_con.shape[0]
        for jloc in range(loc_con.shape[0]):
            k = k+1
            I[k] = jpt
            J[k] = loc_con[jloc]
    for iTr, Tria in enumerate(mesh0['Tria']):
        lstiff = eval_local_stiffness(mesh0, iTr)
        lmass = eval_local_mass(mesh0, iTr)
        for iEntry, irow in enumerate(Tria):
            loc_con = connect[irow].tolist()[:]
            loc_con.append(irow)
            loc_con = np.sort(loc_con)
            for jEntry, jcol in enumerate(Tria):
                indexEntry = k0[irow] + np.where(loc_con == jcol)[0]
                VM[indexEntry] = VM[indexEntry] + lmass[iEntry, jEntry]
                VS[indexEntry] = VS[indexEntry] + lstiff[iEntry, jEntry]
    return (I, J, VM, VS)
def compute_global_sparse_matrices(mesh0):
    I, J, VM, VS = compute_vectors_sparse_matrices(mesh0)
    npt = mesh0['Pts'].shape[0]
    MASS = coo_matrix((VM, (I, J)), shape=(npt, npt))
    STIFF = coo_matrix((VS, (I, J)), shape=(npt, npt))
    return (MASS, STIFF)
def compute_global_sparse_tensors(mesh0):
    I, J, VM, VS = compute_vectors_sparse_matrices(mesh0)
    npt = mesh0['Pts'].shape[0]
    indices = np.hstack([I[:, np.newaxis], J[:, np.newaxis]])
    MASS = tf.sparse.SparseTensor(indices=indices, values=VM.astype(np.float32), dense_shape=[npt, npt])
    STIFF = tf.sparse.SparseTensor(indices=indices, values=VS.astype(np.float32), dense_shape=[npt, npt])
    return (MASS, STIFF)
def compute_matrices_scipy(mesh0):
    MASS, STIFF = compute_global_sparse_matrices(mesh0)
    return (MASS, STIFF)
def compute_matrices_tensorflow(mesh0):
    MASS, STIFF = compute_global_sparse_tensors(mesh0)
    return (MASS, STIFF)
def conjgrad_scipy(A, b, x0, niter=100, toll=1.e-5):
    x = np.copy(x0)
    r = b - A*x
    p = np.copy(r)
    rsold = np.dot(r, r)
    for it in range(niter):
        Ap = A*p
        alpha = rsold/np.dot(p, Ap)
        x += alpha*p
        r -= alpha*Ap
        rsnew = np.dot(r, r)
        if np.sqrt(rsnew) < toll:
            break
        p = r + (rsnew/rsold)*p
        rsold = rsnew
    return [x, it, np.sqrt(rsnew)]
def conjgrad_tensorflow(A, b, x0, niter=100, toll=1.e-5):
    x = x0
    r = b - tf.sparse.sparse_dense_matmul(A, x)
    p = r
    rsold = tf.reduce_sum(tf.multiply(r, r))
    for it in range(niter):
        Ap = tf.sparse.sparse_dense_matmul(A, p)
        alpha = rsold/tf.reduce_sum(tf.multiply(p, Ap))
        x += alpha*p
        r -= alpha*Ap
        rsnew = tf.reduce_sum(tf.multiply(r, r))
        if tf.sqrt(rsnew) < toll:
            break
        p = r + (rsnew/rsold)*p
        rsold = rsnew
    return [x, it, tf.sqrt(rsnew)]
mesh = create_mesh(Lx=10,Ly=10,Nx=100,Ny=100)
x0 = tf.constant( (mesh['Pts'][:,0]<5 ).astype(np.float32) )
nit_time = 10
dcoef = 1.0
maxit = x0.shape[0]//2
stoll = 1.e-6
print('nb of nodes:\t{}'.format(mesh['Pts'].shape[0]))
print('nb of trias:\t{}'.format(mesh['Tria'].shape[0]))
t0 = time()
MASS0,STIFF0 = compute_matrices_scipy(mesh)
elapsed_scipy=time()-t0
print('Matrices; elapsed: {:3.5f} s'.format(elapsed_scipy))
A = MASS0+dcoef*STIFF0
x = np.copy(np.squeeze(x0.numpy()) )
t0 = time()
for jt in range(nit_time):
    b = MASS0*x
    x1, it, tol = conjgrad_scipy(A, b, x, niter=maxit, toll=stoll)
    x = np.copy(x1)
    print('time {}; iters {}; resid: {:3.2f}'.format(1+jt, it, tol))
elapsed_scipy=time()-t0
print('elapsed, scipy: {:3.5f} s'.format(elapsed_scipy))
t0 = time()
MASS,STIFF =compute_matrices_tensorflow(mesh)
elapsed=time()-t0
print('Matrices; elapsed: {:3.5f} s'.format(elapsed))
x = None
x1 = None
A = tf.sparse.add(MASS,tf.sparse.map_values(tf.multiply, STIFF, dcoef))
x = tf.expand_dims(tf.identity(x0),axis=1)
t0 = time()
for jt in range(nit_time):
    b = tf.sparse.sparse_dense_matmul(MASS, x)
    x1, it, tol = conjgrad_tensorflow(A, b, x, niter=maxit, toll=stoll)
    x = x1
    print('time {}; iters {}; resid: {:3.2f}'.format(1+jt, it, tol))
elapsed_tf=time()-t0
print('elapsed, tf: {:3.2f} s'.format(elapsed_tf))
print('elapsed times:')
print('scipy: {:3.2f} s\ttf: {:3.2f} s'.format(elapsed_scipy,elapsed_tf))
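One plausible explanation for the gap, offered as an assumption rather than a verified diagnosis: in eager mode every operation inside the Python loop is dispatched individually, so per-op overhead dominates when each sparse matvec is cheap. A minimal sketch of one thing to try, compiling the solver into a graph with tf.function (the tensor-valued break inside the loop may need to be rewritten, e.g. as a tf.while_loop, for AutoGraph to accept it):
# Sketch: trace the CG solver once into a graph so subsequent calls avoid
# op-by-op eager dispatch. The first call is slow (tracing); later calls reuse the graph.
conjgrad_compiled = tf.function(conjgrad_tensorflow)
x1, it, tol = conjgrad_compiled(A, b, x, niter=maxit, toll=stoll)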
With the same for loop (for idm in range(0, n):), I would like to get np.shape(R3_temp) = np.shape(WL_PS_storage) = (3,).
However, with the following code, the result for R3_temp is different.
In this case, could anyone let me know what is missing from the code here, please?
Thank you in advance.
import numpy as np
import pandas as pd
w = 5
v = 3
m = np.array([2,3,2]) #parallel chambers
n = len(m)
steps = np.arange(1,n+1)
q = np.remainder(steps,2)
p_one = 30
c_one = 50
p_three = 40
c_three = 40
start_p_two = 20
start_c_two = 20
total_p_two = 21
total_c_two = 21
Total_combination = total_p_two*total_c_two
Z_comb = [np.arange(0, m[0]), np.arange(0, m[1]), np.arange(0, m[2])]
# Z_comb -> [array([0, 1]), array([0, 1, 2]), array([0, 1])]
storage_R1_R2_max = []
for p_two in range(start_p_two, total_p_two):
    p = np.array([p_one, p_two, p_three])
    for c_two in range(start_c_two, total_c_two):
        c = np.array([c_one, c_two, c_three])
        # calculate workload
        WL_PS_storage = []
        for idm in range(0, n):
            WL_PS = (1/m[idm])*(p[idm] + c[idm] + 2*w)
            WL_PS_storage.append(WL_PS)  # in the idm loop, len(WL_PS_storage) == 3
        WL_PS_max = max(WL_PS_storage)  # same column as the for idm
        WL_robot = 2*(n+1)*(v+w)
        workload = max(WL_PS_max, WL_robot)
        R2_storage = []
        for idm in range(0, n):
            # calculate R2
            R2 = (1/m[idm])*(p[idm] + c[idm] + 2*w)
            R2_storage.append(R2)
        R2_max = max(R2_storage)
        # calculate R1
        R1 = (n+1)*(2*v + 2*w)
        max_R1_R2 = max(R1, R2_max)
        storage_R1_R2_max.append(max_R1_R2)
print('shape WL_PS_storage', np.shape(WL_PS_storage))
print('WL_PS_storage', WL_PS_storage)
print('R2 storage', R2_storage)
shape WL_PS_storage (3,)
WL_PS_storage [45.0, 16.666666666666664, 45.0]
R2 storage [45.0, 16.666666666666664, 45.0]
for p_two in range(start_p_two, total_p_two):
    p = np.array([p_one, p_two, p_three])
    for c_two in range(start_c_two, total_c_two):
        c = np.array([c_one, c_two, c_three])
        for z_one in Z_comb[0]:
            for z_two in Z_comb[1]:
                for z_three in Z_comb[2]:
                    R3_temp = []
                    for idm in range(0, n):
                        # calculate R3
                        if idm <= (n-1):
                            R3 = (q[idm]*c[idm] + (1-q[idm])*p[idm] + (1+n)*w + n*v) * ((q[idm]/(m[idm]-z)) + ((1-q[idm])/(m[idm]-z)))
                        if idm == (n-1):
                            R3 = (q[idm]*p[idm] + (1-q[idm])*c[idm] + (1+n)*w + n*v) * (q[idm]/(m[idm]-z) + (1-q[idm])/(m[idm]-z))
                        R3_temp.append(R3)
print('np shape R3_temp', np.shape(R3_temp))
print('R3_temp', R3_temp)
np shape R3_temp (1, 2)
R3_temp [array([34.5, 69. ])]
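A hedged observation on the output (an assumption based on the printed values, since the original indentation was lost): for R3_temp to end up with shape (3,), each appended R3 has to be a scalar. The printed [array([34.5, 69. ])] suggests that z in the R3 formula is a length-2 array rather than one of the scalar loop variables (z_one, z_two, z_three), so m[idm]-z broadcasts and every R3 becomes a 2-vector:
# Illustration of the suspected broadcasting: dividing by an array
# instead of a scalar turns each R3 into an array.
import numpy as np

m_i = 3
z_scalar = 1
z_array = np.array([1, 2])
print(10/(m_i - z_scalar))   # 5.0       -> scalar; appending 3 of these gives shape (3,)
print(10/(m_i - z_array))    # [ 5. 10.] -> 2-vector; R3_temp picks up an extra axis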
Dear Python programmers,
I am currently working with curve_fit from scipy in order to find out what correlation the x and y data have with each other. However, the curve fit comes out really weird, even when I fit a simple linear formula to it. I've tried converting the array to a numpy array in the def func(x, a, b, c): "Fit function" return a * np.asarray(x) + b part, but it still gives me a graph that looks as if a three-year-old scribbled over it with a red pencil.
One thing I did is sort the values of massflows and rms_smote from low to high (you can see this above the def func(x, a, b, c) bit), because with unsorted values curve_fit gave me a fit that was also somewhat scratched out, as if sketched. I don't know whether curve_fit treats data differently depending on whether it is sorted.
If you need any more information, let me know :) Any suggestion is welcome!
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from scipy.stats import linregress
from scipy.optimize import curve_fit
data_15 = pd.read_csv(r"C:\Users\Thomas\Documents\Pythondata\2022-01-15_SMOTERapport.csv", header= 0, sep=';', decimal=',')
data_06 = pd.read_csv(r"C:\Users\Thomas\Documents\Pythondata\2022-02-06_SMOTERapport.csv", header= 0, sep=';', decimal=',')
data_10 = pd.read_csv(r"C:\Users\Thomas\Documents\Pythondata\2022-02-10_SMOTERapport.csv", header= 0, sep=';', decimal=',')
speed_15 = data_15['SPEED_ACT']
speed_06 = data_06['SPEED_ACT']
speed_10 = data_10['SPEED_ACT']
"Data filter 01_15"
filter = [i for i, e in enumerate(speed_15) if e >= 80]
s_15 = pd.DataFrame(data_15)
speed15 = s_15.filter(items = filter, axis=0)
speed15.reset_index(drop=True, inplace=True)
temp15 = speed15['TP_SMOTE']
foutmetingen2 = [i for i, e in enumerate(temp15) if e < 180]
speed15 = speed15.drop(foutmetingen2)
tp_strip15 = speed15['TP_AMBIENT']
tp_target15 = speed15['TP_TARGET']
tp_smote15 = speed15['TP_SMOTE']
v_15 = speed15['SPEED_ACT']
width15 = speed15['STRIP_WIDTH']
thickness15 = speed15['STRIP_THICKNESS']
power15 = speed15['POWER_INVERTER_PRE']
voltage15 = speed15['VOLTAGE_INVERTER_PRE']
"Data filter 02_06"
filter = [i for i, e in enumerate(speed_06) if e >= 80]
s_06 = pd.DataFrame(data_06)
speed06 = s_06.filter(items = filter, axis=0)
speed06.reset_index(drop=True, inplace=True)
temp06 = speed06['TP_SMOTE']
foutmetingen2 = [i for i, e in enumerate(temp06) if e < 180]
speed06 = speed06.drop(foutmetingen2)
tp_strip06 = speed06['TP_AMBIENT']
tp_target06 = speed06['TP_TARGET']
tp_smote06 = speed06['TP_SMOTE']
v_06 = speed06['SPEED_ACT']
width06 = speed06['STRIP_WIDTH']
thickness06 = speed06['STRIP_THICKNESS']
power06 = speed06['POWER_INVERTER_PRE']
voltage06 = speed06['VOLTAGE_INVERTER_PRE']
"Data filter 02_10"
filter = [i for i, e in enumerate(speed_10) if e >= 80]
s_10 = pd.DataFrame(data_10)
speed10 = s_10.filter(items = filter, axis=0)
speed10.reset_index(drop=True, inplace=True)
temp_01 = speed10['TP_SMOTE']
foutmetingen2 = [i for i, e in enumerate(temp_01) if e < 180]
speed10 = speed10.drop(foutmetingen2)
tp_strip10 = speed10['TP_AMBIENT']
tp_target10 = speed10['TP_TARGET']
tp_smote10 = speed10['TP_SMOTE']
v_10 = speed10['SPEED_ACT']
width10 = speed10['STRIP_WIDTH']
thickness10 = speed10['STRIP_THICKNESS']
power10 = speed10['POWER_INVERTER_PRE']
voltage10 = speed10['VOLTAGE_INVERTER_PRE']
"Constanten"
widthmax = 1253
Kra = 0.002033636
Kosc = 0.073086272
Pnominal = 2200
meting_15 = np.arange(0, len(speed15), 1)
meting_06 = np.arange(0, len(speed06), 1)
meting_10 = np.arange(0, len(speed10), 1)
cp = 480
rho = 7850
"---------------------------------------------------------------------"
def temp(power, speed, width, thickness, tp_strip, tp_target, tp_smote,
         voltage):
    "Compare the computed temperature with the target temperature"
    massflow = (speed/60)*width*10**-3*thickness*10**-3*rho
    LossesRA = Kra*Pnominal*(width/widthmax)
    LossesOSC = Kosc*Pnominal*(voltage/100)**2
    Plosses = (LossesRA + LossesOSC)
    power_nl = (power/100)*Pnominal - Plosses
    temp_c = ((power_nl*1000)/(massflow*cp)) + tp_strip
    verschil_t = (temp_c/tp_target)*100 - 100
    verschil_smote = (temp_c/tp_smote)*100 - 100
    return temp_c, verschil_t, verschil_smote, massflow
temp_15 = temp(power15, v_15, width15, thickness15, tp_strip15, tp_target15,
tp_smote15, voltage15)
temp_06 = temp(power06, v_06, width06, thickness06, tp_strip06, tp_target06,
tp_smote06, voltage06)
temp_10 = temp(power10, v_10, width10, thickness10, tp_strip10, tp_target10,
tp_smote10, voltage10)
"---------------------------------------------------------------------"
def rms(Temperatuurberekend, TemperatuurGemeten):
    "Compute the root mean square between computed and measured data"
    rootmeansquare = (TemperatuurGemeten - Temperatuurberekend)
    rootmeansquare_totaal = np.sum(rootmeansquare)
    rootmeansquare_gem = rootmeansquare_totaal/len(rootmeansquare)
    return rootmeansquare, rootmeansquare_totaal, rootmeansquare_gem
rms_tp_smote15 = (rms(temp_15[0], tp_smote15))
rms_tp_smote06 = (rms(temp_06[0], tp_smote06))
rms_tp_smote10 = (rms(temp_10[0], tp_smote10))
"----------------------------------------------------------------------"
massflows = [np.sum(temp_06[3])/len(temp_06[3]), np.sum(temp_15[3])/
len(temp_15[3]), np.sum(temp_10[3])/len(temp_10[3])]
rms_smote = [rms_tp_smote06[2], rms_tp_smote10[2], rms_tp_smote15[2]]
rms_tp_smote_pre = np.append(rms_tp_smote15[0].tolist(),
rms_tp_smote06[0].tolist())
rms_tp_smote = np.append(rms_tp_smote_pre, rms_tp_smote10[0].tolist())
massflow_pre = np.append(temp_15[3].tolist(), temp_06[3].tolist())
massflow = np.append(massflow_pre, temp_10[3].tolist())
massflow_sort = np.sort(massflow)
rms_tp_smote_sort = [x for _, x in sorted(zip(massflow, rms_tp_smote))]
a, b, r, p, s_a = linregress(massflows, rms_smote)
print('RC: ', a, '\n', 'std: ', s_a, '\n', 'Offset: ', b)
def func(x, a, b, c):
    "Fit function"
    return a * np.asarray(x) + b
popt, pcov = curve_fit(func, massflow_sort, rms_tp_smote_sort)
popt
functie = func(massflow_sort, *popt)
sns.set_theme(style='whitegrid')
fig, axs = plt.subplots(2, figsize=(10, 10))
axs[0].plot(massflows, rms_smote, label='Temp afwijking als f(massflow)')
axs[0].plot([massflows[0], massflows[len(massflows)-1]],
            [a*massflows[0]+b, a*massflows[len(massflows)-1]+b],
            label='trendlijn')
axs[0].set(xlabel='Mass flow ($kg/s$)',
           ylabel='Temperatuur afwijking gem ($\u00b0C$)', title='Met Verliezen')
axs[0].legend(loc='upper right')
axs[1].plot(massflow_sort, rms_tp_smote_sort, 'o', label='Temp/Massflow 01-15')
#axs[1].plot(temp_06[3], rms_tp_smote06[0], 'o', label='Temp/Massflow 02-06')
#axs[1].plot(temp_10[3], rms_tp_smote10[0], 'o', label='Temp/Massflow 02-10')
axs[1].plot(massflow_sort, func(massflow_sort, *popt), 'r-',  # x must be the same sorted array the fit is evaluated on
            label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
axs[1].set(xlabel='Mass flow ($kg/s$)',
ylabel='Temperatuur afwijking gem ($\u00b0C$)')
axs[1].legend(loc='upper right')
print("Gemiddelde verschil temperatuur smote: ", rms_tp_smote15[1])
print("Gemiddelde uitwijking temperatuur smote: ", rms_tp_smote15[2])
I have a normally distributed variable x (like product demand), an index id_1 (like product number), and a second index id_2 (like product group). My goal is to estimate the means and standard deviations of x hierarchically (all > product group > product).
That's my data:
import numpy as np
import pymc3 as pm
import arviz as az
# data
my_step_1 = 0.4
my_step_2 = 4.1
sd_step_1 = 0.1
sd_step_2 = 0.2
my = 10
sd = .1
grp_n = 8
grps_1 = 5
grps_2 = 4
x = np.round(np.concatenate([np.random.normal(my + i * my_step_1 + j * my_step_2, \
sd + i * sd_step_1 + j * sd_step_2, grp_n) \
for i in range(grps_1) for j in range(grps_2)]), 1) # demand
id_1 = np.repeat(np.arange(grps_1 * grps_2), grp_n) # group, product number
id_2 = np.tile(np.repeat(np.arange(grps_2), grp_n), grps_1) # super-group, product group
shape_1 = len(np.unique(id_1))
shape_2 = len(np.unique(id_2))
I've managed a single hierarchy:
with pm.Model() as model_h1:
    # hyperpriors
    mu_mu_hyper = pm.Normal('mu_mu_hyper', mu=0, sd=10)
    mu_sd_hyper = pm.HalfNormal('mu_sd_hyper', 10)
    sd_hyper = pm.HalfNormal('sd_hyper', 10)
    # per-product parameters
    mu = pm.Normal('mu', mu=mu_mu_hyper, sd=mu_sd_hyper, shape=shape_1)
    sd = pm.HalfNormal('sd', sd=sd_hyper, shape=shape_1)
    y = pm.Normal('y', mu=mu[id_1], sd=sd[id_1], observed=x)
    trace_h1 = pm.sample(1000)
#az.plot_forest(trace_h1, var_names=['mu', 'sd'], combined=True)
But how can I code 2 hierarchies?
# 2 hierarchies .. doesn't work
with pm.Model() as model_h2:
    #
    mu_mu_hyper2 = pm.Normal('mu_mu_hyper2', mu=0, sd=10)
    mu_sd_hyper2 = pm.HalfNormal('mu_sd_hyper2', sd=10)
    sd_mu_sd_hyper2 = pm.HalfNormal('sd_mu_sd_hyper2', sd=10)
    sd_hyper2 = pm.HalfNormal('sd_hyper2', sd=10)
    #
    mu_mu_hyper1 = pm.Normal('mu_hyper1', mu=mu_mu_hyper2, sd=mu_sd_hyper2, shape=shape_2)
    mu_sd_hyper1 = pm.HalfNormal('mu_sd_hyper1', sd=sd_mu_sd_hyper2, shape=shape_2)
    sd_hyper1 = pm.HalfNormal('sd_hyper1', sd=sd_hyper2, shape=shape_2)
    #sd_hyper1 = pm.HalfNormal('sd_hyper1', sd=sd_hyper2[id_2], shape=shape_2)??
    #
    mu = pm.Normal('mu', mu=mu_mu_hyper1, sd=mu_sd_hyper1, shape=shape_1)
    sd = pm.HalfNormal('sd', sd=sd_hyper1, shape=shape_1)
    y = pm.Normal('y', mu=mu[id_1], sd=sd[id_1], observed=x)
    trace_h2 = pm.sample(1000)
You could try looping through the product groups and using the mean and std of a group as constraints for the products belonging to that particular group.
# sample product group to product mapping
group_product_mapping = {0: [1, 2, 3], 1: [4, 5, 6]}
total_groups = len(group_product_mapping.keys())
with pm.Model() as model_h:
    mu_all = pm.Normal('mu_all', 0, 10)
    sd_all = pm.HalfNormal('sd_all', 10)
    sd_mu_group = pm.HalfNormal('sd_mu_group', 10)
    # group parameters constrained to mu, sd from all
    mu_group = pm.Normal('mu_group', mu_all, sd_all, shape=total_groups)
    sd_group = pm.HalfNormal('sd_group', sd_mu_group, shape=total_groups)
    mu_products = dict()
    sd_products = dict()
    # iterate through groups and constrain product parameters to the product group they belong to
    for idx, group in enumerate(group_product_mapping.keys()):
        mu_products[group] = pm.Normal(f'mu_products_{group}', mu_group[idx], sd_group[idx], shape=len(group_product_mapping[group]))
        sd_products[group] = pm.HalfNormal(f'sd_mu_products_{group}', 10)
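An alternative worth considering, sketched under the assumption that each product belongs to exactly one group (which holds for the id_1/id_2 arrays in the question): instead of looping, index the group-level parameters by a product-to-group mapping, which keeps everything vectorized:
# Hedged sketch of a vectorized two-level hierarchy (names from the question;
# id_2u[k] is the product group of product k).
id_2u = np.array([id_2[id_1 == k][0] for k in np.unique(id_1)])
with pm.Model() as model_h2v:
    # top level (all products)
    mu_all = pm.Normal('mu_all', mu=0, sd=10)
    sd_mu_group = pm.HalfNormal('sd_mu_group', 10)
    # group level
    mu_group = pm.Normal('mu_group', mu=mu_all, sd=sd_mu_group, shape=shape_2)
    mu_sd_group = pm.HalfNormal('mu_sd_group', 10, shape=shape_2)
    sd_sd_group = pm.HalfNormal('sd_sd_group', 10, shape=shape_2)
    # product level: each product's parameters are tied to its group's
    mu = pm.Normal('mu', mu=mu_group[id_2u], sd=mu_sd_group[id_2u], shape=shape_1)
    sd = pm.HalfNormal('sd', sd=sd_sd_group[id_2u], shape=shape_1)
    y = pm.Normal('y', mu=mu[id_1], sd=sd[id_1], observed=x)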
I have a problem remapping the image. I used my own millimeter sheet, which contains a coplanar set of points, and applied the direct radial alignment method, but the image comes out warped, not rectified. Can someone help me find where my error is? Xw, Yw, Zw are the real-world coordinates, Xf, Yf are the corresponding pixel coordinates, and Cx, Cy are the coordinates of the center of distortion.
import cv2
import numpy as np
from scipy.optimize import minimize
# Xf, Yf, Xw, Yw, Zw, Cx, Cy, dx, dy, Sx, N and img are assumed to be defined earlier (not shown in the post)
Xd = dx*(Xf-Cx)/Sx
Yd = dy*(Yf-Cy)
n1 = 6
A = np.zeros((N, n1))
# whole columns can be assigned directly; the original double loop over
# (i, j) re-assigned these same columns N*n1 times
A[:, 0] = Yd*Xw
A[:, 1] = Yd*Yw
A[:, 2] = Yd
A[:, 3] = -Xd*Xw
A[:, 4] = -Xd*Yw
A[:, 5] = -Xd
X = solution(A)  # solution() is the poster's helper (not shown), presumably a least-squares solve
Sr = r1_prime**2 + r2_prime**2 + r4_prime**2 + r5_prime**2
Ty = np.sqrt(Sr-np.sqrt(Sr**2-4*(r1_prime*r5_prime-r2_prime*r4_prime)**2))/(2*(r1_prime*r5_prime-r2_prime*r4_prime)**2)
#compute the rotation matrix components:
r1 = (X[0]/X[5])*Ty
r2 = (X[1]/X[5])*Ty
r4 = (X[3]/X[5])*Ty
r5 = (X[5]/X[5])*Ty
Tx = (X[2]/X[5])*Ty
s = -np.sign(r1*r4+r2*r5)
r3 = np.sqrt(1-r1**2-r2**2)
r6 = s*np.sqrt(1-r4**2-r5**2)
r7 = np.sqrt(1-(r1**2+r4**2))
r8 = np.sqrt(1-(r2**2+r5**2))
r9 = np.sqrt(-1+Sr*Ty**2)
n11 = 2
A1 = np.zeros((N, n11))
# again, direct column assignment replaces the redundant double loop
A1[:, 0] = r4*Xw + r5*Yw + Ty
A1[:, 1] = -Yd
b1 = (r7*Xw + r8*Yw)*Yd
U1, S1, VT1 = np.linalg.svd(A1)
Sigma = np.zeros((A1.shape[0], A1.shape[1]))
Sigma[:A1.shape[1], :A1.shape[1]] = np.diag(S1)
J1 = np.zeros((A1.shape[0], A1.shape[1]))
J1[:A1.shape[1], :A1.shape[1]] = np.linalg.inv(np.diag(S1))
H1 = np.zeros((A1.shape[0], A1.shape[1]))
H1[:A1.shape[0], :A1.shape[0]] = np.linalg.multi_dot([U1, J1, VT1])
H1 = H1.T
x1 = np.dot(H1, b1)
f = x1[0]
Tz = x1[1]
R = np.array([[r1, r2, r3], [r4, r5, r6], [r7, r8, r9]])
def func(guess):
    # residual of the radial-alignment equations over all points;
    # the distortion coefficients, focal length, Tz and Sx are refined together
    k1 = guess[0]
    k2 = guess[1]
    f = guess[2]
    tz = guess[3]
    sx = guess[4]
    r = np.sqrt((dx*(Xf-Cx)/sx)**2 + (Yd)**2)
    return np.sum((Yd*(1+k1*r**2 + k2*r**4)*(r7*Xw + r8*Yw + r9*Zw + tz) - f*(r4*Xw + r5*Yw + r6*Zw + Ty))**2)
x0 = np.array([0, 0, f, Tz, 1])
i = minimize(func, x0, method='COBYLA', options={'disp': True})
K1 = i.x[0]
K2 = i.x[1]
F = i.x[2]
Pz = i.x[3]
Sx = i.x[4]
dx_new = dx*Sx
nx, ny = img.shape[1], img.shape[0]
X, Y = np.meshgrid(np.arange(0, nx, 1), np.arange(0, ny, 1))
x = X.astype(np.float32)
y = Y.astype(np.float32)
rd = np.sqrt((y-Cy)**2 + (x-Cx)**2)
map_x = y*(1+K1*np.power(rd, 2)+K2*np.power(rd, 4))  # K1, K2: distortion coefficients found above
map_y = x*(1+K1*np.power(rd, 2)+K2*np.power(rd, 4))
imgg = cv2.remap(img, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
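Independent of the calibration math, one thing worth checking (a hedged pointer, not a verified diagnosis): cv2.remap expects map_x to hold source x (column) coordinates and map_y to hold source y (row) coordinates, both float32, whereas the code above builds map_x from y and map_y from x. A minimal identity-remap check:
# If the maps follow the expected convention, an identity remap returns the input unchanged.
import cv2
import numpy as np

img_test = np.random.randint(0, 255, (480, 640, 3), np.uint8)  # placeholder image
xs, ys = np.meshgrid(np.arange(640, dtype=np.float32),
                     np.arange(480, dtype=np.float32))
identity = cv2.remap(img_test, xs, ys, interpolation=cv2.INTER_LINEAR)
assert (identity == img_test).all()  # map_x = x grid, map_y = y grid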