What is wrong in my code using DHT to compute convolution? - python

The Discrete Hartley Transform can be computed as:
import numpy as np

def dht(x: np.array):
    X = np.fft.fft(x)
    X = np.real(X) - np.imag(X)
    return X

def idht(X: np.array):
    n = len(X)
    X = dht(X)
    x = 1 / n * X
    return x
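As a quick sanity check of these definitions (a sketch; the DHT is its own inverse up to the 1/n factor), a round trip should reproduce the input:
x = np.random.default_rng(0).standard_normal(8)
assert np.allclose(idht(dht(x)), x)  # DHT round trip recovers x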
Circular convolution can be computed using the DHT according to a theorem illustrated here, here (eq. 4.7.22-4.7.25) and in many other places. I implemented the corresponding algorithm and compared it with circular convolution computed using the FFT, but the results are not even close. Why?
def conv(x: np.array, y: np.array):
    X = dht(x)
    Y = dht(y)
    Xflip = np.flip(X)
    Yflip = np.flip(Y)
    Yplus = Y + Yflip
    Yminus = Y - Yflip
    Z = 0.5 * (X * Yplus + Xflip * Yminus)
    z = idht(Z)
    return z
def test_conv():
    x = np.ones((5, ))
    y = np.copy(x)
    z = conv(x, y)
    z1 = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(y)))
    np.testing.assert_allclose(z, z1, err_msg="test_convolution() failed")

if __name__ == '__main__':
    test_conv()
    print("test_conv passed")
Output:
Traceback (most recent call last):
File "ronf.py", line 35, in <module>
test_conv()
File "ronf.py", line 31, in test_conv
np.testing.assert_allclose(z, z1, err_msg="test_convolution() failed")
File "/home/andrea/vscode_venv/lib/python3.8/site-packages/numpy/testing/_private/utils.py", line 1528, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/home/andrea/vscode_venv/lib/python3.8/site-packages/numpy/testing/_private/utils.py", line 842, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=1e-07, atol=0
test_convolution() failed
Mismatched elements: 5 / 5 (100%)
Max absolute difference: 5.65018378
Max relative difference: 1.13003676
x: array([ 0. , 4.105099, 5.992006, 3.053079, -0.650184])
y: array([5., 5., 5., 5., 5.])

The issue is in the flipping part. To show with an example:
For a given array X_k, we need X_{-k} i.e., X_{N-k}.
N := 5
X_k := [0, 1, 2, 3, 4]
np.flip does this:
[4, 3, 2, 1, 0]
However, this is not X_{N-k}! We need:
X_{N-k} = [0, 4, 3, 2, 1]
since:
for k = 0 => X[5-0] = X[0] = 0
for k = 1 => X[5-1] = X[4] = 4
... ...
for k = 4 => X[5-4] = X[1] = 1
That is, we assume periodicity of N such that X[0] = X[N] and so on, but np.flip's result doesn't obey that. In short, the first element should stay as it is, and the others should flip.
One way to fix this is to apply np.roll after the flips to rotate the result by one position:
def conv(x: np.array, y: np.array):
    X = dht(x)
    Y = dht(y)
    Xflip = np.roll(np.flip(X), shift=1)  # change is here
    Yflip = np.roll(np.flip(Y), shift=1)  # and here only
    Yplus = Y + Yflip
    Yminus = Y - Yflip
    Z = 0.5 * (X * Yplus + Xflip * Yminus)
    z = idht(Z)
    return z
Then I get:
>>> a = np.ones((5,))
>>> conv(a, a)
array([5., 5., 5., 5., 5.])
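As an aside, a minimal sketch of the same index trick without the roll: modular indexing builds X_{N-k} directly, since index 0 maps to 0 and index k maps to N - k:
n = len(X)
Xflip = X[(-np.arange(n)) % n]  # [X[0], X[n-1], ..., X[1]]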

Is it possible to use Python Mixed Integer Linear programming to get all solutions in an interval?

I have a linear problem to solve looking for integer numbers. I found a way to solve it using the new milp implementation in scipy. Hereafter is a demonstration code.
The problem is the following. From a vector of weights w, I am looking for the integer vector x such that the dot product of x and the weights is in a given range. It looks something like this:
# minimize
abs(w^T @ x - target)
And I translated this into the following to implement in milp:
# maximize
w^T @ x
# constraints
target - error <= w^T @ x <= target + error
In my specific context, several solutions may exist for x. Is there a way to get all the solutions in the given interval instead of maximizing (or minimizing) something?
Here is the milp implementation.
import numpy as np
from scipy.optimize import milp, LinearConstraint, Bounds

# inputs
ratio_l, ratio_u = 0.2, 3.0
max_bounds = [100, 200, 2, 20, 2]
target = 380.2772  # 338.34175
lambda_parameter = 2
error = lambda_parameter * 1e-6 * target

# coefficients of the linear objective function
w = np.array([12.0, 1.007825, 14.003074, 15.994915, 22.989769], dtype=np.float64)

# the aim is to minimize
#   w^T x - target_mass
# instead I maximize
#   w^T x
# in the constraint domain
#   target - error <= w^T x <= target + error
# constraints on variables 0 and 1:
#   ratio_l <= x[1] / x[0] <= ratio_u
# translation =>
#   (ratio_l - ratio_u) * x[1] <= -ratio_u * x[0] + x[1] <= 0
# use max(x[1]) to have a constant

# linear objective function
c = w

# integrality of the decision variables
# 3 is semi-integer = within bounds or 0
integrality = 3 * np.ones_like(w)

# matrix A that defines the constraints
A = np.array([
    # boundaries of the mass defined from lambda_parameter
    w,
    # c[1] / c[0] max value
    [-ratio_u, 1.0, 0., 0., 0.],
])

# b_up and b_low vectors
#   b_low <= A @ x <= b_up
n_max_C = max_bounds[0]
b_up = [
    target + error,  # mass target
    0.,  # c[1] / c[0] constraint, upper bound
]
b_low = [
    target - error,  # mass target
    (ratio_l - ratio_u) * max_bounds[0],  # H_C constraint, lower bound
]

# set up linear constraints
constraints = LinearConstraint(A, b_low, b_up)
bounds = Bounds(
    lb=[0, 0, 0, 0, 0],
    ub=max_bounds,
)
results = milp(
    c=c,
    constraints=constraints,
    integrality=integrality,
    bounds=bounds,
    options=dict(),
)
print(results)
The result is this:
fun: 380.277405
message: 'Optimization terminated successfully. (HiGHS Status 7: Optimal)'
mip_dual_bound: 380.27643944560145
mip_gap: 2.5390790665913637e-06
mip_node_count: 55
status: 0
success: True
x: array([19., 40., 0., 7., 0.])
But other possible x arrays exist, though with a larger error. This one is the solution found by milp:
m = np.dot(w, [19., 40., 0., 7., 0.])
print(f"{'target':>10s} {'calc m':>27s} {'deviation':>27s} {'error':>12s} match?")
print(f"{target:10.6f} {target - error:14.6f} <= {m:10.6f} <= {target + error:10.6f}"
f" {m - target:12.6f} {error:12.6f} -> {target - error <= m <= target + error}")
target calc m deviation error match?
380.277200 380.276439 <= 380.277405 <= 380.277961 0.000205 0.000761 -> True
These two other examples also work, and I wonder how I can get them without implementing a grid algorithm (like brute in scipy).
m = np.dot(w, [20., 39., 1., 4., 1.])
print(f"{'target':>10s} {'calc m':>27s} {'deviation':>27s} {'error':>12s} match?")
print(f"{target:10.6f} {target - error:14.6f} <= {m:10.6f} <= {target + error:10.6f}"
f" {m - target:12.6f} {error:12.6f} -> {target - error <= m <= target + error}")
target calc m deviation error match?
380.277200 380.276439 <= 380.277678 <= 380.277961 0.000478 0.000761 -> True
m = np.dot(w, [21., 38., 2., 1., 2.])
print(f"{'target':>10s} {'calc m':>27s} {'deviation':>27s} {'error':>12s} match?")
print(f"{target:10.6f} {target - error:14.6f} <= {m:10.6f} <= {target + error:10.6f}"
f" {m - target:12.6f} {error:12.6f} -> {target - error <= m <= target + error}")
target calc m deviation error match?
380.277200 380.276439 <= 380.277951 <= 380.277961 0.000751 0.000761 -> True
Linear programming is for optimization, which generally means picking the single best solution in a solution space. Your problem is not to pick a single solution. Since you have well-bounded integral variables, "brute force" (though not iterative brute force) is quite practical. This looks like:
Within the known bounds of x, attempt all values for all dimensions excluding the largest one (x1, which can range between 0 and 200)
Calculate bounds arrays based on your "ratio" constraint
Calculate bounds arrays based on the "error from target" constraint
Combine the two to find overall bounds
Filter on integral solutions that are within the bounds
import numpy as np

ratio_l, ratio_u = 0.2, 3.0
max_bounds = (100, 200, 2, 20, 2)
target = 380.2772
lambda_parameter = 2
error = 1e-6 * lambda_parameter * target
w = np.array((12.0, 1.007825, 14.003074, 15.994915, 22.989769))

i0234 = [0, 2, 3, 4]
w0234 = w[i0234]
x0234 = np.stack(np.meshgrid(
    *(np.arange(1 + max_bounds[m]) for m in i0234)
)).reshape((4, -1))
x0, x2, x3, x4 = x0234

x1_ratio_lower, x1_ratio_upper = np.multiply.outer((ratio_l, ratio_u), x0)
x1_target_lower, x1_target_upper = (target - np.add.outer((error, -error), w0234 @ x0234)) / w[1]
x1_lower = np.ceil(np.max((x1_ratio_lower, x1_target_lower), axis=0)).astype(int)
x1_upper = np.floor(np.min((x1_ratio_upper, x1_target_upper), axis=0)).astype(int)
ok, = (x1_upper >= x1_lower).nonzero()

for i in ok:
    xi = x0234[:, i]
    for x1 in range(x1_lower[i], x1_upper[i] + 1):
        x = [xi[0], x1, *xi[1:]]
        target_approx = w.dot(x)
        error_approx = target_approx - target
        print(f'x={x} w@x={target_approx:.6f} ~ {target}, '
              f'error={error_approx:.2e}<{error:.2e}')
x=[19, 40, 0, 7, 0] w@x=380.277405 ~ 380.2772, error=2.05e-04<7.61e-04
x=[20, 39, 1, 4, 1] w@x=380.277678 ~ 380.2772, error=4.78e-04<7.61e-04
x=[21, 38, 2, 1, 2] w@x=380.277951 ~ 380.2772, error=7.51e-04<7.61e-04
A simple boundary contraction reduces the search space:
import numpy as np

ratio_l, ratio_u = 0.2, 3.0
max_bounds = np.array((100, 200, 2, 20, 2))
target = 380.2772
lambda_parameter = 2
error = 1e-6 * lambda_parameter * target
w = np.array((12.0, 1.007825, 14.003074, 15.994915, 22.989769))

x0u = (target + error) / (w[0] + w[1] * ratio_l)
assert np.isclose(target, w.dot((x0u, x0u * ratio_l, 0, 0, 0)) - error)
max_bounds[0] = min(max_bounds[0], np.floor(x0u))

i0234 = [0, 2, 3, 4]
w0234 = w[i0234]
x0234 = np.stack(np.meshgrid(
    *(np.arange(1 + max_bounds[m]) for m in i0234)
)).reshape((4, -1))
x0, x2, x3, x4 = x0234

x1_ratio_lower, x1_ratio_upper = np.multiply.outer((ratio_l, ratio_u), x0)
x1_target_lower, x1_target_upper = (target - np.add.outer((error, -error), w0234 @ x0234)) / w[1]
x1_lower = np.ceil(np.max((x1_ratio_lower, x1_target_lower), axis=0)).astype(int)
x1_upper = np.floor(np.min((x1_ratio_upper, x1_target_upper), axis=0)).astype(int)
ok, = (x1_upper >= x1_lower).nonzero()

for i in ok:
    xi = x0234[:, i]
    for x1 in range(x1_lower[i], x1_upper[i] + 1):
        x = [xi[0], x1, *xi[1:]]
        target_approx = w.dot(x)
        error_approx = target_approx - target
        print(f'x={x} w@x={target_approx:.6f} ~ {target}, '
              f'error={error_approx:.2e}<{error:.2e}')
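With this contraction, the upper bound on x0 drops from 100 to about 31 (the largest x0 compatible with the target when x1 sits at its ratio floor), so the candidate grid is roughly a third of the size and the same three solutions should still be printed.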

How to fit a rotated and translated hyperbola to a set of x,y points in Python

I want to fit a set of data points in the xy plane to the general case of a rotated and translated hyperbola, to back out the coefficients of the general equation of a conic.
I've tried the methodology proposed here, but so far I cannot make it work.
When fitting to a set of points known to be a hyperbola, I get quite different outputs.
What am I doing wrong in the code below?
Or is there any other way to solve this problem?
import numpy as np
from sympy import plot_implicit, Eq
from sympy.abc import x, y

def fit_hyperbola(x, y):
    x = np.asarray(x)
    y = np.asarray(y)
    D1 = np.vstack([x**2, x*y, y**2]).T
    D2 = np.vstack([x, y, np.ones(len(x))]).T
    S1 = D1.T @ D1
    S2 = D1.T @ D2
    S3 = D2.T @ D2
    # define the constraint matrix and its inverse
    C = np.array(((0, 0, -2), (0, 1, 0), (-2, 0, 0)), dtype=float)
    Ci = np.linalg.inv(C)
    # set up and solve the generalized eigenvector problem
    T = np.linalg.inv(S3) @ S2.T
    S = Ci @ (S1 - S2 @ T)
    eigval, eigvec = np.linalg.eig(S)
    # evaluate and sort resulting constraint values
    cond = eigvec[1]**2 - 4 * eigvec[0] * eigvec[2]
    # [condVals index] = sort(cond)
    idx = np.argsort(cond)
    condVals = cond[idx]
    possibleHs = condVals[1:] + condVals[0]
    minDiffAt = np.argmin(abs(possibleHs))
    # minDiffVal = possibleHs[minDiffAt]
    alpha1 = eigvec[:, idx[minDiffAt + 1]]
    alpha2 = T @ alpha1
    return np.concatenate((alpha1, alpha2)).ravel()
if __name__ == '__main__':
    # known hyperbola coefficients
    coeffs = [1., 6., -2., 3., 0., 0.]
    # hyperbola points
    x_ = [1.56011303e+00, 1.38439984e+00, 1.22595618e+00, 1.08313085e+00,
          9.54435408e-01, 8.38528681e-01, 7.34202759e-01, 6.40370424e-01,
          5.56053814e-01, 4.80374235e-01, 4.12543002e-01, 3.51853222e-01,
          2.97672424e-01, 2.49435970e-01, 2.06641170e-01, 1.68842044e-01,
          1.35644673e-01, 1.06703097e-01, 8.17157025e-02, 6.04220884e-02,
          4.26003457e-02, 2.80647476e-02, 1.66638132e-02, 8.27872926e-03,
          2.82211172e-03, 2.37095181e-04, 4.96740239e-04, 3.60375275e-03,
          9.59051203e-03, 1.85194083e-02, 3.04834928e-02, 4.56074477e-02,
          6.40488853e-02, 8.59999904e-02, 1.11689524e-01, 1.41385205e-01,
          1.75396504e-01, 2.14077865e-01, 2.57832401e-01, 3.07116093e-01,
          3.62442545e-01, 4.24388335e-01, 4.93599021e-01, 5.70795874e-01,
          6.56783391e-01, 7.52457678e-01, 8.58815793e-01, 9.76966133e-01,
          1.10813998e+00, 1.25370436e+00]
    y_ = [-0.66541515, -0.6339625, -0.60485332, -0.57778425, -0.5524732,
          -0.52865638, -0.50608561, -0.48452564, -0.46375182, -0.44354763,
          -0.42370253, -0.4040097, -0.38426392, -0.3642594, -0.34378769,
          -0.32263542, -0.30058217, -0.27739811, -0.25284163, -0.22665682,
          -0.19857079, -0.16829086, -0.13550147, -0.0998609, -0.06099773,
          -0.01850695, 0.02805425, 0.07917109, 0.13537629, 0.19725559,
          0.26545384, 0.34068177, 0.42372336, 0.51544401, 0.61679957,
          0.72884632, 0.85275192, 0.98980766, 1.14144182, 1.30923466,
          1.49493479, 1.70047747, 1.92800474, 2.17988774, 2.45875143,
          2.76750196, 3.10935692, 3.48787892, 3.90701266, 4.3711261]
    plot_implicit(Eq(coeffs[0]*x**2 + coeffs[1]*x*y + coeffs[2]*y**2 + coeffs[3]*x + coeffs[4]*y, -coeffs[5]))
    coeffs_fit = fit_hyperbola(x_, y_)
    plot_implicit(Eq(coeffs_fit[0]*x**2 + coeffs_fit[1]*x*y + coeffs_fit[2]*y**2 + coeffs_fit[3]*x + coeffs_fit[4]*y, -coeffs_fit[5]))
The general equation of a hyperbola is defined by 5 independent coefficients (not 6). If the model equation includes dependent coefficients (which is the case with 6 coefficients), trouble can occur in the numerical regression.
That is why the equation A * x * x + B * x * y + C * y * y + D * x + F * y = 1 is considered in the calculus below. The fitting is very good.
Then one can go back to the standard equation a * x * x + 2 * b * x * y + c * y * y + 2 * d * x + 2 * f * y + g = 0 by setting a value for g (for example g = -1).
The formulas to find the coordinates of the center, the equations of the asymptotes and the equations of the axes are given in the references below.
https://mathworld.wolfram.com/ConicSection.html
https://en.wikipedia.org/wiki/Conic_section
https://en.wikipedia.org/wiki/Hyperbola
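As a minimal sketch of the regression described above (my own illustration, not the original answer's code; fit_conic_5 is a hypothetical helper, and the form assumes the conic's constant term is nonzero so the right-hand side can be scaled to 1, which a conic passing exactly through the origin cannot satisfy):
import numpy as np

def fit_conic_5(x, y):
    # Least-squares fit of A*x^2 + B*x*y + C*y^2 + D*x + F*y = 1.
    # The fixed right-hand side removes the scale ambiguity and
    # leaves 5 independent coefficients.
    x = np.asarray(x)
    y = np.asarray(y)
    M = np.column_stack([x**2, x*y, y**2, x, y])
    coeffs, *_ = np.linalg.lstsq(M, np.ones(len(x)), rcond=None)
    return coeffs  # [A, B, C, D, F]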

autograd differentiation example in PyTorch - should be 9/8?

In the example from the PyTorch tutorial, they use the following graph:
x = [[1, 1], [1, 1]]
y = x + 2
z = 3y^2
o = mean( z )  # 1/4 * z.sum()
Thus, the forward pass gets us this:
x_i = 1, y_i = 3, z_i = 27, o = 27
In code this looks like:
import torch
# define graph
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
z = y * y * 3
out = z.mean()
# if we don't do this, torch will only retain gradients for leaf nodes, ie: x
y.retain_grad()
z.retain_grad()
# does a forward pass
print(z, out)
However, I get confused by the gradients computed:
# now let's run our backward prop & get gradients
out.backward()
print(f'do/dx = {x.grad[0,0]}')
which outputs:
do/dx = 4.5
By chain rule, do/dx = do/dz * dz/dy * dy/dx, where:
dy/dx = 1
dz/dy = 9/2 given x_i=1
do/dz = 1/4 given x_i=1
which means:
do/dx = 1/4 * 9/2 * 1 = 9/8
However this doesn't match the gradients returned by Torch (9/2 = 4.5). Perhaps I have a math error (something with the do/dz = 1/4 term?), or I don't understand autograd in Torch.
Any pointers?
do/dz = 1/4
dz/dy = 6y = 6 * 3 = 18
dy/dx = 1
therefore, do/dx = (1/4) * 18 * 1 = 9/2 = 4.5
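To see this directly, here is a small check (a sketch re-running the question's graph with retained gradients):
import torch

x = torch.ones(2, 2, requires_grad=True)
y = x + 2
z = 3 * y * y
out = z.mean()
y.retain_grad()
z.retain_grad()
out.backward()
print(z.grad)  # do/dz: every element is 1/4
print(y.grad)  # do/dy = do/dz * dz/dy = (1/4) * 6y = 4.5
print(x.grad)  # do/dx = do/dy * dy/dx = 4.5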

Transform matrix from scipy.spatial.procrustes [duplicate]

Is there something like Matlab's procrustes function in NumPy/SciPy or related libraries?
For reference: Procrustes analysis aims to align 2 sets of points (in other words, 2 shapes) to minimize the square distance between them by removing scale, translation and rotation warp components.
Example in Matlab:
X = [0 1; 2 3; 4 5; 6 7; 8 9]; % first shape
R = [1 2; 2 1]; % rotation matrix
t = [3 5]; % translation vector
Y = X * R + repmat(t, 5, 1); % warped shape, no scale and no distortion
[d Z] = procrustes(X, Y); % Z is Y aligned back to X
Z
Z =
0.0000 1.0000
2.0000 3.0000
4.0000 5.0000
6.0000 7.0000
8.0000 9.0000
Same task in NumPy:
X = arange(10).reshape((5, 2))
R = array([[1, 2], [2, 1]])
t = array([3, 5])
Y = dot(X, R) + t
Z = ???
Note: I'm only interested in the aligned shape, since the square error (variable d in the Matlab code) is easily computed from 2 shapes.
I'm not aware of any pre-existing implementation in Python, but it's easy to take a look at the MATLAB code using edit procrustes.m and port it to Numpy:
import numpy as np

def procrustes(X, Y, scaling=True, reflection='best'):
    """
    A port of MATLAB's `procrustes` function to Numpy.

    Procrustes analysis determines a linear transformation (translation,
    reflection, orthogonal rotation and scaling) of the points in Y to best
    conform them to the points in matrix X, using the sum of squared errors
    as the goodness of fit criterion.

        d, Z, [tform] = procrustes(X, Y)

    Inputs:
    ------------
    X, Y
        matrices of target and input coordinates. they must have equal
        numbers of points (rows), but Y may have fewer dimensions
        (columns) than X.

    scaling
        if False, the scaling component of the transformation is forced
        to 1

    reflection
        if 'best' (default), the transformation solution may or may not
        include a reflection component, depending on which fits the data
        best. setting reflection to True or False forces a solution with
        reflection or no reflection respectively.

    Outputs
    ------------
    d
        the residual sum of squared errors, normalized according to a
        measure of the scale of X, ((X - X.mean(0))**2).sum()

    Z
        the matrix of transformed Y-values

    tform
        a dict specifying the rotation, translation and scaling that
        maps X --> Y
    """
    n, m = X.shape
    ny, my = Y.shape

    muX = X.mean(0)
    muY = Y.mean(0)

    X0 = X - muX
    Y0 = Y - muY

    ssX = (X0**2.).sum()
    ssY = (Y0**2.).sum()

    # centred Frobenius norm
    normX = np.sqrt(ssX)
    normY = np.sqrt(ssY)

    # scale to equal (unit) norm
    X0 /= normX
    Y0 /= normY

    if my < m:
        Y0 = np.concatenate((Y0, np.zeros((n, m - my))), 0)

    # optimum rotation matrix of Y
    A = np.dot(X0.T, Y0)
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    V = Vt.T
    T = np.dot(V, U.T)

    if reflection != 'best':
        # does the current solution use a reflection?
        have_reflection = np.linalg.det(T) < 0
        # if that's not what was specified, force another reflection
        if reflection != have_reflection:
            V[:, -1] *= -1
            s[-1] *= -1
            T = np.dot(V, U.T)

    traceTA = s.sum()

    if scaling:
        # optimum scaling of Y
        b = traceTA * normX / normY
        # standardised distance between X and b*Y*T + c
        d = 1 - traceTA**2
        # transformed coords
        Z = normX * traceTA * np.dot(Y0, T) + muX
    else:
        b = 1
        d = 1 + ssY / ssX - 2 * traceTA * normY / normX
        Z = normY * np.dot(Y0, T) + muX

    # transformation matrix
    if my < m:
        T = T[:my, :]
    c = muX - b * np.dot(muY, T)

    # transformation values
    tform = {'rotation': T, 'scale': b, 'translation': c}

    return d, Z, tform
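For the question's NumPy example, a quick usage sketch with the port above (the points are collinear, so the alignment is exact here):
X = np.arange(10).reshape((5, 2))
R = np.array([[1, 2], [2, 1]])
t = np.array([3, 5])
Y = np.dot(X, R) + t
d, Z, tform = procrustes(X, Y)
print(np.allclose(Z, X))  # True: Y is aligned back onto X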
There is a Scipy function for it: scipy.spatial.procrustes
I'm just posting its example here:
>>> import numpy as np
>>> from scipy.spatial import procrustes
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
You can have both Ordinary Procrustes Analysis and Generalized Procrustes Analysis in python with something like this:
import numpy as np

def opa(a, b):
    aT = a.mean(0)
    bT = b.mean(0)
    A = a - aT
    B = b - bT
    aS = np.sum(A * A)**.5
    bS = np.sum(B * B)**.5
    A /= aS
    B /= bS
    U, _, V = np.linalg.svd(np.dot(B.T, A))
    aR = np.dot(U, V)
    if np.linalg.det(aR) < 0:
        V[1] *= -1
        aR = np.dot(U, V)
    aS = aS / bS
    aT -= (bT.dot(aR) * aS)
    aD = (np.sum((A - B.dot(aR))**2) / len(a))**.5
    return aR, aS, aT, aD

def gpa(v, n=-1):
    if n < 0:
        p = avg(v)
    else:
        p = v[n]
    l = len(v)
    r, s, t, d = np.ndarray((4, l), object)
    for i in range(l):
        r[i], s[i], t[i], d[i] = opa(p, v[i])
    return r, s, t, d

def avg(v):
    v_ = np.copy(v)
    l = len(v_)
    R, S, T = [list(np.zeros(l)) for _ in range(3)]
    for i, j in np.ndindex(l, l):
        r, s, t, _ = opa(v_[i], v_[j])
        R[j] += np.arccos(min(1, max(-1, np.trace(r[:1])))) * np.sign(r[1][0])
        S[j] += s
        T[j] += t
    for i in range(l):
        a = R[i] / l
        r = [np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]
        v_[i] = v_[i].dot(r) * (S[i] / l) + (T[i] / l)
    return v_.mean(0)
For testing purposes, the output of each algorithm can be visualized as follows:
import matplotlib.pyplot as p
p.rcParams['toolbar'] = 'None'

def plt(o, e, b):
    p.figure(figsize=(10, 10), dpi=72, facecolor='w').add_axes([0.05, 0.05, 0.9, 0.9], aspect='equal')
    p.plot(0, 0, marker='x', mew=1, ms=10, c='g', zorder=2, clip_on=False)
    p.gcf().canvas.set_window_title('%f' % e)
    x = np.ravel(o[0].T[0])
    y = np.ravel(o[0].T[1])
    p.xlim(min(x), max(x))
    p.ylim(min(y), max(y))
    a = []
    for i, j in np.ndindex(len(o), 2):
        a.append(o[i].T[j])
    O = p.plot(*a, marker='x', mew=1, ms=10, lw=.25, c='b', zorder=0, clip_on=False)
    O[0].set(c='r', zorder=1)
    if not b:
        O[2].set_color('b')
        O[2].set_alpha(0.4)
    p.axis('off')
    p.show()

# Fly wings example (Klingenberg, 2015 | https://en.wikipedia.org/wiki/Procrustes_analysis)
arr1 = np.array([[588.0, 443.0], [178.0, 443.0], [56.0, 436.0], [50.0, 376.0], [129.0, 360.0], [15.0, 342.0], [92.0, 293.0], [79.0, 269.0], [276.0, 295.0], [281.0, 331.0], [785.0, 260.0], [754.0, 174.0], [405.0, 233.0], [386.0, 167.0], [466.0, 59.0]])
arr2 = np.array([[477.0, 557.0], [130.129, 374.307], [52.0, 334.0], [67.662, 306.953], [111.916, 323.0], [55.119, 275.854], [107.935, 277.723], [101.899, 259.73], [175.0, 329.0], [171.0, 345.0], [589.0, 527.0], [591.0, 468.0], [299.0, 363.0], [306.0, 317.0], [406.0, 288.0]])

def opa_out(a):
    r, s, t, d = opa(a[0], a[1])
    a[1] = a[1].dot(r) * s + t
    return a, d, False

plt(*opa_out([arr1, arr2, np.matrix.copy(arr2)]))

def gpa_out(a):
    g = gpa(a, -1)
    D = [avg(a)]
    for i in range(len(a)):
        D.append(a[i].dot(g[0][i]) * g[1][i] + g[2][i])
    return D, sum(g[3]) / len(a), True

plt(*gpa_out([arr1, arr2]))
You may also want to try this package, which implements various flavors of Procrustes methods: https://github.com/theochem/procrustes.

How to calculate a Normal Distribution percent point function in python

How do I do the equivalent of scipy.stats.norm.ppf without using SciPy? Python's math module has erf built in, but I cannot seem to recreate the function with it.
PS: I cannot just use scipy because Heroku does not allow you to install it, and using alternate buildpacks breaches the 300MB maximum slug size limit.
There's not a simple way to use erf to implement norm.ppf because norm.ppf is related to the inverse of erf. Instead, here's a pure Python implementation of the code from scipy. You should find that the function ndtri returns exactly the same value as norm.ppf:
import math
s2pi = 2.50662827463100050242E0
P0 = [
-5.99633501014107895267E1,
9.80010754185999661536E1,
-5.66762857469070293439E1,
1.39312609387279679503E1,
-1.23916583867381258016E0,
]
Q0 = [
1,
1.95448858338141759834E0,
4.67627912898881538453E0,
8.63602421390890590575E1,
-2.25462687854119370527E2,
2.00260212380060660359E2,
-8.20372256168333339912E1,
1.59056225126211695515E1,
-1.18331621121330003142E0,
]
P1 = [
4.05544892305962419923E0,
3.15251094599893866154E1,
5.71628192246421288162E1,
4.40805073893200834700E1,
1.46849561928858024014E1,
2.18663306850790267539E0,
-1.40256079171354495875E-1,
-3.50424626827848203418E-2,
-8.57456785154685413611E-4,
]
Q1 = [
1,
1.57799883256466749731E1,
4.53907635128879210584E1,
4.13172038254672030440E1,
1.50425385692907503408E1,
2.50464946208309415979E0,
-1.42182922854787788574E-1,
-3.80806407691578277194E-2,
-9.33259480895457427372E-4,
]
P2 = [
3.23774891776946035970E0,
6.91522889068984211695E0,
3.93881025292474443415E0,
1.33303460815807542389E0,
2.01485389549179081538E-1,
1.23716634817820021358E-2,
3.01581553508235416007E-4,
2.65806974686737550832E-6,
6.23974539184983293730E-9,
]
Q2 = [
1,
6.02427039364742014255E0,
3.67983563856160859403E0,
1.37702099489081330271E0,
2.16236993594496635890E-1,
1.34204006088543189037E-2,
3.28014464682127739104E-4,
2.89247864745380683936E-6,
6.79019408009981274425E-9,
]
def ndtri(y0):
    if y0 <= 0 or y0 >= 1:
        raise ValueError("ndtri(x) needs 0 < x < 1")
    negate = True
    y = y0
    if y > 1.0 - 0.13533528323661269189:
        y = 1.0 - y
        negate = False

    if y > 0.13533528323661269189:
        y = y - 0.5
        y2 = y * y
        x = y + y * (y2 * polevl(y2, P0) / polevl(y2, Q0))
        x = x * s2pi
        return x

    x = math.sqrt(-2.0 * math.log(y))
    x0 = x - math.log(x) / x
    z = 1.0 / x
    if x < 8.0:
        x1 = z * polevl(z, P1) / polevl(z, Q1)
    else:
        x1 = z * polevl(z, P2) / polevl(z, Q2)
    x = x0 - x1
    if negate:
        x = -x
    return x

def polevl(x, coef):
    accum = 0
    for c in coef:
        accum = x * accum + c
    return accum
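For example, a quick spot check (norm.ppf(0.975) is approximately 1.959964):
print(ndtri(0.975))  # ~1.959964, same as scipy.stats.norm.ppf(0.975)
print(ndtri(0.5))    # 0.0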
The function ppf is the inverse of y = (1 + erf(x/sqrt(2)))/2. So we need to solve this equation for x, given y between 0 and 1. Here is code that does this with the bisection method. I imported the SciPy function to illustrate that the result is the same.
from math import erf, sqrt
from scipy.stats import norm  # only for comparison

y = 0.123
z = 2 * y - 1
a = 0
while erf(a) > z or erf(a + 1) < z:  # looking for initial bracket of size 1
    if erf(a) > z:
        a -= 1
    else:
        a += 1
b = a + 1  # found a bracket, proceed to refine it
while b - a > 1e-15:  # 1e-15 ought to be enough precision
    c = (a + b) / 2.0  # bisection method
    if erf(c) > z:
        b = c
    else:
        a = c
print(sqrt(2) * (a + b) / 2.0)  # this is the answer
print(norm.ppf(y))              # SciPy for comparison
Left for you to do:
preliminary bound checks (y must be between 0 and 1)
scaling and shifting if another mean / variance is desired; the code is for the standard normal distribution (mean 0, variance 1). See the sketch after this list.
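A minimal sketch of that shift-and-scale step, reusing ndtri from the first answer (norm_ppf is a hypothetical helper name):
def norm_ppf(y, mu=0.0, sigma=1.0):
    # If Z ~ N(0, 1), then mu + sigma*Z ~ N(mu, sigma**2),
    # so the quantile shifts and scales the same way.
    return mu + sigma * ndtri(y)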
