How to implement inverse log-polar transform in tensorflow - python

I implemented the log-polar transform and its inverse. The forward transform is fine, but the inverted picture shows an incorrect result. I think the issue is in the normalization of the grid coordinates, but I can't figure out how this happens.
LPT:
def log_polar_transform(x, radius_factor = tf.sqrt(2.)):
b, h, w, c = x.shape
grid = make_grid(h, w) #(2, hw), represent log-polar coordinate system
grid = tf.repeat(grid[None, ...], b, axis=0) #(b, 2, hw)
X, Y = grid[:, 0], grid[:, 1]
#theta
theta = (Y + 1) * math.pi #[0, 2pi]
#radius
maxR = max(h, w) * radius_factor
r = tf.exp((X + 1)/2 * tf.math.log(maxR)) #[1, maxR]
r = (r-1)/(maxR -1) #[1, maxR]-->[0, 1]
r = r * (maxR/h) #scale factor
#map to cartesian coordinate system
xs = tf.reshape(r * tf.math.cos(theta), [b, h, w])
ys = tf.reshape(r * tf.math.sin(theta), [b, h, w])
output = interpolate(x, xs, ys)
output = tf.reshape(output, [b, h, w, c])
return output
Inverse LPT:
def inverse_log_polar_transform(x):
b, h, w, c = x.shape
grid = make_grid(h, w)
grid = tf.repeat(grid[None, ...], b, axis=0) #(b, 2, hw)
X, Y = grid[:, 0], grid[:, 1]
rs = tf.sqrt(X**2 + Y **2)/tf.sqrt(2.)
ts = (tf.atan2(-Y, -X))/math.pi #[-1., 1.]
rs = tf.reshape(rs, [b, h, w])
ts = tf.reshape(ts, [b, h, w])
output = interpolate(x, rs, ts)
output = tf.reshape(output, [b, h, w, c])
return output
Here is the full code link:
https://www.kaggle.com/code/tom99763/log-polar-transfomr-inverse
and the results:
Results Image
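A likely culprit is the radius coordinate: the forward pass maps the grid's X axis through exp((X + 1)/2 * log(maxR)) and then rescales by (maxR - 1) and maxR/h, so the inverse has to undo those steps with a log rather than just dividing the cartesian radius by sqrt(2). A sketch of what that could look like, assuming make_grid returns coordinates in [-1, 1] and interpolate uses the same normalized (x, y) convention as in the forward pass:
def inverse_log_polar_transform(x, radius_factor=tf.sqrt(2.)):
    b, h, w, c = x.shape
    grid = make_grid(h, w)                        # (2, hw), now read as cartesian output coordinates
    grid = tf.repeat(grid[None, ...], b, axis=0)  # (b, 2, hw)
    X, Y = grid[:, 0], grid[:, 1]
    maxR = max(h, w) * radius_factor
    # cartesian radius of each output pixel, then undo the forward normalization step by step
    r = tf.sqrt(X**2 + Y**2)
    r = r * (h / maxR)                            # undo the maxR/h scale factor
    r = r * (maxR - 1.) + 1.                      # [0, 1] --> [1, maxR]
    rs = 2. * tf.math.log(r) / tf.math.log(maxR) - 1.  # log-radius axis in [-1, 1]
    ts = tf.atan2(-Y, -X) / math.pi               # angle axis in [-1, 1], matching theta = (Y + 1) * pi
    rs = tf.reshape(rs, [b, h, w])
    ts = tf.reshape(ts, [b, h, w])
    output = interpolate(x, rs, ts)
    return tf.reshape(output, [b, h, w, c])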

Related

prediction with linear regression from scratch

I want to build a linear regression model to predict y values. I have my X and Y values as NumPy arrays. However, the model doesn't give correct values even when the given dataset is straightforward to learn (e.g. x = [10, 15], y = [20, 30]).
Here is the code:
x = [10, 15, 23, 30, 40, 49, 57, 60]
y = [10000, 21000,25000, 30000,45000, 52000, 65000, 70000]
x = np.array(x)
y = np.array(y)
n = len(x)
m = np.ones(n)
b = np.ones(n)
def hypothesis(x, m, b):
h = x*m + b
return h
def cost(y, h):
c = (1/(2*n))*(np.sum(np.square(h - y)))
return c
def grad_des(x, y, h, theta, L):
theta = theta - L*(x/n)*(np.sum(h - y))
return theta
iter = 100000
L= 0.00005
for i in range(iter):
h = hypothesis(x, m, b)
c = cost(y, h)
m = grad_des(x, y, h, m, L)
b = grad_des(x, y, h, b, L)
if i%10 == 0:
print('h:', h, "c:", c,)
There seems to be a lot of confusion about what the gradient should look like. First of all, the gradient is different for w and for b; second, the one you have right now is not correct for either w or b.
It should be something along the lines of
def grad_des(x, y, h, w, b, L):
w = w - L*np.dot(x, h - y)/n
b = b - L*np.dot(np.ones_like(x), h - y)/n
return w, b
or if you prefer with np.sum:
def grad_des(x, y, h, w, b, L):
w = w - L*np.sum(x * (h - y))/n
b = b - L*np.sum(h - y)/n
return w, b
or just
def grad_des(x, y, h, w, b, L):
w = w - L*np.mean(x * (h - y))
b = b - L*np.mean(h - y)
return w, b
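As a usage sketch (not from the original thread): the model only needs scalar w and b rather than length-n arrays, and the question's learning rate of 0.00005 appears too small to converge within 100000 iterations on this data, so it is raised slightly here. The data is taken from the question.
import numpy as np

def grad_des(x, y, h, w, b, L):
    # gradient step for scalar slope w and intercept b
    w = w - L * np.mean(x * (h - y))
    b = b - L * np.mean(h - y)
    return w, b

x = np.array([10, 15, 23, 30, 40, 49, 57, 60], dtype=float)
y = np.array([10000, 21000, 25000, 30000, 45000, 52000, 65000, 70000], dtype=float)

w, b = 0.0, 0.0
L = 0.0005
for i in range(100000):
    h = w * x + b                      # hypothesis
    w, b = grad_des(x, y, h, w, b, L)
print(w, b)                            # approaches the least-squares fit (w ≈ 1131.6, b ≈ -423)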

Find rotation matrix to align two vectors

I am trying to find the rotation matrix that aligns two vectors.
I have a vector A = [ax, ay, az] and I would like to align it with the vector B = [1, 0, 0] (the x-axis unit vector).
I found the following explanation and tried to implement it: https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/897677#897677
def align_vectors(a, b):
v = np.cross(a, b)
s = np.linalg.norm(v)
c = np.dot(a, b)
v1, v2, v3 = v
h = 1 / (1 + c)
Vmat = np.array([[0, -v3, v2],
[v3, 0, -v1],
[-v2, v1, 0]])
R = np.eye(3, dtype=np.float64) + Vmat + (Vmat.dot(Vmat) * h)
return R
When I apply it to find the rotation of a point, this is what I have:
x_axis = np.array([1, 0, 0], dtype=np.float64)
direction = np.array([-0.02, 1.004, -0.02], dtype=np.float64)
Ralign = align_vectors(x_axis, direction)
point = 1000 * np.array([-0.02, 1.004, -0.02], dtype=np.float64) # Point in the direction of the unit vector
result = Ralign.dot(point)
The resulting point is not aligned with the unit vector.
If you only want to rotate ONE vector a to align with b, not the entire coordinate system containing that vector, use simple vector projection and the length of a:
a_norm = np.linalg.norm(a)
b_norm = np.linalg.norm(b)
result = b * a_norm / b_norm
The following fixes the issue in the question that the inputs are not unit vectors, by normalizing them.
def align_vectors(a, b):
b = b / np.linalg.norm(b) # normalize b
a = a / np.linalg.norm(a) # normalize a
v = np.cross(a, b)
# s = np.linalg.norm(v)
c = np.dot(a, b)
v1, v2, v3 = v
h = 1 / (1 + c)
Vmat = np.array([[0, -v3, v2],
[v3, 0, -v1],
[-v2, v1, 0]])
R = np.eye(3, dtype=np.float64) + Vmat + (Vmat.dot(Vmat) * h)
return R
testing:
def angle(a, b):
"""Angle between vectors"""
a = a / np.linalg.norm(a)
b = b / np.linalg.norm(b)
return np.arccos(a.dot(b))
point = np.array([-0.02, 1.004, -0.02])
direction = np.array([1., 0., 0.])
rotation = align_vectors(point, direction)
# Rotate point to align with direction. The resulting vector is aligned with direction
result = rotation.dot(point)
print(result)
print('Angle:', angle(direction, point)) # 0.0
print('Length:', np.isclose(np.linalg.norm(point), np.linalg.norm(result))) # True
# Rotating direction by the same matrix does not align it with direction, but the angle between point and result equals the angle between direction and result2.
result2 = rotation.dot(direction)
print(result2)
print('Same Angle:', np.isclose(angle(point,result), angle(direction,result2))) # True
print('Length:', np.isclose(np.linalg.norm(direction), np.linalg.norm(result2))) # True
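One caveat worth flagging: h = 1 / (1 + c) blows up when the two vectors are exactly anti-parallel (c = -1), where the cross product vanishes and the formula has no defined axis. A sketch of a guard for that edge case (a 180-degree rotation about any axis perpendicular to a is one valid choice):
import numpy as np

def align_vectors(a, b):
    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)
    c = np.dot(a, b)
    if np.isclose(c, -1.0):
        # anti-parallel case: rotate 180 degrees about any axis orthogonal to a
        axis = np.cross(a, [1.0, 0.0, 0.0])
        if np.linalg.norm(axis) < 1e-8:      # a is (anti)parallel to the x-axis
            axis = np.cross(a, [0.0, 1.0, 0.0])
        axis = axis / np.linalg.norm(axis)
        return 2.0 * np.outer(axis, axis) - np.eye(3)
    v1, v2, v3 = np.cross(a, b)
    Vmat = np.array([[0, -v3, v2],
                     [v3, 0, -v1],
                     [-v2, v1, 0]])
    return np.eye(3) + Vmat + Vmat.dot(Vmat) / (1 + c)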

Transform matrix from scipy.spatial.procrustes [duplicate]

Is there something like Matlab's procrustes function in NumPy/SciPy or related libraries?
For reference: Procrustes analysis aims to align two sets of points (in other words, two shapes) to minimize the squared distance between them by removing the scale, translation and rotation warp components.
Example in Matlab:
X = [0 1; 2 3; 4 5; 6 7; 8 9]; % first shape
R = [1 2; 2 1]; % rotation matrix
t = [3 5]; % translation vector
Y = X * R + repmat(t, 5, 1); % warped shape, no scale and no distortion
[d Z] = procrustes(X, Y); % Z is Y aligned back to X
Z
Z =
0.0000 1.0000
2.0000 3.0000
4.0000 5.0000
6.0000 7.0000
8.0000 9.0000
Same task in NumPy:
X = arange(10).reshape((5, 2))
R = array([[1, 2], [2, 1]])
t = array([3, 5])
Y = dot(X, R) + t
Z = ???
Note: I'm only interested in the aligned shape, since the squared error (variable d in the Matlab code) is easily computed from the two shapes.
I'm not aware of any pre-existing implementation in Python, but it's easy to take a look at the MATLAB code using edit procrustes.m and port it to Numpy:
def procrustes(X, Y, scaling=True, reflection='best'):
"""
A port of MATLAB's `procrustes` function to Numpy.
Procrustes analysis determines a linear transformation (translation,
reflection, orthogonal rotation and scaling) of the points in Y to best
conform them to the points in matrix X, using the sum of squared errors
as the goodness of fit criterion.
d, Z, [tform] = procrustes(X, Y)
Inputs:
------------
X, Y
matrices of target and input coordinates. they must have equal
numbers of points (rows), but Y may have fewer dimensions
(columns) than X.
scaling
if False, the scaling component of the transformation is forced
to 1
reflection
if 'best' (default), the transformation solution may or may not
include a reflection component, depending on which fits the data
best. setting reflection to True or False forces a solution with
reflection or no reflection respectively.
Outputs
------------
d
the residual sum of squared errors, normalized according to a
measure of the scale of X, ((X - X.mean(0))**2).sum()
Z
the matrix of transformed Y-values
tform
a dict specifying the rotation, translation and scaling that
maps Y --> X
"""
n,m = X.shape
ny,my = Y.shape
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0**2.).sum()
ssY = (Y0**2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 /= normX
Y0 /= normY
if my < m:
Y0 = np.concatenate((Y0, np.zeros((n, m-my))), axis=1)
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U,s,Vt = np.linalg.svd(A,full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
if reflection != 'best':
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if reflection != have_reflection:
V[:,-1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
traceTA = s.sum()
if scaling:
# optimum scaling of Y
b = traceTA * normX / normY
# standardised distance between X and b*Y*T + c
d = 1 - traceTA**2
# transformed coords
Z = normX*traceTA*np.dot(Y0, T) + muX
else:
b = 1
d = 1 + ssY/ssX - 2 * traceTA * normY / normX
Z = normY*np.dot(Y0, T) + muX
# transformation matrix
if my < m:
T = T[:my,:]
c = muX - b*np.dot(muY, T)
#transformation values
tform = {'rotation':T, 'scale':b, 'translation':c}
return d, Z, tform
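Plugging in the NumPy version of the question's example (a usage sketch, assuming the procrustes port above is in scope), Z comes back essentially equal to X, matching the MATLAB output:
import numpy as np

X = np.arange(10).reshape((5, 2)).astype(float)
R = np.array([[1, 2], [2, 1]], dtype=float)
t = np.array([3, 5], dtype=float)
Y = np.dot(X, R) + t           # warped shape

d, Z, tform = procrustes(X, Y)
print(np.round(Z, 4))          # recovers X (0..9 laid out as a 5x2 array)
print(round(d, 6))             # ~0 for this warp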
There is a Scipy function for it: scipy.spatial.procrustes
I'm just posting its example here:
>>> import numpy as np
>>> from scipy.spatial import procrustes
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
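Note that scipy.spatial.procrustes standardizes both inputs (centres them and scales them to unit Frobenius norm), so mtx2 is not in X's original coordinate frame. If you want a MATLAB-style Z, one option (a sketch, under that assumption about what scipy returns) is to undo the standardization of X:
import numpy as np
from scipy.spatial import procrustes

X = np.arange(10).reshape((5, 2)).astype(float)
Y = np.dot(X, [[1, 2], [2, 1]]) + [3, 5]

mtx1, mtx2, disparity = procrustes(X, Y)
muX = X.mean(0)
normX = np.linalg.norm(X - muX)   # Frobenius norm of the centred X
Z = mtx2 * normX + muX            # mtx2 mapped back into X's original frame
print(np.round(Z, 4))             # ~X for this example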
You can have both Ordinary Procrustes Analysis and Generalized Procrustes Analysis in python with something like this:
import numpy as np
def opa(a, b):
aT = a.mean(0)
bT = b.mean(0)
A = a - aT
B = b - bT
aS = np.sum(A * A)**.5
bS = np.sum(B * B)**.5
A /= aS
B /= bS
U, _, V = np.linalg.svd(np.dot(B.T, A))
aR = np.dot(U, V)
if np.linalg.det(aR) < 0:
V[1] *= -1
aR = np.dot(U, V)
aS = aS / bS
aT-= (bT.dot(aR) * aS)
aD = (np.sum((A - B.dot(aR))**2) / len(a))**.5
return aR, aS, aT, aD
def gpa(v, n=-1):
if n < 0:
p = avg(v)
else:
p = v[n]
l = len(v)
r, s, t, d = np.ndarray((4, l), object)
for i in range(l):
r[i], s[i], t[i], d[i] = opa(p, v[i])
return r, s, t, d
def avg(v):
v_= np.copy(v)
l = len(v_)
R, S, T = [list(np.zeros(l)) for _ in range(3)]
for i, j in np.ndindex(l, l):
r, s, t, _ = opa(v_[i], v_[j])
R[j] += np.arccos(min(1, max(-1, np.trace(r[:1])))) * np.sign(r[1][0])
S[j] += s
T[j] += t
for i in range(l):
a = R[i] / l
r = [np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]
v_[i] = v_[i].dot(r) * (S[i] / l) + (T[i] / l)
return v_.mean(0)
For testing purposes, the output of each algorithm can be visualized as follows:
import matplotlib.pyplot as p; p.rcParams['toolbar'] = 'None';
def plt(o, e, b):
p.figure(figsize=(10, 10), dpi=72, facecolor='w').add_axes([0.05, 0.05, 0.9, 0.9], aspect='equal')
p.plot(0, 0, marker='x', mew=1, ms=10, c='g', zorder=2, clip_on=False)
p.gcf().canvas.set_window_title('%f' % e)
x = np.ravel(o[0].T[0])
y = np.ravel(o[0].T[1])
p.xlim(min(x), max(x))
p.ylim(min(y), max(y))
a = []
for i, j in np.ndindex(len(o), 2):
a.append(o[i].T[j])
O = p.plot(*a, marker='x', mew=1, ms=10, lw=.25, c='b', zorder=0, clip_on=False)
O[0].set(c='r', zorder=1)
if not b:
O[2].set_color('b')
O[2].set_alpha(0.4)
p.axis('off')
p.show()
# Fly wings example (Klingenberg, 2015 | https://en.wikipedia.org/wiki/Procrustes_analysis)
arr1 = np.array([[588.0, 443.0], [178.0, 443.0], [56.0, 436.0], [50.0, 376.0], [129.0, 360.0], [15.0, 342.0], [92.0, 293.0], [79.0, 269.0], [276.0, 295.0], [281.0, 331.0], [785.0, 260.0], [754.0, 174.0], [405.0, 233.0], [386.0, 167.0], [466.0, 59.0]])
arr2 = np.array([[477.0, 557.0], [130.129, 374.307], [52.0, 334.0], [67.662, 306.953], [111.916, 323.0], [55.119, 275.854], [107.935, 277.723], [101.899, 259.73], [175.0, 329.0], [171.0, 345.0], [589.0, 527.0], [591.0, 468.0], [299.0, 363.0], [306.0, 317.0], [406.0, 288.0]])
def opa_out(a):
r, s, t, d = opa(a[0], a[1])
a[1] = a[1].dot(r) * s + t
return a, d, False
plt(*opa_out([arr1, arr2, np.matrix.copy(arr2)]))
def gpa_out(a):
g = gpa(a, -1)
D = [avg(a)]
for i in range(len(a)):
D.append(a[i].dot(g[0][i]) * g[1][i] + g[2][i])
return D, sum(g[3])/len(a), True
plt(*gpa_out([arr1, arr2]))
You may also want to try this package, which implements various flavors of Procrustes methods: https://github.com/theochem/procrustes.

Efficient way to calculate where a multivariable function takes values between a interval in Python

I have two multivariate equations that determine the inverse kinematics of a robot. These equations depend on the variables theta1 and theta2 (the other variables are geometric constants):
import numpy as np
def x(theta1, theta2, w, h, L1, L2):
sint1 = np.sin(theta1)
cost1 = np.cos(theta1)
sint2 = np.sin(theta2)
cost2 = np.cos(theta2)
i1 = L1 * (cost1 + cost2) + w
j1 = L1 * (sint1 - sint2) - h
D = np.sqrt((L1*(cost2-cost1)+w)**2+(L1*(sint2-sint1)+h)**2)
a = (0.25)*np.sqrt((4*L2**2-D**2)*D**2)
return i1/2 + 2*j1*a/(D**2)
def y(theta1, theta2, w, h, L1, L2):
sint1 = np.sin(theta1)
cost1 = np.cos(theta1)
sint2 = np.sin(theta2)
cost2 = np.cos(theta2)
i2 = L1 * (sint1 + sint2) + h
j2 = L1 * (cost1 - cost2) - w
D = np.sqrt((L1*(cost2-cost1)+w)**2+(L1*(sint2-sint1)+h)**2)
a = (0.25)*np.sqrt((4*L2**2-D**2)*D**2)
return i2/2 - 2*j2*a/(D**2)
Using these equations, I calculate the determinant of the Jacobian matrix (the matrix of partial derivatives) using a second-order finite-difference method:
def det_jacobian(theta1, theta2, w, h, L1, L2,eps):
dxdt1 = (-x(theta1+eps, theta2, w, h, L1, L2)+4*x(theta1, theta2, w, h, L1, L2)-3*x(theta1-eps, theta2, w, h, L1, L2))/(2*eps)
dxdt2 = (-x(theta1, theta2+eps, w, h, L1, L2)+4*x(theta1, theta2, w, h, L1, L2)-3*x(theta1, theta2-eps, w, h, L1, L2))/(2*eps)
dydt1 = (-y(theta1+eps, theta2, w, h, L1, L2)+4*y(theta1, theta2, w, h, L1, L2)-3*y(theta1-eps, theta2, w, h, L1, L2))/(2*eps)
dydt2 = (-y(theta1, theta2+eps, w, h, L1, L2)+4*y(theta1, theta2, w, h, L1, L2)-3*y(theta1, theta2-eps, w, h, L1, L2))/(2*eps)
return dxdt1,dxdt2,dydt1,dydt2
These are evaluated for values of theta1 and theta2 in a given interval:
theta1 = np.linspace(theta1_min,theta1_max,n)
theta2 = np.linspace(theta2_min,theta2_max,n)
theta1, theta2 = np.meshgrid(theta1,theta2)
What I want to know is whether there is an efficient way (using NumPy arrays) to calculate the values of x and y where the determinant takes values between -tol and tol (tol = 1e-08). Currently I'm using two nested for loops, but it is very slow.
I have written a function using for loops, but it is very slow:
def singularidades(theta1_min,theta1_max, theta2_min,theta2_max, n,tol, w, h, L1, L2,eps):
x_s = []
y_s = []
theta1_s = []
theta2_s = []
det = []
theta1 = np.linspace(theta1_min,theta1_max,n)
theta2 = np.linspace(theta2_min,theta2_max,n)
theta1, theta2 = np.meshgrid(theta1,theta2)
det_jac = det_jacobiano(theta1,theta2,w,h,L1,L2,eps)
for i in range(n):
for j in range(n):
if (g_tol[i,j] and l_tol[i,j]):
x_s.append(x(theta1[i,j], theta2[i,j], w, h, L1, L2))
y_s.append(y(theta1[i,j], theta2[i,j], w, h, L1, L2))
theta1_s.append(theta1[i,j])
theta2_s.append(theta2[i,j])
det.append(det_jac[i,j])
return x_s,y_s,theta1_s,theta2_s,det,(g_tol and l_tol)
Edit: I've modified the det_jacobian function to use it with scipy.optimize.root
def det_jacobiano(theta, w, h, L1, L2,eps):
theta1,theta2 = theta
dxdt1 = (-x(theta1+eps, theta2, w, h, L1, L2)+4*x(theta1, theta2, w, h, L1, L2)-3*x(theta1-eps, theta2, w, h, L1, L2))/(2*eps)
dxdt2 = (-x(theta1, theta2+eps, w, h, L1, L2)+4*x(theta1, theta2, w, h, L1, L2)-3*x(theta1, theta2-eps, w, h, L1, L2))/(2*eps)
dydt1 = (-y(theta1+eps, theta2, w, h, L1, L2)+4*y(theta1, theta2, w, h, L1, L2)-3*y(theta1-eps, theta2, w, h, L1, L2))/(2*eps)
dydt2 = (-y(theta1, theta2+eps, w, h, L1, L2)+4*y(theta1, theta2, w, h, L1, L2)-3*y(theta1, theta2-eps, w, h, L1, L2))/(2*eps)
return dxdt1*dydt2 - dxdt2*dydt1
and I'm trying to find the roots using
initial_guess = [2.693, 0.4538]
result = optimize.root(det_jacobiano, initial_guess,tol=1e-8,args=(20,0,100,100,1e-10),method='lm')
But I'm getting the error:
TypeError: Improper input: N=2 must not exceed M=1
You don't need a loop for that. Your function works with NumPy arrays as well as with single values:
def f(x,y):
return np.sin(x + y) / np.sqrt(x**2 + y**2)
x = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
y = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
print(f(x, y))
will return:
[1.40480431, 1.37680175, 1.33087507, 1.26811839, 1.19001968]
which is an array of function values for each pair of x and y
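Applied to the question, the same idea removes the nested loops entirely: evaluate the determinant on the full meshgrid and select with a boolean mask. A sketch, assuming a det_jacobian that returns the determinant dxdt1*dydt2 - dxdt2*dydt1 for the whole grid (like the edited det_jacobiano, but keeping theta1 and theta2 as separate array arguments):
def singularidades(theta1_min, theta1_max, theta2_min, theta2_max, n, tol, w, h, L1, L2, eps):
    theta1 = np.linspace(theta1_min, theta1_max, n)
    theta2 = np.linspace(theta2_min, theta2_max, n)
    theta1, theta2 = np.meshgrid(theta1, theta2)
    det_jac = det_jacobian(theta1, theta2, w, h, L1, L2, eps)  # assumed to return the determinant, shape (n, n)
    mask = np.abs(det_jac) < tol                               # where the determinant lies in (-tol, tol)
    return (x(theta1[mask], theta2[mask], w, h, L1, L2),
            y(theta1[mask], theta2[mask], w, h, L1, L2),
            theta1[mask], theta2[mask], det_jac[mask])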
One way to do this is to define:
def f(x,y,a):
return np.sin(x + y) / np.sqrt(x**2 + y**2) - a
where a takes the limits of your interval, and then calculate the roots x0 and y0 of this function using scipy.optimize.
These roots correspond to the x and y values between which your function returns the desired values.

Image perspective transform using Pillow

I am trying to draw the bounding box of some text on an image. The image is perspective-transformed with a given set of coefficients. The coordinates of the text before the transformation are known, and I want to calculate the coordinates of the text after the transformation.
To my understanding, if I apply the perspective transformation with the coefficients used in the image transform to the text coordinates, I should get the resulting coordinates of the text after the transformation. However, the text does not appear in the place it is supposed to be.
See the following graphs
In the first graph, the smaller white box bounds the text well because I know the coordinates of the text.
In the second graph, the smaller white box does not bound the text because of some error in transforming the coordinates.
I followed the documentation reference for the coefficients of the perspective transformation
and found the coefficients of the image transformation using the following code (the origin of the code is from this answer):
def find_coeffs(pa, pb):
'''
find the coefficients for perspective transform.
parameters:
pa : vertices in the resulting plane
pb : vertices in the current plane
return:
coeffs : 8-tuple
coefficients for PIL perspective transform
'''
matrix = []
for p1, p2 in zip(pa, pb):
matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1]])
matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1]*p1[0], -p2[1]*p1[1]])
A = np.matrix(matrix, dtype=float)
B = np.array(pb).reshape(8)
res = np.dot(np.linalg.inv(A.T * A) * A.T, B)
return np.array(res).reshape(8)
My code for text bounding box transformation:
# perspective transformation
a, b, c, d, e, f, g, h = coeffs
# return two vertices defining the bounding box
new_x0 = float(a * new_x0 - b * new_y0 + c) / float(g * new_x0 + h * new_y0 + 1)
new_y0 = float(d * new_x0 + e * new_y0 + f) / float(g * new_x0 + h * new_y0 + 1)
new_x1 = float(a * new_x1 - b * new_y1 + c) / float(g * new_x1 + h * new_y1 + 1)
new_y1 = float(d * new_x1 + e * new_y1 + f) / float(g * new_x1 + h * new_y1 + 1)
I also went to Pillow Github, but I could not find the source code where perspective transformation is defined.
Some more info about the math of perspective transformation: The Geometry of Perspective Drawing on the Computer.
Thanks.
To compute the new point after a transformation, you should get the coefficients for A -> B, not for B -> A, which is the convention the PIL library uses (Image.transform expects coefficients that map each output pixel back to a position in the input image). As an example:
# A1, B1 ... are points
# direct transform
coefs = find_coefs([B1, B2, B3, B4], [A1, A2, A3, A4])
# inverse transform
coefs_inv = find_coefs([A1, A2, A3, A4], [B1, B2, B3, B4])
You call the image.transform() function with coefs_inv, but calculate the new point with coefs, to get something like this:
img = image.transform(((1500,800)),
method=Image.PERSPECTIVE,
data=coefs_inv)
a, b, c, d, e, f, g, h = coefs
old_p1 = [50, 100]
x,y = old_p1
new_x = (a * x + b * y + c) / (g * x + h * y + 1)
new_y = (d * x + e * y + f) / (g * x + h * y + 1)
new_p1 = (int(new_x),int(new_y))
old_p2 = [400, 500]
x,y = old_p2
new_x = (a * x + b * y + c) / (g * x + h * y + 1)
new_y = (d * x + e * y + f) / (g * x + h * y + 1)
new_p2 = (int(new_x),int(new_y))
Full code below:
import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
def find_coefs(original_coords, warped_coords):
matrix = []
for p1, p2 in zip(original_coords, warped_coords):
matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1]])
matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1]*p1[0], -p2[1]*p1[1]])
A = np.matrix(matrix, dtype=float)
B = np.array(warped_coords).reshape(8)
res = np.dot(np.linalg.inv(A.T * A) * A.T, B)
return np.array(res).reshape(8)
coefs = find_coefs(
[(867,652), (1020,580), (1206,666), (1057,757)],
[(700,732), (869,754), (906,916), (712,906)]
)
coefs_inv = find_coefs(
[(700,732), (869,754), (906,916), (712,906)],
[(867,652), (1020,580), (1206,666), (1057,757)]
)
image = Image.open('sample.png')
img = image.transform(((1500,800)),
method=Image.PERSPECTIVE,
data=coefs_inv)
a, b, c, d, e, f, g, h = coefs
old_p1 = [50, 100]
x,y = old_p1
new_x = (a * x + b * y + c) / (g * x + h * y + 1)
new_y = (d * x + e * y + f) / (g * x + h * y + 1)
new_p1 = (int(new_x),int(new_y))
old_p2 = [400, 500]
x,y = old_p2
new_x = (a * x + b * y + c) / (g * x + h * y + 1)
new_y = (d * x + e * y + f) / (g * x + h * y + 1)
new_p2 = (int(new_x),int(new_y))
plt.figure()
plt.imshow(image)
plt.scatter([old_p1[0], old_p2[0]],[old_p1[1], old_p2[1]] , s=150, marker='.', c='b')
plt.show()
plt.figure()
plt.imshow(img)
plt.scatter([new_p1[0], new_p2[0]],[new_p1[1], new_p2[1]] , s=150, marker='.', c='r')
plt.show()
