Scipy Minimize SLSQP simply returns x0 - python

I am really new to programming and I am a bit in over my head here with this whole minimize business, so it might just be a simple mistake, but when I try to run my code below, it simply returns the x0 values that I put in to start.
What I'm trying to do:
I have two "functions" that are made up of points, f(x) and h(x). f(X) can be thought of a measured curve, and h(x) is a reference curve. I am trying to use the least squares to find the horizontal shift, x scale, and y scale terms that will best fit the reference curve to the measured results.
I am using the interpolate function to fit a spline to the reference data so the spline can be used to find intermediate values along the curve.
Here is my code:
import numpy
from scipy import optimize
from scipy import interpolate
def f(x):
vals = {1: 0.35, 17: 0.45, 33: 0.67, 49: 0.8, 65: 0.73, 81: 0.65, 97: 0.51, 113: 0.27, 129: 0.01, 145: -0.1,
161: -0.19, 177: -0.21, 193: -0.2, 209: -0.23, 225: -0.24, 241: -0.25, 257: -0.23, 273: -0.26, 289: -0.28,
305: -0.22, 321: -0.24, 337: -0.12, 353: 0.14}
return vals[x]
def h(x):
vals = {1: -0.2, 17: -0.2, 33: -0.2, 49: -0.2, 65: -0.2, 81: -0.2, 97: -0.2, 113: -0.2, 129: -0.1, 145: 0.1,
161: 0.32, 177: 0.4, 193: 0.7, 209: 0.81, 225: 0.7, 241: 0.6, 257: 0.5, 273: 0.3, 289: 0, 305: -0.1,
321: -0.2, 337: -0.2, 353: -0.2}
return vals[x]
x1 = []
y1 = []
for i in range(1, 365, 16):
x1.append(i)
y1.append(h(i))
tck = interpolate.splrep(x1, y1)
fun = lambda x: ((1 / 22.8125 * numpy.sum(
(f(i) - (x[0] * interpolate.splev((x[1] * (i + x[2]) + 0.5), tck)) - 0.5) ** 2 for i in range(1, 365, 16))) ** (
1 / 2))
bnds = ((0.3, 1.5), (0.3, 1.5), (0, 150))
res = optimize.minimize(fun, (1, 1, 0), method='SLSQP', bounds=bnds)
print res.x
Again, when I run this I simply get [1.0, 1.0, 0.0] for res.x. Any thoughts?
Thank you!

1 / 2 in Python2 without from __future__ import division is equal to 0, and this seems to be what's causing your problem. After replacement with 0.5 or 1./2, I get
[ 3.00000000e-01 1.14967789e+00 7.48854782e-04]
for res.x.

Related

How to more accurately approximate a set of points?

I would like to approximate bond yields in python. But the question arose which curve describes this better?
import numpy as np
import matplotlib.pyplot as plt
x = [0.02, 0.22, 0.29, 0.38, 0.52, 0.55, 0.67, 0.68, 0.74, 0.83, 1.05, 1.06, 1.19, 1.26, 1.32, 1.37, 1.38, 1.46, 1.51, 1.61, 1.62, 1.66, 1.87, 1.93, 2.01, 2.09, 2.24, 2.26, 2.3, 2.33, 2.41, 2.44, 2.51, 2.53, 2.58, 2.64, 2.65, 2.76, 3.01, 3.17, 3.21, 3.24, 3.3, 3.42, 3.51, 3.67, 3.72, 3.74, 3.83, 3.84, 3.86, 3.95, 4.01, 4.02, 4.13, 4.28, 4.36, 4.4]
y = [3, 3.96, 4.21, 2.48, 4.77, 4.13, 4.74, 5.06, 4.73, 4.59, 4.79, 5.53, 6.14, 5.71, 5.96, 5.31, 5.38, 5.41, 4.79, 5.33, 5.86, 5.03, 5.35, 5.29, 7.41, 5.56, 5.48, 5.77, 5.52, 5.68, 5.76, 5.99, 5.61, 5.78, 5.79, 5.65, 5.57, 6.1, 5.87, 5.89, 5.75, 5.89, 6.1, 5.81, 6.05, 8.31, 5.84, 6.36, 5.21, 5.81, 7.88, 6.63, 6.39, 5.99, 5.86, 5.93, 6.29, 6.07]
a = np.polyfit(np.power(x,0.5), y, 1)
y1 = a[0]*np.power(x,0.5)+a[1]
b = np.polyfit(np.log(x), y, 1)
y2 = b[0]*np.log(x) + b[1]
c = np.polyfit(x, y, 2)
y3 = c[0] * np.power(x,2) + np.multiply(c[1], x) + c[2]
plt.plot(x, y, 'ro', lw = 3, color='black')
plt.plot(x, y1, 'g', lw = 3, color='red')
plt.plot(x, y2, 'g', lw = 3, color='green')
plt.plot(x, y3, 'g', lw = 3, color='blue')
plt.axis([0, 4.5, 2, 8])
plt.rcParams['figure.figsize'] = [10, 5]
The parabolic too goes down at the end (blue), the logarithmic goes too quickly to zero at the beginning (green), and the square root has a strange hump (red). Is there any other ways of more accurate approximation or is it that I'm already getting pretty good?
Your fits look really good! If you wanted more information to compare which of your fits is better, you can look at sum of residuals and covariance of the coefficients.
a,residuals,cov = np.polyfit(np.power(x,0.5), y, 1, full=True, cov=True)
Residuals is the sum of squared residuals of the least-squares fit.
The cov matrix is the covariance of the polynomial coefficient estimates. The diagonal of this matrix is the variance estimates for each coefficient.
You need to search on google about "different types of error measures". These would help you to determine your best fit. Most commonly Root Mean Squared Error (RMSE) or Mean Absolute Percentage Error (MAPE) are used. You can also read about Relative Root Mean Squared Error (rRMSE). Choice of error measure depends on the problem at hand.

Double inequality constraint in Gekko

I have an optimization problem in which some inequalities constraints can either be 0 or greater than a certain value. For example, in the code below, qtde and c1 are lists and pp is a 2d numpy array.
import numpy as np
from gekko import GEKKO
qtde = [7, 2, 2, 12, 2, 7, 1.5, 8, 4, 16, 2, 1, 3, 0.2, 3, 1, 1, 10, 8, 5, 3, 2.5, 5, 2.5, 10, 3, 1, 6, 12, 2, 6, 1, 4, 1, 2, 10, 1, 1, 1, 1]
c1 = [26.0, 150.0, 300.0, 110.0, 400.0, 500.0, 200.0, 200.0, 27.0, 150.0, 50.0, 200.0, 75.0, 0.0, 250.0, 22.8, 300.0, 22.8, 22.8, 150.0, 300.0, 150.0, 100.0, 100.0, 1000.0, 150.0, 150.0, 200.0, 31.2, 100.0, 100.0, 50.0, 23.0, 300.0, 200.0, 300.0, 0.0, 300.0, 30.0, 26.0, 300.0, 300.0, 250.0, 100.0, 100.0, 200.0, 400.0, 21.2, 200.0, 500.0, 0.0]
mm = [[4,0,0,0,0,0,0,0,0,0,9,0,0,0,0,0,5,0,2,0,0,0,7,0,0,0,6,0,0,0,8,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,13,0,0,0,0,0,0,0,0,0,0,0,12,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,11,0,10,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,19,0,0,0,0,0,0,17,15,0,0,16,0,0,18,0,0,0,0,0,0,0,0,0,0],
[26,0,0,0,0,0,0,0,0,0,27,0,0,0,0,0,0,0,21,0,0,0,25,0,0,0,23,0,0,0,22,0,0,0,0,0,0,0,0,0,24,0,20,0,0,0,0,0,0,0,0],
[29,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,34,0,0,0,0,0,0,0,30,0,0,31,0,0,0,0,0,0,0,32,0,0,33,0,28,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,40,0,0,0,0,0,0,0,37,0,0,0,36,0,0,0,38,0,0,0,39,0,0,0,0,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0,0],
[42,0,0,0,0,0,0,0,0,0,48,0,0,0,0,0,44,0,43,0,0,0,49,0,0,0,46,0,0,0,47,0,0,0,0,0,0,0,0,0,45,0,41,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,54,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,53,0,0,0,52,0,0,0,0,0,0,0,0,0,51,0,50,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,60,0,0,0,0,0,0,0,56,0,0,0,59,0,0,0,57,0,0,0,58,0,0,0,0,0,0,0,0,0,0,0,55,0,0,0,0,0,0,0,0],
[69,0,0,0,0,0,0,0,0,0,68,0,0,0,0,0,61,0,0,0,0,0,64,0,0,0,63,0,0,0,65,0,0,0,0,0,0,67,0,0,62,0,66,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,71,0,70,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,78,0,0,0,0,0,77,0,0,0,0,0,73,0,0,0,76,0,0,0,75,0,0,0,0,0,0,0,0,0,74,0,72,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,80,0,0,0,79,0,0,0,82,0,0,0,0,0,0,0,0,0,83,0,81,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,86,0,0,0,84,0,0,0,0,0,0,0,0,0,85,0,87,0,0,0,0,0,0,0,0],
[93,0,0,0,0,0,0,0,0,0,95,0,0,0,0,0,94,0,92,0,0,0,90,0,0,0,91,0,0,0,96,0,0,0,0,0,0,0,0,0,89,0,88,0,0,0,0,0,0,0,0],
[104,0,0,0,0,0,0,0,0,0,100,0,0,0,0,0,99,0,98,0,0,0,103,0,0,0,101,0,0,0,102,0,0,0,0,0,0,0,0,0,0,0,97,0,0,0,0,0,0,0,0],
[112,0,0,0,0,0,0,0,0,0,108,0,0,0,0,0,110,0,107,0,0,0,111,0,0,0,109,0,0,0,113,0,0,0,0,0,0,0,0,0,106,0,105,0,0,0,0,0,0,0,0],
[114,0,0,0,0,0,0,0,0,0,116,0,0,0,0,0,117,0,119,0,0,0,115,0,0,0,118,0,0,0,120,0,0,0,0,0,0,0,0,0,121,0,122,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,123,0,0,0,0,0,0,0,0],
[0,129,0,0,0,0,126,0,0,0,0,0,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,127,125,0,0,0,0,0,0,0,0,0,0,130,0,0,0,0,0,124,0,131,0,0,0],
[0,133,0,0,0,0,136,0,0,0,0,0,0,135,0,0,0,0,0,0,0,0,0,0,0,0,0,132,0,0,0,0,0,0,0,0,0,0,134,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,138,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,137,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,139,0,0,0,0,0,0,0,0,0,0,0,0,140,0,0,0,0,0,0,0,0,0,0,0,0,0,141],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,142,0,143,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,144,0,0,0,150,0,146,0,149,0,0,0,0,0,0,152,0,0,0,145,0,0,0,0,147,0,0,151,0,0,0,0,0,148],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,154,0,0,0,0,0,153,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,155,0,0,0,157,0,0,156,0,0,0,158,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,160,0,0,0,0,0,0,0,0,0,0,0,0,0,159,0],
[0,0,0,0,0,0,0,0,0,0,0,161,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,164,0,0,163,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,162,0],
[0,0,165,0,0,0,0,0,0,166,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,167,169,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,170,0,0,0,0,0,0,0,0,0,0,168,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,173,0,0,0,0,0,0,175,177,0,0,171,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,176,0,0,0,0,0,0,0,0,0,0,0,0,174,172,0],
[0,0,0,0,0,0,0,0,0,0,0,0,180,0,0,178,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,179,0],
[0,0,0,0,182,184,0,186,0,0,0,183,185,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,181,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,190,191,0,0,187,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,189,0,0,0,0,0,0,0,0,0,0,0,0,0,188,0],
[0,0,193,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,192,0,0,0,0],
[0,0,197,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,196,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,195,0,0,194,0,0,0,0],
[0,0,0,0,0,0,0,0,0,199,0,0,0,0,201,0,0,0,0,0,0,0,200,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,198,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,203,0,0,0,0,204,0,0,0,0,0,0,0,0,0,0,0,0,0,0,202,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,205,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
mm = np.array(mm)
#
pp = [[5.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,7.90,0.00,0.00,0.00,0.00,0.00,5.49,0.00,2.89,0.00,0.00,0.00,5.98,0.00,0.00,0.00,5.94,0.00,0.00,0.00,6.21,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.55,0.00,2.89,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,5.70,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.61,0.00,0.00,0.00,5.80,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.15,0.00,3.15,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,15.95,0.00,0.00,0.00,0.00,0.00,0.00,14.00,11.95,0.00,0.00,12.36,0.00,0.00,14.18,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[3.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,2.20,0.00,0.00,0.00,2.80,0.00,0.00,0.00,2.29,0.00,0.00,0.00,2.27,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,2.61,0.00,2.20,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[3.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,9.76,0.00,0.00,0.00,0.00,0.00,0.00,0.00,5.70,0.00,0.00,6.47,0.00,0.00,0.00,0.00,0.00,0.00,0.00,7.47,0.00,0.00,8.51,0.00,3.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,10.50,0.00,0.00,0.00,0.00,0.00,0.00,0.00,9.52,0.00,0.00,0.00,9.10,0.00,0.00,0.00,9.57,0.00,0.00,0.00,9.62,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,9.10,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[6.75,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,9.50,0.00,0.00,0.00,0.00,0.00,7.98,0.00,6.99,0.00,0.00,0.00,11.05,0.00,0.00,0.00,8.55,0.00,0.00,0.00,8.88,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,8.27,0.00,6.75,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,11.20,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,10.95,0.00,0.00,0.00,9.75,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,9.63,0.00,9.16,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.69,0.00,0.00,0.00,1.98,0.00,0.00,0.00,1.77,0.00,0.00,0.00,1.96,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,1.69,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[10.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,7.10,0.00,0.00,0.00,0.00,0.00,1.59,0.00,0.00,0.00,0.00,0.00,1.95,0.00,0.00,0.00,1.74,0.00,0.00,0.00,2.09,0.00,0.00,0.00,0.00,0.00,0.00,6.43,0.00,0.00,1.70,0.00,2.83,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,9.93,0.00,9.93,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,18.40,0.00,0.00,0.00,0.00,0.00,14.49,0.00,0.00,0.00,0.00,0.00,12.89,0.00,0.00,0.00,14.36,0.00,0.00,0.00,13.76,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,13.48,0.00,11.91,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,9.39,0.00,0.00,0.00,7.97,0.00,0.00,0.00,9.57,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,10.24,0.00,9.49,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,33.35,0.00,0.00,0.00,14.80,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,18.00,0.00,72.90,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[5.70,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,6.00,0.00,0.00,0.00,0.00,0.00,5.78,0.00,4.50,0.00,0.00,0.00,3.90,0.00,0.00,0.00,4.06,0.00,0.00,0.00,6.46,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.55,0.00,3.55,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[4.50,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.60,0.00,0.00,0.00,0.00,0.00,3.19,0.00,2.69,0.00,0.00,0.00,4.12,0.00,0.00,0.00,3.75,0.00,0.00,0.00,4.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,2.69,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[5.70,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.80,0.00,0.00,0.00,0.00,0.00,4.65,0.00,3.69,0.00,0.00,0.00,5.42,0.00,0.00,0.00,4.50,0.00,0.00,0.00,6.40,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,3.55,0.00,3.55,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[4.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,5.40,0.00,0.00,0.00,0.00,0.00,5.49,0.00,6.60,0.00,0.00,0.00,4.33,0.00,0.00,0.00,6.38,0.00,0.00,0.00,6.92,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,7.09,0.00,8.68,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,8.68,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,18.99,0.00,0.00,0.00,0.00,16.98,0.00,0.00,0.00,0.00,0.00,0.00,17.80,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,17.20,16.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,28.58,0.00,0.00,0.00,0.00,0.00,13.99,0.00,30.45,0.00,0.00,0.00],
[0.00,9.49,0.00,0.00,0.00,0.00,34.98,0.00,0.00,0.00,0.00,0.00,0.00,18.90,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,8.77,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,15.90,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,47.90,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,38.39,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,89.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,91.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,92.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,66.89,0.00,79.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,27.30,0.00,0.00,0.00,36.90,0.00,29.50,0.00,36.00,0.00,0.00,0.00,0.00,0.00,0.00,49.90,0.00,0.00,0.00,28.90,0.00,0.00,0.00,0.00,31.99,0.00,0.00,42.00,0.00,0.00,0.00,0.00,0.00,33.50],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,65.00,0.00,0.00,0.00,0.00,0.00,23.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,12.89,0.00,0.00,0.00,13.99,0.00,0.00,13.90,0.00,0.00,0.00,14.32,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,16.50,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,15.57,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,36.75,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,58.73,0.00,0.00,53.43,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,51.85,0.00],
[0.00,0.00,5.39,0.00,0.00,0.00,0.00,0.00,0.00,6.90,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,12.36,14.63,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,18.76,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,12.90,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,86.00,0.00,0.00,0.00,0.00,0.00,0.00,89.90,97.30,0.00,0.00,81.60,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,96.70,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,89.00,83.77,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,64.28,0.00,0.00,49.46,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,52.34,0.00],
[0.00,0.00,0.00,0.00,79.90,89.00,0.00,124.00,0.00,0.00,0.00,85.00,104.47,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,67.20,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,91.00,91.11,0.00,0.00,73.61,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,81.50,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,80.60,0.00],
[0.00,0.00,2.47,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,2.44,0.00,0.00,0.00,0.00],
[0.00,0.00,28.44,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,15.90,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,15.10,0.00,0.00,13.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,22.00,0.00,0.00,0.00,0.00,31.92,0.00,0.00,0.00,0.00,0.00,0.00,0.00,28.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,22.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,8.55,0.00,0.00,0.00,0.00,62.70,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,8.30,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00],
[0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,62.70,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00]]
pp = np.array(pp)
#c1 = [26.0, 150.0, 300.0, 110.0, 400.0, 500.0, 200.0, 200.0, 27.0, 150.0, 50.0, 200.0, 75.0, 0.0, 250.0, 22.8, 300.0, 22.8, 22.8, 150.0, 300.0, 150.0, 100.0, 100.0, 1000.0, 150.0, 150.0, 200.0, 31.2, 100.0, 100.0, 50.0, 23.0, 300.0, 200.0, 300.0, 0.0, 300.0, 30.0, 26.0, 300.0, 300.0, 250.0, 100.0, 100.0, 200.0, 400.0, 21.2, 200.0, 500.0, 0.0]
m = GEKKO()
ni = 40
nj = 51
x = [[m.Var(lb=0,integer=True) for j in range(nj)] for i in range(ni)]
s = 0
expr = []
for i in range(ni):
for j in range(nj):
s += x[i][j]
for i in range(ni):
expr.append(sum(x[i]))
for i in range(ni):
for j in range(nj):
if mm[i][j] == 0:
m.Equation(x[i][j] == 0)
for i in range(ni):
m.Equation(sum([x[i][j] for j in range(nj)]) >= qtde[i])
b = m.Array(m.Var,nj,integer=True,lb=0,ub=1)
iv = [None]*nj
for j in range(nj):
iv[j] = m.sum([pp[i][j]*x[i][j] for i in range(ni)])
m.Equation(iv[j] >= b[j]*c1[j])
m.Equation((1 - b[j])*iv[j] == 0)
m.Obj(m.sum(expr))
m.options.SOLVER=1 # switch to APOPT
m.solver_options = ['minlp_gap_tol 1.0e-1',\
'minlp_maximum_iterations 10000',\
'minlp_max_iter_with_int_sol 1000',\
'minlp_branch_method 1',\
'minlp_integer_leaves 2']
m.solve()
Edit: I have changed the writing of the last constraint as suggested by John Hedengren (bellow). However, with the insertion of the binary variable, the code now returns an error before starting any iterations. How can this be prevented?
You can use a binary variable (0=equipment off, 1=equipment on and above threshold) and equation as:
b = m.Array(m.Var,nj,integer=True,lb=0,ub=1)
iv = [None]*nj
for j in range(nj):
iv[j] = m.sum([pp[i][j]*x[i][j] for i in range(ni)])
m.Equation(iv[j] >= b[j]*c1[j])
m.Equation((1-b[j])*iv[j] <= 0)
m.options.SOLVER = 1 # Change to MINLP solver
You can split out the summation into an intermediate variable iv because it is used in two equations. Another recommendation is to use m.sum() instead of sum. Using the Gekko summation is typically faster. There are also other ways to pose the problem but this may be the most reliable. I can't verify this solution because your script is missing some inputs. It helps on future posts to reduce the problem to a Minimal and Reproducible example so that solutions can be verified. There is additional information on logical conditions in optimization problems.
Response to Edit
The MINLP does not converge quickly because there are nj x ni = 2040 binary variables. That is 2^2040 potential solutions. You can adjust solver settings to help it find at least one feasible solution.
m.options.SOLVER=3
m.solve() # sometimes it helps to solve with IPOPT first
m.options.SOLVER=1 # switch to APOPT
m.solver_options = ['minlp_gap_tol 1.0e-2',\
'minlp_maximum_iterations 10000',\
'minlp_max_iter_with_int_sol 500',\
'minlp_branch_method 1',\
'minlp_integer_leaves 2']
m.solve()
There is additional description on the solver options on the APOPT website.
Response to Edit
The error on the first MINLP iteration is because the problem is not feasible. If you switch to solver option minlp_as_nlp 1 then you can see the first NLP problem fail to converge. You can also see this with the IPOPT solver if you switch to m.options.SOLVER=3.
EXIT: Converged to a point of local infeasibility.
Problem may be infeasible.
If you solve locally with m=GEKKO(remote=False) and open the run folder before the solve command with m.open_folder() then you can see the infeasibilities.txt file that will help you identify the infeasible equation. I suspect that the infeasibility is because of the equations m.Equation(m.sum([x[i][j] for j in range(nj)]) >= qtde[i]) and m.Equation(x[i][j] == 0). You can also try to identify an infeasible problem with m.options.COLDSTART=2. There is additional help on troubleshooting applications in exercise 18 in the Gekko tutorials.

Simple Neural Network Value Error - shapes not aligned

Im a noob for this, but I started to learn Neural Networks.
I want to make simple NN with Python and Numpy. I have watched one tutorial on Youtube abut that, and I did everything the same, but I get an error:
output = sigmoid(np.dot(input_layer, weights))
ValueError: shapes (13,3) and (13,1) not aligned: 3 (dim 1) != 13 (dim 0)
I know that my output array should look like 1D array, but for some reason I cant get that.
What am I doing wrong
import numpy as np
import pandas as pd
df = pd.DataFrame({'input 1':[0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, -0.1, -0.6, 0.2, 0.6, 0, 0.2],
'input 2':[0.3, 0.5, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1],
'input 3':[0, 0.4, 0, -0.1, 0.4, -0.2, 0.4, -0.3, -0.1, 0.1, 0.3, 0, 0.5],
'result':[1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1]})
print(df)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
features = np.array(df.iloc[:,:-1])
results = np.array(df.iloc[:,-1:]).T
np.random.seed(10)
weights = 2 * np.random.random((13,1)) - 1
print('These are my random weights:\n')
print(weights)
for iteration in range(1):
input_layer = features
output = sigmoid(np.dot(input_layer, weights))
print('\nOutput result:\n', output)
I have managed to find the result:
import numpy as np
import pandas as pd
df = pd.DataFrame({'input 1':[0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, 0.1, -0.6, 0.2, 0.6, 0, 0.2, 0.2, -0.1, -0.1, 0, 0.4, -0.2, -0.4],
'input 2':[0.3, 0.6, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1, 0.3, 0.1, 0.1, 0.3, 0.1, 0.3, 0.3],
'input 3':[0, 0.4, 0, -0.1, 0.4, -0.2, 0.7, -0.3, -0.1, 0.1, 0.3, 0, 0.5, 0.4, -0.31, 0.1, 0.3, 0.1, 0.1, 0.2],
'result':[1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0]})
print(df)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_derivate(x):
return x * (1 - x)
features = df.iloc[:,:-1].to_numpy()
results = df.iloc[:,-1:].to_numpy()
np.random.seed(1)
weights = 2 * np.random.random((3,1)) - 1
print('These are my random weights:\n')
print(weights)
for iteration in range(100000):
input_layer = features
outputs = sigmoid(np.dot(input_layer, weights))
error = results - outputs
adjustments = error * sigmoid_derivate(outputs)
weights += np.dot(input_layer.T, adjustments)
df['output prediction'] = outputs.round(0)
print(df)

Tunning Neural Network made in Python with NumPy

I have wrote a code for neural network that uses sigmoid function. I made it with NumPy and Python.
Code works good, but now I want to tune it, to improve accuracy. How can I tune my NN, do I need to add some parameters, or to add hidden layers to it?
Is it even possible?
This is the code that I have:
import numpy as np
import pandas as pd
df = pd.DataFrame({'input 1':[0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, 0.1, -0.6, 0.2, 0.6, 0, 0.2, 0.2, -0.1, -0.1, 0, 0.4, -0.2, -0.4],
'input 2':[0.3, 0.6, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1, 0.3, 0.1, 0.1, 0.3, 0.1, 0.3, 0.3],
'input 3':[0, 0.4, 0, -0.1, 0.4, -0.2, 0.7, -0.3, -0.1, 0.1, 0.3, 0, 0.5, 0.4, -0.31, 0.1, 0.3, 0.1, 0.1, 0.2],
'result':[1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0]})
print(df)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_derivate(x):
return x * (1 - x)
features = df.iloc[:,:-1].to_numpy()
results = df.iloc[:,-1:].to_numpy()
np.random.seed(1)
weights = 2 * np.random.random((3,1)) - 1
print('These are my random weights:\n')
print(weights)
for iteration in range(100000):
input_layer = features
outputs = sigmoid(np.dot(input_layer, weights))
error = results - outputs
adjustments = error * sigmoid_derivate(outputs)
weights += np.dot(input_layer.T, adjustments)
outputs = outputs.round(0).tolist()
outputs = list(itertools.chain(*outputs))
outputs.insert(0,'None')
df['output prediction'] = outputs
print(df)
df1 = df.tail(len(df)-1)
#print(df1)
acc = 0
for i, j in zip(df1['result'] ,df1['output prediction']):
if i == j:
acc += 1
accuracy = round(acc * 100 /len(df1), 2)
print(accuracy)
I think that I it should be added below part where I define weights, but Im not sure.
Thanks for your help!
import numpy as np
import pandas as pd
df = pd.DataFrame({'input 1':[0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, 0.1, -0.6, 0.2, 0.6, 0, 0.2, 0.2, -0.1, -0.1, 0, 0.4, -0.2, -0.4],
'input 2':[0.3, 0.6, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1, 0.3, 0.1, 0.1, 0.3, 0.1, 0.3, 0.3],
'input 3':[0, 0.4, 0, -0.1, 0.4, -0.2, 0.7, -0.3, -0.1, 0.1, 0.3, 0, 0.5, 0.4, -0.31, 0.1, 0.3, 0.1, 0.1, 0.2],
'result':[1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0]})
print(df)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_derivate(x):
return x * (1 - x)
alpha=0.1#define alpha
features = df.iloc[:,:-1]
results = df.iloc[:,-1:]
features=np.array(features)
results=np.array(results)
np.random.seed(1)
weight0 = 2*np.random.random((3,10)) - 1 #3 - number of features; 10 - number of nodes in hidden layer
weight1 = 2*np.random.random((10,4)) - 1 #10 - number of nodes in hidden layer; 4 - number of nodes in output layer
weight2 = 2*np.random.random((4,1)) - 1 #4 - number of nodes in output layer; 1 - number of labels
# you can change layer's nodes, but they must be able to make dot product. For example (320,160) and (160,40)
for iteration in range(1000):
l0 = features
l1 = sigmoid(np.dot(l0,weight0))
l2 = sigmoid(np.dot(l1,weight1))
l3 = sigmoid(np.dot(l2,weight2))
l3_error = results - l3
print ("Error after "+str(iteration)+" iterations:" + str(np.mean(np.abs(l3_error))))
l3_delta = l3_error*sigmoid_derivate(l3)
l2_error = l3_delta.dot(weight2.T)
l2_delta = l2_error * sigmoid_derivate(l2)
l1_error = l2_delta.dot(weight1.T)
l1_delta = l1_error * sigmoid_derivate(l1)
weight2 += alpha*l2.T.dot(l3_delta)
weight1 += alpha*l1.T.dot(l2_delta)
weight0 += alpha*l0.T.dot(l1_delta)
Here is your code with 1 input, 1 hidden and 1 output layers.

Why is Scipy's ndimage.map_coordinates returning no values or wrong results for some arrays?

Code Returning Correct value but not always returning a value
In the following code, python is returning the correct interpolated value for arr_b but not for arr_a.
Event though, I've been looking at this problem for about a day now, I really am not sure what's going on.
For some reason, for arr_a, twoD_interpolate keeps returning [0] even if I play around or mess around with the data and input.
How can I fix my code so it's actually interpolating over arr_a and returning the correct results?
import numpy as np
from scipy.ndimage import map_coordinates
def twoD_interpolate(arr, xmin, xmax, ymin, ymax, x1, y1):
"""
interpolate in two dimensions with "hard edges"
"""
ny, nx = arr.shape # Note the order of ny and xy
x1 = np.atleast_1d(x1)
y1 = np.atleast_1d(y1)
# Mask upper and lower boundaries using #Jamies suggestion
np.clip(x1, xmin, xmax, out=x1)
np.clip(y1, ymin, ymax, out=y1)
# Change coordinates to match your array.
x1 = (x1 - xmin) * (xmax - xmin) / float(nx - 1)
y1 = (y1 - ymin) * (ymax - ymin) / float(ny - 1)
# order=1 is required to return your examples.
return map_coordinates(arr, np.vstack((y1, x1)), order=1)
# test data
arr_a = np.array([[0.7, 1.7, 2.5, 2.8, 2.9],
[1.9, 2.9, 3.7, 4.0, 4.2],
[1.4, 2.0, 2.5, 2.7, 3.9],
[1.1, 1.3, 1.6, 1.9, 2.0],
[0.6, 0.9, 1.1, 1.3, 1.4],
[0.6, 0.7, 0.9, 1.1, 1.2],
[0.5, 0.7, 0.9, 0.9, 1.1],
[0.5, 0.6, 0.7, 0.7, 0.9],
[0.5, 0.6, 0.6, 0.6, 0.7]])
arr_b = np.array([[6.4, 5.60, 4.8, 4.15, 3.5, 2.85, 2.2],
[5.3, 4.50, 3.7, 3.05, 2.4, 1.75, 1.1],
[4.7, 3.85, 3.0, 2.35, 1.7, 1.05, 0.4],
[4.2, 3.40, 2.6, 1.95, 1.3, 0.65, 0.0]])
# Test the second array
print twoD_interpolate(arr_b, 0, 6, 9, 12, 4, 11)
# Test first area
print twoD_interpolate(
arr_a, 0, 500, 0, 2000, 0, 2000)
print arr_a[0]
print twoD_interpolate(
arr_a_60, 0, 500, 0, 2000, 0, 2000)[0]
print twoD_interpolate(
arr_a, 20, 100, 100, 1600, 902, 50)
print twoD_interpolate(
arr_a, 100, 1600, 20, 100, 902, 50)
print twoD_interpolate(
arr_a, 100, 1600, 20, 100, 50, 902)
## Output
[ 1.7]
[ 0.]
[ 0.7 1.7 2.5 2.8 2.9]
0.0
[ 0.]
[ 0.]
[ 0.]
Code returning incorrect value:
arr = np.array([[12.8, 20.0, 23.8, 26.2, 27.4, 28.6],
[10.0, 13.6, 15.8, 17.4, 18.2, 18.8],
[5.5, 7.7, 8.7, 9.5, 10.1, 10.3],
[3.3, 4.7, 5.1, 5.5, 5.7, 6.1]])
twoD_interpolate(arr, 0, 1, 1400, 3200, 0.5, 1684)
# above should return 21 but is returning 3.44
This is actually my fault in the original question.
If we examine the position it is trying to interpolate twoD_interpolate(arr, 0, 1, 1400, 3200, 0.5, 1684) we get arr[ 170400, 0.1] as the value to find which will be clipped by mode='nearest' to arr[ -1 , 0.1]. Note I switched the x and y to get the positions as it would appear in an array.
This corresponds to a interpolation from the values arr[-1,0] = 3.3 and arr[-1,1] = 4.7 so the interpolation looks like 3.3 * .9 + 4.7 * .1 = 3.44.
The issues comes in the stride. If we take an array that goes from 50 to 250:
>>> a=np.arange(50,300,50)
>>> a
array([ 50, 100, 150, 200, 250])
>>> stride=float(a.max()-a.min())/(a.shape[0]-1)
>>> stride
50.0
>>> (75-a.min()) * stride
1250.0 #Not what we want!
>>> (75-a.min()) / stride
0.5 #There we go
>>> (175-a.min()) / stride
2.5 #Looks good
We can view this using map_coordinates:
#Input array from the above.
print map_coordinates(arr, np.array([[.5,2.5,1250]]), order=1, mode='nearest')
[ 75 175 250] #First two are correct, last is incorrect.
So what we really need is (x-xmin) / stride, for previous examples the stride was 1 so it did not matter.
Here is what the code should be:
def twoD_interpolate(arr, xmin, xmax, ymin, ymax, x1, y1):
"""
interpolate in two dimensions with "hard edges"
"""
arr = np.atleast_2d(arr)
ny, nx = arr.shape # Note the order of ny and xy
x1 = np.atleast_1d(x1)
y1 = np.atleast_1d(y1)
# Change coordinates to match your array.
if nx==1:
x1 = np.zeros_like(x1.shape)
else:
x_stride = (xmax-xmin)/float(nx-1)
x1 = (x1 - xmin) / x_stride
if ny==1:
y1 = np.zeros_like(y1.shape)
else:
y_stride = (ymax-ymin)/float(ny-1)
y1 = (y1 - ymin) / y_stride
# order=1 is required to return your examples and mode=nearest prevents the need of clip.
return map_coordinates(arr, np.vstack((y1, x1)), order=1, mode='nearest')
Note that clip is not required with mode='nearest'.
print twoD_interpolate(arr, 0, 1, 1400, 3200, 0.5, 1684)
[ 21.024]
print twoD_interpolate(arr, 0, 1, 1400, 3200, 0, 50000)
[ 3.3]
print twoD_interpolate(arr, 0, 1, 1400, 3200, .5, 50000)
[ 5.3]
Checking for arrays that are either 1D or pseudo 1D. Will interpolate the x dimension only unless the input array is of the proper shape:
arr = np.arange(50,300,50)
print twoD_interpolate(arr, 50, 250, 0, 5, 75, 0)
[75]
arr = np.arange(50,300,50)[None,:]
print twoD_interpolate(arr, 50, 250, 0, 5, 75, 0)
[75]
arr = np.arange(50,300,50)
print twoD_interpolate(arr, 0, 5, 50, 250, 0, 75)
[50] #Still interpolates the `x` dimension.
arr = np.arange(50,300,50)[:,None]
print twoD_interpolate(arr, 0, 5, 50, 250, 0, 75)
[75]

Categories

Resources