How can I test linearity (superposition) & shift-invariance in Python?

I'm new to Python & writing a program that takes a function:
X = np.linspace(0,50)
F1 = np.sin(X)
Tests whether the function is linear (as in, exhibits superposition):
for i in range(1,10):
    LT1 = i*(F1)
    X = i*X
    LT2 = F1
    if np.all(LT1) == np.all(LT2):
        Linear = 'This function is linear.'
    elif np.all(LT1) != np.all(LT2):
        Linear = 'This function is nonlinear.'
        break
And tests whether the function is shift-invariant:
for j in range(1,10):
    SI1 = (F1)-j
    X = X-j
    SI2 = F1
    if np.all(SI1) == np.all(SI2):
        SI = 'This function is shift-invariant.'
    elif np.all(SI1) != np.all(SI2):
        SI = 'This function is shift-variant.'
        break
But my code classifies every function, LSI or not, as linear and shift-variant. Is there a better way to run these tests? I've seen linear regression offered as a means of testing linearity, but when I tried it with a sine function it misclassified the function as nonlinear. I have also been unable to find any guidance whatsoever on testing shift-invariance.

Let's first define all the required functionality:
import numpy as np

def check_linearity(T, X, a, b):
    # T[a*X + b*X] should equal a*T[X] + b*T[X]
    LHS = T(a*X + b*X)
    RHS = a*T(X) + b*T(X)
    tolerance = 1e-4
    # compare the total absolute deviation, so sign cancellations don't hide a mismatch
    if np.sum(np.abs(LHS - RHS)) < tolerance:
        print('Linear System')
        return True
    else:
        print('Not a Linear System')
        return False

def check_shift_invariance(T, X, tau):
    # T[X] should equal T[X - tau]
    LHS = T(X)
    RHS = T(X - tau)
    tolerance = 1e-4
    if np.sum(np.abs(LHS - RHS)) < tolerance:
        print('Shift Invariant System')
        return True
    else:
        print('Not a Shift Invariant System')
        return False

def check_LSI(T, X, a, b, tau):
    flag1 = check_linearity(T, X, a, b)
    flag2 = check_shift_invariance(T, X, tau)
    if flag1 and flag2:
        print('LSI system')
    else:
        print('not a LSI system')
Next, we define the signal:
# Signal X in range [-1,1]
X = np.linspace(-1,1,10)
Then, define the system:
# Transformation T
T = lambda x: np.sin(x)
Now let's see everything we defined in action:
a = 1
b = 1
tau = 2*np.pi
# Check Linearity
check_linearity(T, X, a, b);
# Check Shift Invariance
check_shift_invariance(T, X, tau);
# Check LSI or not
check_LSI(T, X, a, b, tau);
You can easily define other systems like,
T = lambda x: x
T = lambda x: np.sin(x) + np.cos(x)
T = lambda x: x**2 + x + 2
and so on
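Note that the check above only ever feeds the system scaled copies of a single signal (a*X + b*X is just (a+b)*X), so a system can pass it without truly obeying superposition. A stricter check uses two independent test signals; here is a minimal sketch (the check_superposition helper and the random second signal are just for illustration):
import numpy as np

def check_superposition(T, X1, X2, a, b, tol=1e-4):
    # Full superposition: T[a*X1 + b*X2] == a*T[X1] + b*T[X2]
    LHS = T(a*X1 + b*X2)
    RHS = a*T(X1) + b*T(X2)
    return np.sum(np.abs(LHS - RHS)) < tol

X1 = np.linspace(-1, 1, 10)
X2 = np.random.rand(10)  # a second, independent test signal

print(check_superposition(lambda x: 2*x, X1, X2, 2, 3))        # True
print(check_superposition(lambda x: np.sin(x), X1, X2, 2, 3))  # False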

@Ragnar provided a very nice mathematical solution, but I want to post a short one.
If your function is given by its values at equispaced points, then
print(all(abs(x) < 0.001 for x in np.diff(function, n=2)))
returns True if the function is linear and False otherwise.
The idea is that a linear function increases by the same amount at each step (here I use the fact that X is equispaced). Therefore, taking the difference of consecutive values returns an array with all identical entries, and taking the difference of consecutive values again returns all zeros if the function is linear.
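For example, a quick demonstration (the grid and function choices are arbitrary):
import numpy as np

X = np.linspace(0, 50, 100)
linear_fn = 3*X + 2
sine_fn = np.sin(X)

# Second differences vanish for a linear function, but not for sin
print(all(abs(x) < 0.001 for x in np.diff(linear_fn, n=2)))  # True
print(all(abs(x) < 0.001 for x in np.diff(sine_fn, n=2)))    # False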

Related

Python type error: 'numpy.ndarray' object is not callable--how to fix?

This is my code. When I run it, I get the Python error: 'numpy.ndarray' object is not callable. I think the issue is that I call V_function_prime inside find_k and find_w, but I don't know how else to do this.
import numpy as np
import copy as cp
import scipy as sp
from scipy import optimize as optimize
from scipy.interpolate import PchipInterpolator as pchip

# primitives
# beta R <1 means that the
beta = .8
R = 1.02
phi = .8

# define a grid
size = 100
w_grid = np.linspace(0.001, 5, num=size)

# set up functions
def utility(c):
    return np.log(c)

def u_prime(c):
    return 1/c

def production(k):
    return k**(1/3)

def production_prime(k):
    return 1/3*k**(-2/3)

def production_2prime(k):
    return (-2/9)*k**(-5/3)

def inv_prod_prime(x):
    return (3*x)**(-2/3)

# define functions to get threshold value wbar and optimal policy b, k
def find_w(V_function_prime, k_star, capital_evolution):
    w_bar = (1-phi)*k_star + 1/(beta*R*V_function_prime(capital_evolution))
    return w_bar
# find_w(phi, R)

# takes in value w and current guess of v_prime and returns optimal bond choice (b)
def find_b(b, w, v_function_prime, k_star):
    foc = u_prime(w - b - k_star) - beta*R*v_function_prime(production(k_star) + R*b)
    return foc

# takes in value w and current guess of v_prime and returns optimal capital choice (k)
def find_k(k, w, v_function_prime):
    foc = (1-phi)*u_prime(w - (1-phi)*k) - beta*v_function_prime(production(k) - R*phi*k)(production_prime(k)-R*phi)
    return foc

# value function iteration function
def vfi(R, phi, beta, size, tol):
    # use known info ab optimum--add explanation here
    k_star = inv_prod_prime(R)
    capital_evolution = production(k_star) - R*phi*k_star
    # initial guess of value function is utility
    VV = utility(w_grid)
    # V_prime = u_prime(w_grid)
    # params of loop
    err = tol + 1
    epsilon = 1e-5
    while err > tol:
        V_previous = cp.copy(VV)
        V_function = pchip(w_grid, VV)
        # V_w is value function evaluated at w_grid
        V_w = V_function(w_grid)
        V_function_prime = V_function.derivative(1)
        V_prime_w = V_function_prime(w_grid)
        w_bar = find_w(V_function_prime, k_star, capital_evolution)
        k_prime = np.zeros(size)
        b_prime = np.zeros(size)
        for i in range(size):
            # solve unconstrained region of state-space
            if w_grid[i] >= w_bar:
                k_choice = k_star
                # limits set based on natural bounds for borrowing given in the SP
                b_choice = optimize.brentq(find_b, (-phi*k_star), (w_grid[i] - k_star - epsilon), args=(w_grid[i], V_function_prime, k_star))
            # solve constrained region of state-space
            else:
                bound = w_grid[i]/(1-phi) - epsilon
                k_choice = optimize.brentq(find_k, (epsilon), (bound), args=(w_grid[i], V_function_prime))
                b_choice = -phi*k_choice
            # add in new guesses for optimal b, k, and update value function vector
            k_prime[i] = k_choice
            b_prime[i] = b_choice
            VV[i] = utility(w_grid[i] - b_prime[i] - k_prime[i]) + beta*V_function(production(k_prime[i]) + R*b_prime[i])
        V_function_update = pchip(w_grid, VV)
        err = np.max(np.abs(V_function_update(w_grid) - V_w))
        print(err)
        V_function = V_function_update
    return V_function, b_prime, k_prime

vfi(R, phi, beta, size=100, tol=1e-3)
I know this happens because I have a function V_function_prime that I am passing into another function, but I'm not quite sure how to solve this.
# takes in value w and current guess of v_prime and returns optimal capital choice (k)
def find_k(k, w, v_function_prime):
    foc = (1-phi)*u_prime(w - (1-phi)*k) - beta*v_function_prime(production(k) - R*phi*k)(production_prime(k)-R*phi)  ## Two times repeated function call
    return foc
Check the block above. v_function_prime(...) returns an array, and you are then calling that array again with other arguments, which is what raises 'numpy.ndarray' object is not callable.
You might need to remove one of the two calls.
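If the first-order condition was meant to apply the chain rule, the fix is most likely a multiplication between the two factors rather than a second call. A sketch, assuming multiplication was intended and reusing phi, beta, R, u_prime, production, and production_prime from the question:
# takes in value w and current guess of v_prime and returns optimal capital choice (k)
def find_k(k, w, v_function_prime):
    # assuming the chain rule was intended: multiply by the derivative term
    # instead of calling the array returned by v_function_prime(...)
    foc = (1-phi)*u_prime(w - (1-phi)*k) - beta*v_function_prime(production(k) - R*phi*k)*(production_prime(k) - R*phi)
    return foc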

Locally bind functions within lambdas

Is there a way to locally bind functions within lambdas? I have a loop, and within each iteration I create an array of functions. I want to create another function which is the sum of all of these functions and store it in another array, so that the new array holds the sum of the functions for each loop iteration.
Then I want to create another function that is the sum over all of these per-iteration sums.
However, the problem is that the original functions keep updating, so I am not getting the desired result. Can I locally bind the functions? Am I approaching this the wrong way?
import numpy as np

lmax = 4
lapprox = []

# Function to estimate
def curve_func(x):
    return np.sin(x*np.pi)*x**2

# Initialise residual function
def residual_func(x):
    return curve_func(x)

# For each l, create 2**l+1 nodes and determine new nodes.
for l in range(lmax):
    nodes = np.linspace(0, 1, 2**l+1, endpoint = True)
    if (l==0):
        old_nodes = nodes
        new_nodes = nodes
    else:
        old_nodes = np.linspace(0, 1, 2**(l-1)+1, endpoint = True)
        new_nodes = [x for x in nodes if x not in old_nodes]
    # Create basis function corresponding to each new node
    if (l==0):
        phi = [lambda x, i=i: max(1 - abs(2**l * x - i), 0) for i in range(len(new_nodes))]
    else:
        phi = [lambda x, i=i: max(1 - abs(2**l * x - (2*i+1)), 0) for i in range(len(new_nodes))]
    # Calculate hierarchical surpluses
    coeff = [residual_func(n) for n in new_nodes]
    # Array of functions: coeff*phi
    coeff_phi = [lambda x, func=func, alpha=alpha: coeff[alpha]*func(x) for alpha, func in enumerate(phi)]
    # Array of length lmax, where each value is sum of functions in coeff_phi for fixed l
    lapprox.append(lambda x: sum(f(x) for f in coeff_phi))
    # Sum of all functions in lapprox
    totapprox = lambda x: sum(f(x) for f in lapprox)
    # Compute new residual function
    residual_func = lambda x: curve_func(x) - totapprox(x)
Extra detail on what the code is doing: the code is designed to approximate a function, such as sin(pi*x)*x^2, using hierarchical linear splines. For each level l, there are some basis functions, given by the array phi. The function is approximated by a linear combination of coefficients multiplied by these basis functions. The approximation proceeds sequentially, starting from a low level with few basis functions and ending at a high level with many basis functions. I need to keep track of the rolling approximation of the function to determine the values of the new coefficients.
Edit 2: I've defined the functions outside the loop. However, I am struggling to work out how to create a function that keeps track of the residual function. I have attached the 'dirty' solution that works as intended for lmax=3, but I would like to generalise it for any lmax. How can I do that?
def curve_func(x):
    return np.sin(x*np.pi)*x**2

def residual_func_0(x):
    return curve_func(x)

# Define nodes function
def make_nodes(l):
    return np.linspace(0, 1, 2**l+1, endpoint = True)

# Define new_nodes function
def make_new_nodes(l):
    if (l==0):
        new_nodes = np.linspace(0, 1, 2**l+1, endpoint = True)
    else:
        old_nodes = np.linspace(0, 1, 2**(l-1)+1, endpoint = True)
        new_nodes = [x for x in make_nodes(l) if x not in old_nodes]
    return new_nodes

# Define basis functions
def make_basis(l, i):
    if l == 0:
        return lambda x: max(1 - abs(2**l * x - i), 0)
    else:
        return lambda x: max(1 - abs(2**l * x - (2*i+1)), 0)

# Define coeff*basis functions
def make_scaled_basis(alpha, fn):
    return lambda x: alpha * fn(x)

new_nodes_0 = make_new_nodes(0)
new_nodes_1 = make_new_nodes(1)
new_nodes_2 = make_new_nodes(2)
new_nodes_3 = make_new_nodes(3)

phi_0 = [make_basis(0, i) for i in range(len(new_nodes_0))]
phi_1 = [make_basis(1, i) for i in range(len(new_nodes_1))]
phi_2 = [make_basis(2, i) for i in range(len(new_nodes_2))]
phi_3 = [make_basis(3, i) for i in range(len(new_nodes_3))]

coeff_0 = [curve_func(n) for n in new_nodes_0]
coeff_phi_0 = [make_scaled_basis(alpha, fn) for alpha, fn in zip(coeff_0, phi_0)]
residual_func_0 = lambda x: curve_func(x) - sum(f(x) for f in coeff_phi_0)

coeff_1 = [residual_func_0(n) for n in new_nodes_1]
coeff_phi_1 = [make_scaled_basis(alpha, fn) for alpha, fn in zip(coeff_1, phi_1)]
residual_func_1 = lambda x: residual_func_0(x) - sum(f(x) for f in coeff_phi_1)

coeff_2 = [residual_func_1(n) for n in new_nodes_2]
coeff_phi_2 = [make_scaled_basis(alpha, fn) for alpha, fn in zip(coeff_2, phi_2)]
residual_func_2 = lambda x: residual_func_1(x) - sum(f(x) for f in coeff_phi_2)

coeff_3 = [residual_func_2(n) for n in new_nodes_3]
coeff_phi_3 = [make_scaled_basis(alpha, fn) for alpha, fn in zip(coeff_3, phi_3)]
residual_func_3 = lambda x: residual_func_2(x) - sum(f(x) for f in coeff_phi_3)
A simple way to localize both l and i in the body of a function is to create a function that returns a function which closes over the local variables l and i.
For example:
def make_basis(l, i):
    if l == 0:
        return lambda x: max(1 - abs(2**l * x - i), 0)
    else:
        return lambda x: max(1 - abs(2**l * x - (2*i+1)), 0)
...
for l in range(lmax):
    ...
    phi = [make_basis(l, i) for i in range(len(new_nodes))]
Loops do not create new scopes; only function bodies do.
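To generalise your Edit 2 code to any lmax, the same closure trick can keep track of the rolling residual: wrap the residual update in a function, so each level closes over its own copy of the previous residual. A minimal sketch, reusing curve_func, make_new_nodes, make_basis, and make_scaled_basis from the question (lmax = 4 is just the example value):
def make_residual(prev_residual, coeff_phi):
    # Closes over this level's prev_residual and coeff_phi,
    # so later loop iterations cannot rebind them.
    return lambda x: prev_residual(x) - sum(f(x) for f in coeff_phi)

lmax = 4
residual = curve_func
levels = []  # coeff_phi arrays, one per level
for l in range(lmax):
    new_nodes = make_new_nodes(l)
    phi = [make_basis(l, i) for i in range(len(new_nodes))]
    coeff = [residual(n) for n in new_nodes]
    coeff_phi = [make_scaled_basis(alpha, fn) for alpha, fn in zip(coeff, phi)]
    levels.append(coeff_phi)
    residual = make_residual(residual, coeff_phi)

# residual is now curve_func minus the full hierarchical approximation,
# i.e. the generalisation of residual_func_3 to arbitrary lmax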

Writing lambda in MATLAB

I am trying to convert this code to MATLAB, but I am not sure how to do the subscripts (Y[i] = Y[i-1]) or the func and f_exact variables.
Here's the code:
def Forward_Euler(y0,t0,T,dt,f):
    t = np.arange(t0,T+dt,dt)
    Y = np.zeros(len(t))
    Y[0] = y0
    for i in range(1,len(t)):
        Y[i] = Y[i-1]+dt*f(Y[i-1], t[i-1])
    return Y, t

func = lambda y,t: y-t
f_exact = lambda t: t+1-1/2*np.exp(t)
You can use anonymous functions in matlab:
func = @(y,t)(y - t)
f_exact = @(t)(t + 1 - exp(t)/2) % it works with any matrix t as well
You can use these with matrices as well (they must follow matrix operation rules). For example, since func subtracts t from y, the dimensions of y and t must match.

Find two zeros of a function with Python

I have a function f(x) which I know has two zeros within an interval, and I need to compute both x values for which the function crosses 0.
I usually use
import scipy.optimize as opt
opt.brentq(f, xmin, xmax)
But the problem is that this method only works if the function has a single zero in the interval, and it is not easy to know where to split the interval in two.
The function is also costly to evaluate...
I think a good approach would be to pre-process the search by sampling f before looking for the zeros. During that pre-processing, you evaluate f to detect where its sign changes.
def preprocess(f,xmin,xmax,step):
    first_sign = f(xmin) > 0 # True if f(xmin) > 0, otherwise False
    x = xmin + step
    while x <= xmax: # This loop detects when the function changes its sign
        fstep = f(x)
        if first_sign and fstep < 0:
            return x
        elif not(first_sign) and fstep > 0:
            return x
        x += step
    return x # If you ever reach here, that means that there isn't a zero in the function !!!
With this function, you can separate your initial interval into several smaller intervals. For example:
import scipy.optimize as opt
step = ...
xmid = preprocess(f,xmin,xmax,step)
z0 = opt.brentq(f,xmin,xmid)
z1 = opt.brentq(f,xmid,xmax)
Depending on the function f, you may need to separate your interval into more than two sub-intervals. Just iterate through [xmin,xmax] like this:
x_list = []
x = xmin
while x < xmax: # This discovers when f changes its sign
    x_list.append(x)
    x = preprocess(f,x,xmax,step)
x_list.append(xmax)

z_list = []
for i in range(len(x_list) - 1):
    z_list.append(opt.brentq(f,x_list[i],x_list[i + 1]))
In the end, z_list contains all the zeros in the given interval [xmin,xmax].
Keep in mind that this algorithm is time-consuming but will do the job.
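For instance, here is a quick end-to-end run, reusing preprocess from above with f = cos on [0, 2π] (zeros at π/2 and 3π/2; step = 0.5 is just an example value). A small guard is added because the final sub-interval produced by the loop may not bracket a sign change:
import numpy as np
import scipy.optimize as opt

f = np.cos
xmin, xmax, step = 0.0, 2*np.pi, 0.5

x_list = []
x = xmin
while x < xmax:
    x_list.append(x)
    x = preprocess(f, x, xmax, step)
x_list.append(xmax)

z_list = []
for a, b in zip(x_list[:-1], x_list[1:]):
    if f(a) * f(b) < 0:  # only hand brentq an interval that brackets a zero
        z_list.append(opt.brentq(f, a, b))

print(z_list)  # approximately [pi/2, 3*pi/2]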

having trouble using scipy.optimize.leastsq

Everything else works fine, but when I use the leastsq function, the PyDev editor shows an error that says Undefined variable from import: leastsq. What is going on here?
The code is MIT's Python cost model timing.py at the URL: http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-006-introduction-to-algorithms-fall-2011/readings/python-cost-model/timing.py
and the leastsq part is in the function:
def fit2(A,b):
    """ Relative error minimizer """
    def f(x):
        assert len(x) == len(A[0])
        resids = []
        for i in range(len(A)):
            sum = 0.0
            for j in range(len(A[0])):
                sum += A[i][j]*x[j]
            relative_error = (sum-b[i])/b[i]
            resids.append(relative_error)
        return resids
    ans = scipy.optimize.leastsq(f,[0.0]*len(A[0]))
    # print "ans:",ans
    if len(A[0])==1:
        x = [ans[0]]
    else:
        x = ans[0]
    resids = sum([r*r for r in f(x)])
    return (x,resids,0,0)
It seems to me that you're giving the leastsq function two arguments while it typically needs three: you're supplying the function and the initial values, but not the actual data values (via args) over which the least-squares fit is to be made.
Instead of hard-coding the calculation of the residuals, try wrapping the residuals in a function that computes the difference between the data values and the function to minimize. For example, fitting a Gaussian function to some data set:
import numpy as np
from scipy.optimize import leastsq

M = np.array(data)  # your data as a Nx2 matrix of (x, y) data points
initials = [3,2,1]  # just some initial guess values

def gaussian(x, p):
    return p[0]*np.exp((-(x-p[1])**2.0)/p[2]**2.0)  # definition of the function

def residuals(p, y, x):
    return y - gaussian(x, p)  # definition of the residual

cnsts = leastsq(residuals, initials, args=(M[:,1], M[:,0]))[0]  # returns the optimized parameters
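To see this run end to end, here is a minimal self-contained sketch with synthetic data (the true parameters, noise level, and initial guess are made up for illustration):
import numpy as np
from scipy.optimize import leastsq

rng = np.random.default_rng(0)
x = np.linspace(-5, 5, 200)
true_p = [3.0, 0.5, 1.2]  # amplitude, center, width
y = true_p[0]*np.exp(-(x - true_p[1])**2/true_p[2]**2) + 0.05*rng.standard_normal(x.size)

def gaussian(x, p):
    return p[0]*np.exp(-(x - p[1])**2/p[2]**2)

def residuals(p, y, x):
    return y - gaussian(x, p)

fit_p = leastsq(residuals, [1.0, 0.0, 1.0], args=(y, x))[0]
print(fit_p)  # should be close to true_p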
