Object values not being reset in Python function

Here is my code. In the calculateOptimalLambda() function, I am attempting to make a copy of n,
store it as m, remove one point from m, and do some calculations and a graph. Then the loop should
restart, make a fresh copy of n as m, remove the next point, and so on.
However, in the next iteration of the loop, the previously removed point is still gone. Eventually,
I run out of points to remove, and I get an error.
How do I get a fresh copy of m so I can remove the next point?
import numpy as np
from matplotlib import pyplot as plt

class Data:
    def __init__(self, points, sigma, lamda):
        self.points = points
        self.sigma = sigma
        self.sample = np.random.uniform(-1, 1, (points, 2))
        self.transformedData = np.ones((points, 5))
        self.weight = np.zeros((5, 1))
        self.lamda = lamda

    def changeLamda(self, x):
        self.lamda = x

    def getPoints(self):
        return self.points

    def getWeight(self):
        return self.weight

    def getSample(self):
        return self.sample

    def removePoint(self, x):
        self.points = self.points - 1
        self.sample = np.delete(self.sample, x, 0)
        self.transformedData = np.delete(self.transformedData, x, 0)

    def transformedFunction(self, x):
        transformedData = np.ones((1, 5))
        transformedData[0, 1] = x
        transformedData[0, 2] = 0.5 * (3*x**2 - 1)
        transformedData[0, 3] = 0.5 * (5*x**3 - 3*x)
        transformedData[0, 4] = 0.125 * (35*x**4 - 30*x**2 + 3)
        return np.dot(transformedData, self.weight)

    def setY(self):
        for i in range(len(self.sample[0:, 0])):
            self.sample[i, 1] = np.random.normal(0, self.sigma) + self.sample[i, 0]**2

    def transform(self):
        for i in range(len(self.sample[0:, 0])):
            self.transformedData[i, 1] = self.sample[i, 0]
            self.transformedData[i, 2] = 0.5 * (3*self.sample[i, 0]**2 - 1)
            self.transformedData[i, 3] = 0.5 * (5*self.sample[i, 0]**3 - 3*self.sample[i, 0])
            self.transformedData[i, 4] = 0.125 * (35*self.sample[i, 0]**4 - 30*self.sample[i, 0]**2 + 3)

    def calculateWeight(self):
        z = self.transformedData
        zProd = np.linalg.inv(np.matmul(np.transpose(z), z) + np.identity(5)*self.lamda)
        next1 = np.matmul(zProd, np.transpose(z))
        a = self.sample[0:, 1]
        a = a.reshape((-1, 1))
        print(a)
        self.weight = np.matmul(next1, a)

    def calculateError(self):
        error = (np.matmul(self.transformedData, self.weight) - self.sample[1, 0:])
        return error/self.points
def calculateOptimalLambda(n, L):
    a = 0
    for i in range(len(L)):
        n.changeLamda(L[i])
        for x in range(n.getPoints()):
            a += 1
            plt.subplot(4, 5, a)
            m = n
            m.removePoint(x)
            m.calculateWeight()
            weight = m.getWeight()
            error = m.calculateError()
            twoD_plot(m)
            print(error)

def twoD_plot(n):
    t = np.linspace(-1, 1, 400)
    x = np.square(t)
    plt.plot(t, x, 'b')
    error = 0
    y = x
    for i in range(len(t)):
        y[i] = n.transformedFunction(t[i])
        error += (y[i] - t[i]**2)**2
    """print(error/len(t))"""
    plt.plot(t, y, 'r')
    plt.scatter(n.getSample()[0:, 0], n.getSample()[0:, 1], c='g', marker='o')

n = Data(5, 0.1, 0)
n.setY()
n.transform()
n.calculateWeight()
L = [1, 0.01, 0.00001, 0]
calculateOptimalLambda(n, L)
plt.show()
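For reference, m = n only binds a second name to the same Data object, so m.removePoint(x) also mutates n. A minimal sketch of the loop rewritten with an independent copy via the standard library's copy module (assuming the Data class above, otherwise unchanged):

import copy

def calculateOptimalLambda(n, L):
    a = 0
    for i in range(len(L)):
        n.changeLamda(L[i])
        for x in range(n.getPoints()):
            a += 1
            plt.subplot(4, 5, a)
            m = copy.deepcopy(n)  # fresh, independent copy each iteration
            m.removePoint(x)      # n keeps all of its points
            m.calculateWeight()
            error = m.calculateError()
            twoD_plot(m)
            print(error)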

Related

Kalman Filter giving weird results

I am learning Kalman filters and tried to implement the following problem in Python
(problem statement description).
My approach:
I modeled it as a system with no control input, with x and theta as the state vector. Then I followed the formulation (truck example) here, but I am getting bizarre results in my code. I suspect my choice of state vector might be incorrect, but I am unsure.
Can someone please help me with the state vectors and other matrices required for evaluation?
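For reference, the standard predict/update equations behind that formulation, which the code below implements with F = transition_model, B = control_model, H = obser_model, Q = process_cov and R = obser_cov, are:

predict:  x̂ₖ = F·x̂ₖ₋₁ + B·uₖ
          Pₖ = F·Pₖ₋₁·Fᵀ + Q
update:   yₖ = zₖ − H·x̂ₖ
          Sₖ = H·Pₖ·Hᵀ + R
          Kₖ = Pₖ·Hᵀ·Sₖ⁻¹
          x̂ₖ ← x̂ₖ + Kₖ·yₖ
          Pₖ ← (I − Kₖ·H)·Pₖ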
from numpy import identity
from numpy.linalg import inv
import numpy as np
import matplotlib.pyplot as plt

class KalmanFilter:
    def __init__(self, state_estimate, error_cov, transition_model,
                 control_model, obser_model, process_cov, obser_cov):
        self.state_estimate = state_estimate
        self.error_cov = error_cov
        self.transition_model = transition_model
        self.control_model = control_model
        self.obser_model = obser_model
        self.process_cov = process_cov
        self.obser_cov = obser_cov

    def predict(self, control_input):
        self.state_estimate = self.transition_model * self.state_estimate \
            + self.control_model * control_input
        self.error_cov = self.process_cov \
            + self.transition_model * self.error_cov * self.transition_model.T
        return self.state_estimate

    def update(self, obser):
        innovation = obser - self.obser_model * self.state_estimate
        innovation_cov = self.obser_cov \
            + self.obser_model * self.error_cov * self.obser_model.T
        kalman_gain = self.error_cov * self.obser_model.T * inv(innovation_cov)
        self.state_estimate = self.state_estimate + kalman_gain * innovation
        n = self.error_cov.shape[0]
        self.error_cov = (identity(n) - kalman_gain * self.obser_model) \
            * self.error_cov
        return self.state_estimate

class Process:
    acc_std_dev = 1
    obs_std_dev = (0.010)*(0.010)

    def __init__(self, initial_state, transition_model):
        self.state = initial_state
        self.transition_model = transition_model

    def update(self):
        rand_acc = np.random.normal(0, Process.acc_std_dev)
        G = np.array([[0.25, 1]]).T
        self.state = self.transition_model * self.state + G * rand_acc
        return self.state

    def observe(self):
        return self.state[0, 0] + np.random.normal(0, Process.obs_std_dev)

if __name__ == '__main__':
    # delta time
    dt = 1
    initial_state = np.array([[0, 0]]).T
    # Assuming perfect initial state estimate
    error_cov = np.matrix([[0, 0],
                           [0, 0]])
    transition_model = np.matrix([[1, 0],
                                  [0, (1/200)]])
    # Assuming no input control
    control_model = np.array([[0, 0]]).T
    # Making observations of the position
    obser_model = np.matrix([[1, 0]])
    G = np.array([[1, 0]]).T
    process_cov = G * G.T * Process.acc_std_dev
    kf = KalmanFilter(initial_state, error_cov, transition_model,
                      control_model, obser_model, process_cov,
                      Process.obs_std_dev**2)
    p = Process(initial_state, transition_model)
    positions = list()
    velocities = list()
    observations = list()
    positionEstimates = list()
    velocityEstimates = list()
    for i in range(0, 100):
        state = p.update()
        positions.append(state[0, 0])
        velocities.append(state[1, 0])
        obs = p.observe()
        observations.append(obs)
        kf.predict(np.array([[0, 0]]))
        estimate = kf.update(obs)  # update once per observation
        positionEstimates.append(estimate[0, 0])
        velocityEstimates.append(estimate[1, 0])
    xs = np.arange(len(observations))
    # Position - State, Observations, Estimate
    pStates, = plt.plot(xs, positions, color='k')
    pObs, = plt.plot(xs, observations, color='r')
    pEstimates, = plt.plot(xs, positionEstimates, color='g')
    plt.legend((pObs, pStates, pEstimates),
               ('observations', 'true state', 'estimates'))
    plt.title("Kalman Filter - Position Observations vs Estimates")
    plt.ylabel('x', rotation=90)
    plt.xlabel('t')
    plt.show()
    # Velocity - State, Estimate
    pStates, = plt.plot(xs, velocities, color='k')
    pEstimates, = plt.plot(xs, velocityEstimates, color='g')
    plt.legend((pStates, pEstimates),
               ('true state', 'estimates'))
    plt.title("Kalman Filter - Velocity")
    plt.ylabel('ẋ', rotation=90)
    plt.xlabel('t')
    plt.show()

Logistic regression - strange behaviour of the decision boundary when additional parameters are added

I am trying to build a logistic regression model for a dataset consisting of two parameters
x₁ and x₂, but instead of analyzing just the two of them, I have added their quadratic terms as well: x₁², x₂² and x₁·x₂.
At first glance everything looks fine and the error function is decreasing, but while drawing the plot of the decision boundary I noticed that after circa 500 iterations something strange happens to it.
Here is an animation of the error function as a function of iterations and a respective plot of the decision boundary:
Now, I interpret the decision boundary as a quadratic function
x₂ = f(x₁), where the relation between both parameters is given by:

0.5 = θ₀ + θ₁x₁ + θ₂x₂ + θ₃x₁² + θ₄x₁x₂ + θ₅x₂²
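Solving this for x₂ with the quadratic formula (the quantities the animation code below computes as minb and delta_x) gives:

θ₅x₂² + (θ₄x₁ + θ₂)x₂ + (θ₃x₁² + θ₁x₁ + θ₀ − 0.5) = 0

x₂ = [−(θ₄x₁ + θ₂) ± √((θ₄x₁ + θ₂)² − 4θ₅(θ₃x₁² + θ₁x₁ + θ₀ − 0.5))] / (2θ₅)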
Here is the python code I use to do everything:
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from math import log
from matplotlib.animation import FuncAnimation

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def loadData(filepath):
    source = ""
    try:
        f = open(filepath, "r")
        source = f.read()
        f.close()
    except IOError:
        print("Error while reading file (" + filepath + ")")
        return ""
    raw_data = source.split("\n")
    raw_data = [x.split(",") for x in raw_data if x != ""]
    raw_data = np.matrix(raw_data).astype(float)
    return (raw_data[:, :np.size(raw_data, 1)-1], raw_data[:, np.size(raw_data, 1)-1:])

def standardize(dataset, skipfirst=True):
    means = np.amin(dataset, 0)
    deviation = np.std(dataset, 0)
    if skipfirst:
        dataset[:, 1:] -= means[:, 1:]
        dataset[:, 1:] /= deviation[:, 1:]
        return dataset
    else:
        dataset -= means
        dataset /= deviation
        return dataset

def error(X, Y, Theta):
    "Calculates error values"
    v_sigm = np.vectorize(sigmoid)
    h_x = X @ Theta
    sigmo = v_sigm(h_x)
    partial_vect = (Y - 1).T @ np.log(1 - sigmo) - Y.T @ np.log(sigmo)
    return 1/(2*np.size(Y, axis=0))*np.sum(partial_vect)

def gradientStep(X, Y, Theta, LR):
    "Returns new theta values"
    v_sigm = np.vectorize(sigmoid)
    h_x = X @ Theta
    modif = -1*LR/np.size(Y, 0)*(h_x - Y)
    sums = np.sum(modif.T @ X, axis=0)
    return Theta + sums.T

X, Y = loadData("ex2data1.txt")
# add bias to X
X = np.append(np.ones((np.size(X, 0), 1)), X, axis=1)
added_params = [[x[1]**2, x[1]*x[2], x[2]**2] for x in np.array(X)]
X = np.append(X, np.matrix(added_params), axis=1)
# standardize X
X = standardize(X)
# create vector of parameters
Theta = np.zeros((np.size(X, 1), 1))
iterations = 3000
Theta_vals = []
Error_vals = []
for i in range(0, iterations):
    Theta_vals.append(np.asarray(Theta).flatten())
    Error_vals.append(error(X, Y, Theta))
    Theta = gradientStep(X, Y, Theta, 0.07)
# CALCULATING FINISHES HERE
# plot data:
fig = plt.figure()
def_ax = fig.add_subplot(211)
def_ax.set_xlim(np.amin(X[:, 1:2]), np.amax(X[:, 1:2]))
def_ax.set_ylim(np.amin(X[:, 2:3]), np.amax(X[:, 2:3]))
err_ax = fig.add_subplot(212)
err_ax.set_ylim(0, error(X, Y, Theta))
err_ax.set_xlim(0, iterations)
positive_X1 = []
positive_X2 = []
negative_X1 = []
negative_X2 = []
for i in range(0, np.size(Y, 0)):
    if (Y[i, 0] == 1):
        positive_X1.append(X[i, 1])
        positive_X2.append(X[i, 2])
    else:
        negative_X1.append(X[i, 1])
        negative_X2.append(X[i, 2])
err_ax.set_ylim(np.amin(Error_vals), np.amax(Error_vals))

def animation(frame):
    global Theta_vals, Error_vals, def_ax, err_ax, positive_X1, positive_X2, negative_X1, negative_X2
    def_limX = def_ax.get_xlim()
    def_limY = def_ax.get_ylim()
    err_limX = err_ax.get_xlim()
    err_limY = err_ax.get_ylim()
    def_ax.clear()
    err_ax.clear()
    def_ax.set_xlim(def_limX)
    def_ax.set_ylim(def_limY)
    err_ax.set_xlim(err_limX)
    err_ax.set_ylim(err_limY)
    def_ax.scatter(positive_X1, positive_X2, marker="^")
    def_ax.scatter(negative_X1, negative_X2, marker="o")
    Theta = Theta_vals[frame]
    res_x = np.linspace(*def_ax.get_xlim(), num=5)
    delta_x = [(Theta[4]*x+Theta[2])**2 - 4*Theta[5]*(Theta[3]*x**2+Theta[1]*x+Theta[0]-0.5) for x in res_x]
    delta_x = [np.sqrt(x) if x >= 0 else 0 for x in delta_x]
    minb = [-(Theta[4]*x+Theta[2]) for x in res_x]
    res_1 = []
    res_2 = []
    for i in range(0, len(res_x)):
        if Theta[5] == 0:
            res_1.append(0)
            res_2.append(0)
        else:
            res_1.append((minb[i]+delta_x[i])/(2*Theta[5]))
            res_2.append((minb[i]-delta_x[i])/(2*Theta[5]))
    def_ax.plot(res_x, res_1)
    def_ax.plot(res_x, res_2)
    err_x = np.linspace(0, frame, frame)
    err_y = Error_vals[0:frame]
    err_ax.plot(err_x, err_y)

anim = FuncAnimation(fig, animation, frames=iterations, interval=3, repeat_delay=2000)
print(error(X, Y, Theta))
anim.save("anim.mp4")
What could be the reason for such strange behaviour?
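For reference, the standard logistic-regression cost and gradient-descent step (the textbook form that error and gradientStep are modeled on) are:

J(θ) = −(1/m)·[yᵀ·log σ(Xθ) + (1 − y)ᵀ·log(1 − σ(Xθ))]
θ ← θ + (α/m)·Xᵀ·(y − σ(Xθ))

where σ is the sigmoid, m the number of samples and α the learning rate.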

Python - Parallelisation of a loop

I am trying to parallelize a loop that is very costly.
Here is the code:
import numpy as np

class em:
    def __init__(self, k, x, iterations):
        self.k = k
        self.x = x
        self.iterations = iterations
        self.n = self.x.shape[0]
        self.pi = np.array([1 / self.k for _ in range(self.k)])
        self.z = np.ndarray(shape=(self.k, self.n))

    def fit(self):
        for i in range(self.iterations):
            print('iteration', i)
            self.expectation_step()
            self.maximization_step()

    def expectation_step(self):
        # update z
        pass

    def maximization_step(self):
        # update pi and parameters
        pass

class bmm_em(em):
    def __init__(self, k, x, iterations=1000, d=784):
        super().__init__(k, x, iterations)
        self.d = d
        self.mu = np.random.rand(self.k, self.d)
        for m in range(self.k):
            normalization_factor = 0.0
            for i in range(self.d):
                self.mu[m, i] = np.random.random() * 0.5 + 0.25
                normalization_factor += self.mu[m, i]
            for i in range(self.d):
                self.mu[m, i] /= normalization_factor

    def expectation_step(self):
        prod = np.zeros(self.k)
        for n in range(self.n):
            for m in range(self.k):
                t = self.pi[m]
                t *= np.prod(np.power(self.mu[m], self.x[n]))
                t *= np.prod(np.power((1.0 - self.mu[m]), (1.0 - self.x[n])))
                prod[m] = t
        s = sum(prod)
        for n in range(self.n):
            for m in range(self.k):
                if s > 0.0:
                    self.z[m, n] = prod[m] / s
                else:
                    self.z[m, n] = prod[m] / float(self.k)

    def maximization_step(self):
        for m in range(self.k):
            n_m = np.sum(self.z[m])
            self.pi[m] = n_m / self.n  # update pi
            self.mu[m] = 0
            for i in range(self.n):
                self.mu[m] += self.z[m, i] * self.x[i].T
            self.mu[m] /= n_m
The very costly part is the first loop in bmm_em.expectation_step.
I looked at the joblib module but couldn't figure out how I can rewrite my code to make it work.
Can anyone give me a hint? :)
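For reference, the general joblib pattern for an embarrassingly parallel loop like the one over n looks roughly like this (a sketch; per_sample is a hypothetical helper standing in for the loop body, and the toy shapes are made up):

from joblib import Parallel, delayed
import numpy as np

def per_sample(pi, mu, x_n):
    # hypothetical helper: the per-data-point body of expectation_step
    prod = pi * np.prod(np.power(mu, x_n) * np.power(1.0 - mu, 1.0 - x_n), axis=1)
    s = prod.sum()
    return prod / s if s > 0.0 else prod / float(len(pi))

if __name__ == "__main__":
    k, n, d = 3, 100, 20
    pi = np.full(k, 1.0 / k)
    mu = np.random.rand(k, d) * 0.5 + 0.25
    x = (np.random.rand(n, d) > 0.5).astype(float)
    # one task per data point, spread over all cores
    rows = Parallel(n_jobs=-1)(delayed(per_sample)(pi, mu, x[i]) for i in range(n))
    z = np.stack(rows, axis=1)  # shape (k, n), like self.z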
As @Sergei noted, the use of numpy is preferred here.
Here is what my code became; it's way, way faster:
def _log_support(self):
    pi = self.pi; mu = self.mu
    log_support = np.ndarray(shape=(self.k, self.n))
    for k in range(self.k):
        # self.xc: presumably the precomputed complement 1 - self.x
        log_support[k, :] = np.log(pi[k]) \
            + np.sum(self.x * np.log(mu[k, :].clip(min=1e-20)), 1) \
            + np.sum(self.xc * np.log((1 - mu[k, :]).clip(min=1e-20)), 1)
    return log_support

def expectation_step(self, log_support):
    log_normalisation = np.logaddexp.reduce(log_support, axis=0)
    log_responsibilities = log_support - log_normalisation
    self.z = np.exp(log_responsibilities)
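In log space, the responsibilities this computes are

log zₖₙ = ℓₖₙ − log Σₖ' exp(ℓₖ'ₙ),   where   ℓₖₙ = log πₖ + Σ_d [xₙ_d·log μₖ_d + (1 − xₙ_d)·log(1 − μₖ_d)]

and the log-sum-exp reduction (np.logaddexp.reduce) keeps the normalization numerically stable where the direct product of 784 factors would underflow.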

What is wrong with my Implementation of 4th Order runge kutta in python for nonholonomic constraints?

I am trying to implement 4th-order Runge-Kutta for nonholonomic motion for car-like robots.
Essentially, I am passing ±pi/4 to calculate hard left and hard right turns to get different trajectories.
But no matter whether I pass +pi/4 or -pi/4, I get the same answer; I cannot figure out what I am doing wrong.
The constraint equations that I am using are:
thetadot = (s/L)*tan(phi)
xdot = s*cos(theta)
ydot = s*sin(theta)
where s is the speed and L is the length of the car-like robot.
#! /usr/bin/env python
import sys, random, math, pygame
from pygame.locals import *
from math import sqrt, cos, sin, atan2, tan
import numpy as np
import matplotlib.pyplot as plt

XDIM = 640
YDIM = 480
WINSIZE = [XDIM, YDIM]
PHI = 45
s = 0.5
white = 255, 240, 200
black = 20, 20, 40
red = 255, 0, 0
green = 0, 255, 0
blue = 0, 0, 255
cyan = 0, 255, 255
pygame.init()
screen = pygame.display.set_mode(WINSIZE)
X = XDIM/2
Y = YDIM/2
THETA = 45

def main():
    nodes = []
    nodes.append(Node(XDIM/2.0, YDIM/2.0, 0.0))
    plt.plot(runge_kutta(nodes[0], (3.14/4)))   # Hard left turn
    plt.plot(runge_kutta(nodes[0], 0))          # Straight ahead
    plt.plot(runge_kutta(nodes[0], -(3.14/4)))  # Hard right turn
    plt.show()

class Node:
    x = 0
    y = 0
    theta = 0
    distance = 0
    parent = None

    def __init__(self, xcoord, ycoord, theta):
        self.x = xcoord
        self.y = ycoord
        self.theta = theta

def rk4(f, x, y, n):
    x0 = y0 = 0
    vx = [0]*(n + 1)
    vy = [0]*(n + 1)
    h = 0.8
    vx[0] = x = x0
    vy[0] = y = y0
    for i in range(1, n + 1):
        k1 = h*f(x, y)
        k2 = h*f(x + 0.5*h, y + 0.5*k1)
        k3 = h*f(x + 0.5*h, y + 0.5*k2)
        k4 = h*f(x + h, y + k3)
        vx[i] = x = x0 + i*h
        vy[i] = y = y + (k1 + k2 + k2 + k3 + k3 + k4)/6
    print "1"
    print vy
    return vy

def fun1(x, y):
    x = (0.5/2)*tan(y)
    print "2"
    print x
    return x

def fun2(x, y):
    x = 0.5*cos(y)
    print "3"
    print x
    return x

def fun3(x, y):
    x = 0.5*sin(y)
    print "4"
    print x
    return x

def runge_kutta(p, phi):
    x1 = p.x
    y1 = p.y
    theta1 = p.theta
    fi = phi
    for i in range(0, 5):
        x2 = rk4(fun2, x1, theta1, 5)
        y2 = rk4(fun3, y1, theta1, 5)
        theta2 = rk4(fun1, theta1, fi, 5)
        theta1 = theta2
    print "5"
    print zip(x2, y2)
    return zip(x2, y2)

# if python says run, then we should run
if __name__ == '__main__':
    main()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
I can't really say much about the algorithm, but the way you set up your rk4 function, the x and y arguments will never have any effect:
def rk4(f, x, y, n):
    x0 = y0 = 0     # x0 and y0 will both be 0 after this
    vx = [0]*(n + 1)
    vy = [0]*(n + 1)
    h = 0.8
    vx[0] = x = x0  # now x will be 0
    vy[0] = y = y0  # and y will be 0 too
    ...
The rest of the function will use x=0 and y=0 in any case.
Also, I don't know if it's intentional, but the other functions fun1, fun2 and fun3 never use the parameter passed as x; they only use y. They assign to x locally, but that won't be reflected outside the function.
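For illustration, a minimal sketch of rk4 rewritten so that the caller's x and y are actually used as the initial condition (keeping the question's step size h = 0.8; this is one possible fix, not the original author's code):

def rk4(f, x, y, n, h=0.8):
    # keep the caller's (x, y) as the initial condition instead of zeroing it
    vx = [0.0]*(n + 1)
    vy = [0.0]*(n + 1)
    vx[0], vy[0] = x, y
    x0 = x
    for i in range(1, n + 1):
        k1 = h*f(x, y)
        k2 = h*f(x + 0.5*h, y + 0.5*k1)
        k3 = h*f(x + 0.5*h, y + 0.5*k2)
        k4 = h*f(x + h, y + k3)
        x = vx[i] = x0 + i*h
        y = vy[i] = y + (k1 + 2*k2 + 2*k3 + k4)/6
    return vy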

Does anyone have example numpy.fftn code that correctly implements multivariate_normal pdf?

With the code below, I'm attempting to implement the Levy-Khintchine formula (https://en.wikipedia.org/wiki/L%C3%A9vy_process#L.C3.A9vy.E2.80.93Khintchine_representation). In the limit of no jumps, the Levy-Khintchine formula reduces to the multivariate normal distribution. My code uses the (multi-dimensional) trapezoidal integration rule (http://mathfaculty.fullerton.edu/mathews/n2003/SimpsonsRule2DMod.html) to approximate the Fourier transform of the characteristic function as a discrete Fourier transform. For the 1-dimensional case the code works perfectly; for the 2-D case, I can't find what I'm doing wrong.
Does anyone have example numpy.fftn code that correctly implements multivariate_normal pdf?
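For reference, the Lévy-Khintchine exponent that logCF computes below (in the code's notation: μ = mean, Σ = cov, and the jump term supplied by jump_measure) is

ψ(k) = i·μᵀk − ½·kᵀΣk + ∫(e^{i·kᵀx} − 1 − i·kᵀx·1{|x|≤1}) ν(dx)

With ν ≡ 0 this reduces to exp(i·μᵀk − ½·kᵀΣk), the characteristic function of N(μ, Σ), which is why the no-jump limit should reproduce the multivariate normal pdf.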
import numpy as np

class LevyKhintchine:
    def __init__(self, mean, cov, jump_measure):
        self.mean = mean
        self.cov = cov
        self.jump_measure = jump_measure
        self.factors = mean.shape[0]

    def logCF(self, k):
        rolled = Roll(k)
        out = np.empty(Shape(k))
        return (self.jump_measure(k) -
                Dot(rolled, self.cov, rolled, out)*0.5 +
                np.sum(np.multiply(Roll(k), self.mean), axis=-1)*1j)

    def pdf_grid(self, J):
        diag = np.diagonal(self.cov)
        tmp = np.pi*2/J
        dk = np.sqrt(tmp/diag)
        dx = np.sqrt(tmp*diag)
        k = Grid(np.zeros(self.factors), dk, J)
        x0 = self.mean - dx*J*0.5
        f = np.exp(self.logCF(k) - Coef(dk, x0, J)*1j)
        for n in range(self.factors):
            f[ 0] *= 0.5
            f[-1] *= 0.5
            f = np.rollaxis(f, 0, factors)
        pdf = np.fft.fftn(f)
        return Grid(x0, dx, J), pdf.real*(np.product(dk)/np.pi)

def Grid(left, width, J):
    def Slice(slices, j):
        slices.append(slice(left[j], left[j] + width[j]*(J-1), 1j*J))
        return slices
    slices = reduce(Slice, range(len(left)), [])
    return np.mgrid[slices]

def Shape(grid):
    return np.asarray(grid).shape[1:]

def Roll(grid):
    grid = np.asarray(grid)
    try:
        rolled = np.rollaxis(grid, 0, len(grid)+1)
    except ValueError:
        rolled = grid
    return rolled

def Dot(x, cov, y, out):  # x & y are "rolled"
    for j in np.ndindex(out.shape):
        out[j] = np.dot(x[j].T, np.dot(cov, y[j]))
    return out

def Coef(dks, x0s, J):
    factors = len(dks)
    coef = np.zeros((J,)*factors)
    for n, (dk, x0) in enumerate(zip(dks, x0s)):
        shape = np.ones(factors, dtype=int)
        shape[n] = J
        coef += np.arange(J).reshape(shape)*(dk*x0)
    return coef
Here are the tests:

from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt

J = 64
factors = 1
mean = np.full((factors,), -1)
cov = np.identity(factors)
rv = LevyKhintchine(mean, cov, lambda k: 0)
rv0 = multivariate_normal(mean, cov)
x, pdf = rv.pdf_grid(J)
plt.plot(x[0], pdf, x[0], rv0.pdf(Roll(x)))

factors = 2
mean = np.full((factors,), 5)
cov = np.identity(factors)
rv = LevyKhintchine(mean, cov, lambda k: 0)
x, pdf = rv.pdf_grid(J)
rv0 = multivariate_normal(mean, cov)
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.contourf(x[0], x[1], pdf)
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.contourf(x[0], x[1], rv0.pdf(Roll(x)))
I figured it out: in 1-D I can get away with integrating over only positive wave numbers k; in higher dimensions I cannot.
Here's the corrected code:
import numpy as np

class LevyKhintchine:
    def __init__(self, mean, cov, jump_measure):
        self.mean = mean
        self.cov = cov
        self.jump_measure = jump_measure
        self.factors = mean.shape[0]

    def logCF(self, k):
        rolled = Roll(k)
        out = np.empty(Shape(k))
        return (self.jump_measure(k) -
                Dot(rolled, self.cov, rolled, out)*0.5 +
                np.sum(np.multiply(Roll(k), self.mean), axis=-1)*1j)

    def pdf_grid(self, J):
        diag = np.diagonal(self.cov)
        tmp = np.pi*2/J
        dk = np.sqrt(tmp/diag)
        dx = np.sqrt(tmp*diag)
        k0 = -0.5*dk*J
        x0 = -0.5*dx*J + self.mean
        k = Grid(k0, dk, J)
        x = Grid(x0, dx, J)
        f = np.exp(-1j*Coef(dk, x0, J) + self.logCF(k))
        for n in range(self.factors):
            f[ 0] *= 0.5
            f[-1] *= 0.5
            f = np.rollaxis(f, 0, factors)
        c = ((0.5/np.pi)**self.factors*np.product(dk)*np.exp(-1j*np.dot(k0, x0)))
        pdf = np.fft.fftn(f)*np.exp(-1j*Coef(k0, dx, J))*c
        return x, pdf.real

def Grid(left, width, J):
    def Slice(slices, j):
        slices.append(slice(left[j], left[j] + width[j]*(J-1), 1j*J))
        return slices
    slices = reduce(Slice, range(len(left)), [])
    return np.mgrid[slices]

def Shape(grid):
    return np.asarray(grid).shape[1:]

def Roll(grid):
    grid = np.asarray(grid)
    try:
        rolled = np.rollaxis(grid, 0, len(grid)+1)
    except ValueError:
        rolled = grid
    return rolled

def Dot(x, cov, y, out):  # x & y are "rolled"
    for j in np.ndindex(out.shape):
        out[j] = np.dot(x[j].T, np.dot(cov, y[j]))
    return out

def Coef(dks, x0s, J):
    factors = len(dks)
    coef = np.zeros((J,)*factors)
    for n, (dk, x0) in enumerate(zip(dks, x0s)):
        shape = np.ones(factors, dtype=int)
        shape[n] = J
        coef += np.arange(J).reshape(shape)*(dk*x0)
    return coef
Here are the tests:

from scipy.stats import multivariate_normal
J = 32
for factors in range(1, 4):
    mean = np.full((factors,), -1)
    cov = np.identity(factors)
    rv = LevyKhintchine(mean, cov, lambda k: 0)
    rv0 = multivariate_normal(mean, cov)
    x, pdf = rv.pdf_grid(J)
    pdf0 = rv0.pdf(Roll(x))
    print np.allclose(pdf, pdf0)

which prints:

True
True
True
