TypeError says I have a numpy float but I cannot see one - Python

I have below code that converts an implied volatility into piecewise constant volatility. In the below code I get an error:
for j, _vol in enumerate(_boot_vol,2):
TypeError: 'numpy.float64' object is not iterable
But neither `_vol` nor `_boot_vol` is declared as a numpy array. Need your wisdom to resolve this, please.
Code:
# Tenor dates of the term structure (years, semi-annual grid out to 5Y).
termstruct = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]);
# Forward rates, one per accrual period (one fewer than the tenor dates).
forwardcurve = np.array([0.0112, 0.0118, 0.0123, 0.0127, 0.0132, 0.0137, 0.0145,0.0154, 0.0163, 0.0174]);
# Implied caplet volatilities to be bootstrapped into piecewise-constant vols.
capletvols = np.array([0.2366, 0.2487, 0.2573, 0.2564, 0.2476, 0.2376, 0.2252,0.2246, 0.2223]);
num_times = len(termstruct);
# Year fractions between consecutive tenor dates (all 0.5 on this grid).
tau= np.diff(termstruct);
class computevol:
    # NOTE(review): this is the original, *broken* version from the question;
    # the corrected version follows below.
    def _caliberatevol():
        global termstruct
        global forwardcurve
        global tau
        global capletvols
        _vols = np.zeros((len(forwardcurve),len(termstruct)))
        _boot_vol = []
        for i , _capvol in enumerate(capletvols,2):
            # BUG: this rebinds _boot_vol from the list above to a scalar
            # numpy.float64 ...
            _boot_vol = _capvol**2 * termstruct[i-1]
            # ... so on the next outer pass enumerate(_boot_vol, 2) raises
            # TypeError: 'numpy.float64' object is not iterable.
            for j, _vol in enumerate(_boot_vol,2):
                _boot_vol -= _vol**2*tau[j-1]
            # BUG: list.append takes a single argument, and tau(0) *calls*
            # the array instead of indexing it -- should be tau[0].
            _boot_vol.append(_boot_vol,np.sqrt(_boot_vol/tau(0)))
        _vols[1:,1] = _boot_vol
        for i in range(2,len(termstruct)):
            _vols[i:,i] = _boot_vol[:-i+1]
        return _vols

The cause: the loop body reassigned `_boot_vol` (the list) to a scalar `numpy.float64`, so the next `enumerate(_boot_vol, 2)` had nothing to iterate. Needed to use a temporary variable in between:
class computevol:
    """Bootstraps piecewise-constant caplet volatilities from cap vols."""

    # BUGFIX: the method had no `self` parameter, so calling it on an
    # instance (computevol()._caliberatevol()) raised TypeError; declaring
    # it a staticmethod keeps the class-level call working and fixes the
    # instance-level call.
    @staticmethod
    def _caliberatevol():
        """Return the lower-triangular matrix of bootstrapped vols.

        Reads the module-level termstruct, forwardcurve, tau and
        capletvols arrays; returns an array of shape
        (len(forwardcurve), len(termstruct)).
        """
        global termstruct
        global forwardcurve
        global tau
        global capletvols
        _vols = np.zeros((len(forwardcurve), len(termstruct)))
        _boot_vol = []
        for i, _capvol in enumerate(capletvols, 2):
            # Total variance implied by the cap vol up to termstruct[i-1].
            _temp = _capvol**2 * termstruct[i-1]
            # Remove the variance already explained by the earlier periods.
            for j, _vol in enumerate(_boot_vol, 2):
                _temp -= _vol**2 * tau[j-1]
            # Remaining variance belongs to the newest period.
            _boot_vol.append(np.sqrt(_temp / tau[0]))
        _vols[1:, 1] = _boot_vol
        # Shift the bootstrapped vols down the diagonal for later columns.
        for i in range(2, len(termstruct)):
            _vols[i:, i] = _boot_vol[:-i+1]
        return _vols

Related

DCPError Problem does not follow DCP rules. Specifically: The objective is not DCP

I am trying to solve the convex problem below:
In my opinion, the objective is convex.
my code is:
import cvxpy as cp
import numpy as np
# Problem data: per-task budget, time limits, sizes, rates and weights.
ppMax = 1
Tmax = np.array([1.19, 1.99, 4.16, 1.98, 2.53])
d = np.array([2648000, 5552000, 4744000, 4056000, 6168000])
p = np.array([0.19952623149688797, 0.00018021843172751523, 0.0020210434604112652, 0.001602417432034276, 0.003647501823979989])
r = np.array([8574212.020483451, 6619470.077787987, 7521159.373986546, 7135440.631765847, 6832684.423897811])
c = np.array([430000000.0, 700000000.0, 400000000.0, 220000000.0, 170000000.0])
fc = np.array([40000000000, 40000000000, 40000000000, 40000000000, 40000000000])
ff = np.array([4000000000, 4000000000, 4000000000, 4000000000, 4000000000])
W = np.array([0.7, 0.2, 0.3, 0.7, 0.5])
wt = np.array([0.609, 0.04000000000000001, 0.255, 0.308, 0.43])
we = np.array([4.336742687028045, 10.647756980938421, 8.263103073749088, 7.675258157093112, 6.322105707432189])
pp = cp.Variable(len(Tmax))
# NOTE(review): d/(W*cp.log(1+pp)) divides by a concave expression of the
# variable, which the DCP ruleset cannot certify as convex; rewriting the
# term with cp.inv_pos (d * cp.inv_pos(...)) would make the constraint DCP.
cons = [cp.sum(pp) <= ppMax, d/r+c/fc+d/(W*cp.log(1+pp)) <= Tmax]
# NOTE(review): the objective fails DCP for the same reason, and its last
# term multiplies the variable pp by a convex expression -- a product of two
# non-constant expressions is never DCP, so CVXPY raises DCPError even if
# the function happens to be convex on the feasible set.
# (Also: the name `object` shadows the Python builtin.)
object = cp.Minimize(wt*(2*d/r+c/ff+c/fc+d/(W*cp.log(1+pp)))+we*(2*p*(d/r)+pp*(d/(W*cp.log(1+pp)))))
prob = cp.Problem(object, cons)
prob.solve()
print(prob.value, pp.value)

Using dictionary entries as parameters in scipy solve_ivp

Can I somehow pass variables from dictionaries into the functions inside of solve_ivp,
such that I can call them directly, i.e. offFSH instead of para_dict["offFSH"] ?
I think that this would be better for readability, but please let me know if my current/another version would be preferable.
Please don't pay attention to the model/equations itself, I took it from a much larger model I want to simulate, hence my desire to organize variables with dictionaries.
Thanks!
from scipy.integrate import solve_ivp
def simulate_model(simu_dict, para_dict):
    """Integrate the receptor model over [0, simulationLength] with RK45.

    simu_dict supplies the initial concentrations and the horizon;
    para_dict is forwarded unchanged to the right-hand side `func`.
    """
    t_span = (0, simu_dict["simulationLength"])
    y0 = [
        simu_dict["FSHR_init"],
        simu_dict["FSHR_complex_init"],
        simu_dict["LHR_init"],
        simu_dict["LHR_complex_init"],
    ]
    rhs = lambda t, y: func(t, y, para_dict)
    return solve_ivp(fun=rhs, t_span=t_span, y0=y0, method="RK45", dense_output=True)
def func(t, y, para_dict):
    """Right-hand side of the receptor binding ODE system.

    y = [FSHR, FSHR_complex, LHR, LHR_complex]; the rate constants are
    read from para_dict once so the equations can use short names
    (the readability improvement the question asks about).
    """
    onFSH = para_dict["onFSH"]
    offFSH = para_dict["offFSH"]
    onLH = para_dict["onLH"]
    offLH = para_dict["offLH"]
    # Indices into the state vector.
    FSHR, FSHR_complex, LHR, LHR_complex = 0, 1, 2, 3
    # BUGFIX: the binding (on) term must consume the free receptor y[FSHR],
    # not the complex y[FSHR_complex] -- mirrors the LHR pair below and
    # makes dFSHR = -dFSHR_complex (mass conservation).
    dFSHR = offFSH * y[FSHR_complex] - onFSH * y[FSHR]
    dFSHR_complex = onFSH * y[FSHR] - offFSH * y[FSHR_complex]
    dLHR = offLH * y[LHR_complex] - onLH * y[LHR]
    # BUGFIX: the on-rate term was missing its y[LHR] factor.
    dLHR_complex = onLH * y[LHR] - offLH * y[LHR_complex]
    return [dFSHR, dFSHR_complex, dLHR, dLHR_complex]
###########################################################################################
# Initial receptor concentrations and the simulation horizon.
simu_dict = {
    "FSHR_init": 0.7,
    "FSHR_complex_init": 0.9,
    "LHR_init": 1.8,
    "LHR_complex_init": 6.2,
    "simulationLength": 2
}
# Binding (on) and unbinding (off) rate constants for the two receptors.
para_dict = {
    "onFSH": 1,
    "offFSH": 5,
    "onLH": 1,
    "offLH": 1,
}
test = simulate_model(simu_dict, para_dict)

Changing the code to Boundary Value Problem for ODE Python

I am struggling to change my code to solve a system of (Ordinary Differential Equation)ODE from initial value problem to boundary value problem. I have tried myself many times but I think i am making mistakes which are logically incorrect. So instead of pasting the change code, I am pasting below my original code which works fine.
The code below solves a system of ODEs with the function odeint and then uses the Particle Swarm Optimisation (PSO) algorithm for the optimisation process. I want to use the same equations with the function solve_bvp, with boundary conditions t(0) = 1 and t(1) = 2. The code follows. Thanks
from scipy import *
from scipy.integrate import odeint
from operator import itemgetter
import matplotlib
matplotlib.use('Agg')
from matplotlib.ticker import FormatStrFormatter
from pylab import *
from itertools import product
import itertools
from numpy import zeros_like
import operator
from pyswarm import pso
# The three model-structure libraries, filled in by ModelsProduct below.
modelsOne = []
modelsTwo = []
modelsThree = []
#step 1 start## Build the model structure libraries. The HIV model is a three-variable model, so we need three libraries: modelsOne, modelsTwo, modelsThree.
# Each library contains all possible structures of the model to be sought.
def ModelsProduct(modelsOne, modelsTwo, modelsThree):
    """Fill the three model-structure libraries in place and return them.

    Every candidate structure is 4 sign characters + a 1-tuple (the
    variable of the linear coupling term) + a 2-tuple (the variables of
    the bilinear term).  The only difference between the three libraries
    is which variables may appear in the linear term, so the three
    copy-pasted triple loops of the original are folded into one.
    """
    signs = list(product("+-", repeat=4))
    bilinear = [('a','a'),('a','b'),('a','c'),('b','b'),('b','c'),('c','c')]
    specs = (
        (modelsOne, [('b',), ('c',)]),    # equation for 'a' couples to b or c
        (modelsTwo, [('a',), ('c',)]),    # equation for 'b' couples to a or c
        (modelsThree, [('a',), ('b',)]),  # equation for 'c' couples to a or b
    )
    for library, linear in specs:
        # Same nesting order as the original loops, so the tuples appear
        # in the same sequence.
        library.extend(one + two + three
                       for one in signs
                       for two in linear
                       for three in bilinear)
    return modelsOne, modelsTwo, modelsThree
modelsOne, modelsTwo,modelsThree = ModelsProduct(modelsOne, modelsTwo, modelsThree)
#step 1 end##
VarList = ["a","b","c"]
# Initial values of the three state variables (T, I, V below).
initial_condi = [100, 150, 50000]
# Maps a variable name to its index in the state vector.
dictVar = {'a':0, 'b': 1, 'c': 2}
# Maps a sign character from a structure tuple to the matching operator.
ops = { "+": operator.add, "-": operator.sub }
# Integration grid: 0..60 in unit steps.
t_range = arange(0.0,60.0,1.0)
def odeFunc(Y, t, x, dictVar):
    """ODE right-hand side used by odeint during the PSO search.

    The last three entries of the parameter vector x select one structure
    from each model library; the rest are the numeric coefficients that
    GenModel consumes.
    """
    def _library_index(v):
        # The PSO upper bound is 192, one past the last library slot (191);
        # map 192 back to 191, otherwise truncate to int, exactly as the
        # three copy-pasted conditionals of the original did.
        return 191 if v == 192 else int(v)

    modelOne = modelsOne[_library_index(x[-3])]
    modelTwo = modelsTwo[_library_index(x[-2])]
    modelThree = modelsThree[_library_index(x[-1])]
    return GenModel(Y, x, modelOne, modelTwo, modelThree, dictVar)
def GenModel(Y,x,modelOne,modelTwo,modelThree, dictVar):
    """Evaluate the candidate right-hand side encoded by three structure tuples.

    Each structure tuple is four sign characters followed by the variable
    names of its linear and bilinear terms; equation i uses the four
    coefficients x[4*i] .. x[4*i+3] (the original's three copy-pasted
    stanzas, folded into one loop).
    """
    dydt = zeros_like(Y)
    for i, model in enumerate((modelOne, modelTwo, modelThree)):
        base = 4 * i
        # self term, linear coupling, bilinear coupling, constant term --
        # each added or subtracted according to the tuple's sign characters.
        dydt[i] = ops[model[0]](dydt[i], x[base]*Y[i])
        dydt[i] = ops[model[1]](dydt[i], x[base+1]*Y[dictVar[model[-3]]])
        dydt[i] = ops[model[2]](dydt[i], x[base+2]*Y[dictVar[model[-2]]]*Y[dictVar[model[-1]]])
        dydt[i] = ops[model[3]](dydt[i], x[base+3])
    return dydt
## equations
def pendulum_equations(w, t):
    """Reference HIV dynamics: return (dT/dt, dI/dt, dV/dt) for state w."""
    T, I, V = w
    # Infection term shared by all three equations.
    infection = 0.00002*T*V
    dT = 80 - 0.15*T - infection
    dI = infection - 0.55*I
    dV = 900*0.55*I - 5.5*V - infection
    return dT, dI, dV
# Reference trajectory produced by the true model; the third component (V)
# is scaled by 1/100, presumably to balance its magnitude against the other
# two in the least-squares objective -- confirm.
result_init = odeint(pendulum_equations, initial_condi, t_range)
result_init[:,2] = result_init[:,2]/100
def myfunc(xRand):
    """PSO objective: sum of squared deviations from the reference run."""
    trial = odeint(odeFunc, initial_condi, t_range, args=(xRand, dictVar))
    # Apply the same 1/100 scaling of V as was applied to result_init.
    trial[:, 2] = trial[:, 2] / 100
    residual = trial - result_init
    return sum(residual * residual)
# Reference parameter vector; the last three entries are structure indices
# into the model libraries (bounded by 192, clamped to 191 in odeFunc).
x = (0.15,0,0.00002,80,0.55,0,0.00002,0,5.5,495,0.00002,0,122,98,128)
lb = [0]*15
ub = [1,1,0.5,200,1,1,0.5,200,10,1000,0.5,200,192,192,192]
# Particle swarm search for parameters and structures reproducing result_init.
xopt1, fopt1 = pso(myfunc, lb, ub,omega= 0.7298,phip=1.49618,phig=1.49618,maxiter=1000,swarmsize= 1000,minstep=1e-20,minfunc=1e-20,debug = True)
If you have an ODE like
def f_ode(t, u):
    """Harmonic oscillator u'' = -u written as a first-order system."""
    position, velocity = u
    return [velocity, -position]
which you can solve as
# Solve on [0, 1] with 51 sample points, starting from u(0)=1, u'(0)=0.
tspan = np.linspace(0,1,51);
u_init = [1.0, 0.0]
# tfirst=True makes odeint call f(t, u) instead of the legacy f(u, t).
u = odeint(f_ode, u_init, tspan, tfirst=True)
or as
# Same integration via the newer API; tspan[[0,-1]] picks the interval ends.
res = solve_ivp(f_ode, tspan[[0,-1]], u_init, t_eval=tspan)
if res.success:
    u=res.y
you can switch to a boundary problem by encoding the necessary functions and initial guess
# Boundary conditions u(0)=1 and u(1)=2, written as residuals that must be 0.
def f_bc(u0, u1): return [u0[0]-1, u1[0]-2]
# Coarse mesh and a linear initial guess satisfying the boundary values.
t = np.linspace(0,1,11);
u = [ 1+t, 1+0*t]
res = solve_bvp(f_ode,f_bc,t,u)
if res.success:
    # res.sol is a callable interpolant; evaluate it on the finer grid.
    u = res.sol(tspan)
Note that you have to check whether your version of the new solver functions supports passing extra parameters the same way as odeint does (solve_ivp only gained an `args` keyword in SciPy 1.4). If that is not possible, use lambda constructs as wrappers, or explicitly defined functions.

How do I pass an array between two functions in python?

I wish to pass three arrays — xLinespace, yLinespace and zLinespace — from the function cubicSplineInterpolate to the function trajectoryMover. However, I am not sure how to achieve this using Python. After passing the arrays to the second function, I intend to iterate through each array simultaneously in order to alter the position of a robot. Do I need to set the arrays as arguments in each function?
class example_application:
    def cubicSplineInterpolate(self, x_axis, y_axis, z_axis):
        # Sample self.func 100 times per segment between consecutive knots.
        # NOTE(review): U, V, W and self.func are defined elsewhere in the
        # project. This method builds three local lists and then discards
        # them -- it neither returns them nor stores them on self, which is
        # exactly the question's problem.
        m=1
        xLinespace=[]
        yLinespace=[]
        zLinespace=[]
        while m<len(x_axis):
            for t in np.arange(m-1,m,1/float(100)):
                xLinespace.append(self.func(x_axis[m-1],x_axis[m],t,U[m-1],U[m],m-1,m))
                yLinespace.append(self.func(y_axis[m-1],y_axis[m],t,V[m-1],V[m],m-1,m))
                zLinespace.append(self.func(z_axis[m-1],z_axis[m],t,W[m-1],W[m],m-1,m))
            m=m+1
    def trajectoryMover(self):
        # Publish one pose per hand-specified waypoint.
        newPose = Pose()
        # NOTE(review): these lists shadow the names used in
        # cubicSplineInterpolate but stay empty -- locals are not shared
        # between methods.
        xLinespace=[]
        yLinespace=[]
        zLinespace=[]
        x_axis = [0.01, 0.02, 0.033, 0.0044, 0.0001, 0.10]
        y_axis = [0.002, 0.00033, 0.1014, 0.01512, 0.14316, 0.015143]
        z_axis = [0.003, 0.2124, 0.15417, 0.15615, 0.01241, 0.151561]
        self.cubicSplineInterpolate(x_axis,y_axis,z_axis)
        # BUG: a method object has no .xLinespace attribute; this line
        # raises AttributeError. The values must be *returned* by
        # cubicSplineInterpolate instead.
        print(self.cubicSplineInterpolate.xLinespace)
        for x, y, z in zip(x_axis, y_axis, z_axis):
            newPose.position.x = x
            newPose.position.y = y
            newPose.position.z = z
            newPose.orientation.x = -0.907106781172
            newPose.orientation.y = -0.0707106781191
            newPose.orientation.z = 2.59734823723e-06
            newPose.orientation.w = -2.59734823723e-06
            self.set_position_cartesian.publish(newPose)
            rospy.loginfo(newPose)
            rospy.sleep(1)
From the comments: if trajectoryMover calls cubicSplineInterpolate
class example_application:
    def cubicSplineInterpolate(self, x_axis, y_axis, z_axis):
        """Sample the spline 100 times per knot interval and return the
        interpolated coordinates as (xLinespace, yLinespace, zLinespace).

        NOTE(review): U, V, W and self.func are defined elsewhere in the
        project -- presumably the spline coefficients and basis function;
        confirm against the full class.
        """
        xLinespace = []
        yLinespace = []
        zLinespace = []
        for m in range(1, len(x_axis)):
            for t in np.arange(m-1, m, 1/float(100)):
                xLinespace.append(self.func(x_axis[m-1], x_axis[m], t, U[m-1], U[m], m-1, m))
                yLinespace.append(self.func(y_axis[m-1], y_axis[m], t, V[m-1], V[m], m-1, m))
                zLinespace.append(self.func(z_axis[m-1], z_axis[m], t, W[m-1], W[m], m-1, m))
        return (xLinespace, yLinespace, zLinespace)
    def trajectoryMover(self):
        """Interpolate the hard-coded waypoints, then publish one pose per
        waypoint with a fixed orientation."""
        newPose = Pose()
        x_axis = [0.01, 0.02, 0.033, 0.0044, 0.0001, 0.10]
        y_axis = [0.002, 0.00033, 0.1014, 0.01512, 0.14316, 0.015143]
        z_axis = [0.003, 0.2124, 0.15417, 0.15615, 0.01241, 0.151561]
        xLinespace, yLinespace, zLinespace = self.cubicSplineInterpolate(x_axis, y_axis, z_axis)
        # BUGFIX: print the returned list. The original accessed
        # self.cubicSplineInterpolate.xLinespace, which raises
        # AttributeError because methods do not expose their locals.
        print(xLinespace)
        # NOTE(review): to move along the *interpolated* path, zip over
        # xLinespace/yLinespace/zLinespace here instead of the raw waypoints.
        for x, y, z in zip(x_axis, y_axis, z_axis):
            newPose.position.x = x
            newPose.position.y = y
            newPose.position.z = z
            newPose.orientation.x = -0.907106781172
            newPose.orientation.y = -0.0707106781191
            newPose.orientation.z = 2.59734823723e-06
            newPose.orientation.w = -2.59734823723e-06
            self.set_position_cartesian.publish(newPose)
            rospy.loginfo(newPose)
            rospy.sleep(1)
Also note (although not super important): PEP 8 recommends snake_case (underscore-separated) names for functions, i.e. trajectory_mover and cubic_spline_interpolate().

Python K means clustering

I am trying to implement the code on this website to estimate what value of K I should use for my K means clustering.
https://datasciencelab.wordpress.com/2014/01/21/selection-of-k-in-k-means-clustering-reloaded/
However I am not getting any success - in particular I am trying to get the f(k) vs the number of clusters k graph which I can use to procure the ideal value of k to use.
My data format is as follows:
Each of the coordinates have 5 dimensions/variables i.e. they are data points that live in a five-dimensional space.
The list of the coordinates are below, where for example the first data point has coordinates ( 35.38361202590826,-24.022420305129415, 0.9608968122051765, -11.700331772145386, -9.4393980963685).
# Sample data: each VariableN list holds the N-th coordinate of all 50
# five-dimensional points (column-major layout -- see the note where the
# points matrix is assembled for K-means).
Variable1 = [35.38361202590826, 3.0, 10.0, 10.04987562112089, 5.385164807134505, 24.35159132377184, 10.77032961426901, 10.816653826391967, 18.384776310850235, 14.317821063276353, 24.18677324489565, 3.0, 24.33105012119288, 8.94427190999916, 2.82842712474619, 4.123105625617661, 4.47213595499958, 13.453624047073712, 12.529964086141668, 19.4164878389476, 5.385164807134505, 5.0, 24.041630560342618, 30.083217912982647, 15.132745950421555, 1.414213562373095, 21.470910553583888, 12.649110640673516, 9.0, 9.055385138137416, 16.124515496597102, 18.027756377319946, 7.615773105863908, 4.47213595499958, 5.0, 16.124515496597102, 8.246211251235321, 3.0, 23.02172886644268, 2.23606797749979, 10.0, 13.416407864998737, 14.7648230602334, 12.649110640673516, 2.82842712474619, 9.899494936611665, 12.806248474865697, 13.0, 10.19803902718557, 10.440306508910549]
Variable2 = [-24.022420305129415, -40.0, -21.0, -36.020346285601605, -14.298541039632994, -10.225204451297113, -7.242118188905023, -10.816653826391967, -16.263455967290593, -0.9079593845004517, -5.70559779110359, -1.0, -17.426292654367874, -0.4472135954999579, -12.727922061357855, -38.32062875574061, -15.205262246998569, -13.89960053482201, -6.943355894868313, -18.43793805396085, -14.298541039632994, -8.0, -9.899494936611665, -10.537436550735357, -9.251460406371256, -1.414213562373095, -0.23287321641631115, -4.743416490252569, -10.0, -25.951408627588936, -5.457528321925173, -11.648704120729812, -15.231546211727816, -9.838699100999074, -2.2, 4.713319914389921, -3.395498750508662, -32.0, -16.59301967354925, -4.47213595499958, -3.4, -13.416407864998737, 4.944183868793753, -3.478505426185217, -21.213203435596423, -18.384776310850235, -6.871645523098667, -21.0, -5.491251783869154, -8.620436566990362]
Variable3 = [0.9608968122051765, 22.0, 21.0, 18.507691737905798, 15.412713068695306, -8.08982038917884, -0.7427813527082074, -7.211102550927978, -14.849242404917499, -0.4190581774617469, -10.170848236315095, -7.0, 1.150792911137501, -5.366563145999495, -12.727922061357855, 4.85071250072666, 9.838699100999074, -8.473553267217696, 6.065460321953928, -10.249021432229634, 4.642383454426297, -9.0, 9.899494936611665, 4.354587344310195, -8.854969246098202, -8.48528137423857, -10.292996165600954, -11.067971810589327, -30.0, -10.932721081409808, -14.6360986815266, -22.188007849009164, 0.0, -7.155417527999327, -5.4, -12.279438724331637, 19.40285000290664, -7.0, 18.938629784469825, 8.94427190999916, 3.8, -8.94427190999916, -43.549455173073746, -8.538149682454623, -11.31370849898476, 1.4142135623730951, -10.619815808425212, 12.0, 7.060180864974626, -7.854175538813441]
Variable4 = [-11.700331772145386, -8.0, -5.0, -2.9851115706299676, -10.398938937914904, -8.459406092237773, -7.242118188905023, -10.539303728279352, -21.920310216782973, -8.03194840135015, -10.791021909261136, -10.0, -9.69954025101608, -2.6832815729997477, -23.33452377915607, -7.761140001162655, -17.44133022449836, -4.980070779856015, -2.7134954071899156, -6.48933015307002, -12.441587657862476, -5.2, -18.384776310850235, -10.603918800266811, -14.604091070057484, -4.949747468305833, -1.3506646552146047, -7.905694150420948, -14.0, -29.706080514133717, -2.4806946917841692, -23.574758339572238, -3.2826608214930637, -5.813776741499453, -13.4, -4.9613893835683385, -11.884245626780316, -19.0, -5.473090258814675, -2.23606797749979, -2.0, -2.6832815729997477, -6.163297699455227, -12.01665510863984, -12.727922061357855, -12.020815280171307, -8.589556903873333, -18.53846153846154, -5.491251783869154, -4.789131426105757]
Variable5 = [-9.4393980963685, -4.0, -2.0, -0.29851115706299675, -9.84185292338375, 6.118696639531204, -6.127946159842712, -2.218800784900916, 10.606601717798213, 0.6984302957695782, 0.7442084075352507, -0.0, 3.452378733412503, 1.3416407864998738, -6.363961030678928, 6.305926250944657, -5.813776741499453, -0.4459764877482998, -0.7980868844676221, 7.673890419106611, -1.4855627054164149, 1.4, -2.8284271247461903, -2.925218979383948, 3.9649116027305387, 0.7071067811865475, 0.4191717895493601, 1.5811388300841895, -4.0, 4.748555621218401, 4.341215710622296, 4.714951667914447, -5.120950881529179, 4.919349550499537, 6.2, 0.6201736729460423, -6.305926250944657, -9.0, -6.168085847235585, 0.0, -1.0, 1.3416407864998738, 3.3186987612451224, 4.427188724235731, 4.242640687119285, 4.949747468305833, 5.9346029517670305, 2.3076923076923075, -3.1378581622109447, 1.436739427831727]
I am able to use scikit-learn to create clusters with these coordinates however I am interested in finding the optimal k value to use - however scikit-learn does not have a feature where I can estimate the optimal value of K with this technique (or any technique as far as I am aware).
You can try the code in the last comment by Monte Shaffer.
Here's a simplified version:
import numpy as np
import random
from numpy import zeros
class KMeansFK():
    """K-means clustering with the f(K) statistic for choosing the number
    of clusters (the method from the datasciencelab post referenced in the
    question).

    X holds the data points; mu holds the current centers; clusters maps
    a center index to the list of its member points.
    """
    def __init__(self, K, X):
        self.K = K            # number of clusters
        self.X = X            # data points
        self.N = len(X)       # number of observations
        self.mu = None        # current centers
        self.clusters = None  # center index -> list of member points
        self.method = None    # initialisation method used by find_centers
    def _cluster_points(self):
        # Assign every point to its nearest center (Euclidean distance).
        mu = self.mu
        clusters = {}
        for x in self.X:
            bestmukey = min([(i[0], np.linalg.norm(x-mu[i[0]])) \
                for i in enumerate(mu)], key=lambda t:t[1])[0]
            # EAFP: create the member list on the first assignment.
            try:
                clusters[bestmukey].append(x)
            except KeyError:
                clusters[bestmukey] = [x]
        self.clusters = clusters
    def _reevaluate_centers(self):
        # Move each center to the mean of its assigned points.
        clusters = self.clusters
        newmu = []
        keys = sorted(self.clusters.keys())
        for k in keys:
            newmu.append(np.mean(clusters[k], axis = 0))
        self.mu = newmu
    def _has_converged(self):
        # Converged when the set of centers is unchanged since the last
        # iteration and no two centers have collapsed onto each other.
        K = len(self.oldmu)
        return(set([tuple(a) for a in self.mu]) == \
            set([tuple(a) for a in self.oldmu])\
            and len(set([tuple(a) for a in self.mu])) == K)
    def find_centers(self, K, method='random'):
        # Run Lloyd's algorithm until convergence.
        # NOTE(review): the K parameter is immediately shadowed by self.K
        # below, so the argument is effectively ignored; callers must set
        # self.K (e.g. via init_centers) first.
        self.method = method
        X = self.X
        K = self.K
        # https://stackoverflow.com/questions/44372231/population-must-be-a-sequence-or-set-for-dicts-use-listd
        self.oldmu = random.sample(list(X), K)
        if method != '++':
            # Initialize to K random centers
            self.mu = random.sample(list(X), K)
        while not self._has_converged():
            self.oldmu = self.mu
            # Assign all points in X to clusters
            self._cluster_points()
            # Reevaluate centers
            self._reevaluate_centers()
    def _dist_from_centers(self):
        # Squared distance of every point to its nearest current center:
        # the D^2 weights used by k-means++ seeding.
        cent = self.mu
        X = self.X
        D2 = np.array([min([np.linalg.norm(x-c)**2 for c in cent]) for x in X])
        self.D2 = D2
    def _choose_next_center(self):
        # Sample the next seed with probability proportional to D^2
        # (inverse-CDF sampling over the cumulative probabilities).
        self.probs = self.D2/self.D2.sum()
        self.cumprobs = self.probs.cumsum()
        r = random.random()
        ind = np.where(self.cumprobs >= r)[0][0]
        return(self.X[ind])
    def init_centers(self,K):
        # k-means++ seeding: first center uniform at random, the rest
        # D^2-weighted.
        self.K = K
        #self.mu = random.sample(self.X, 1)
        self.mu = random.sample(list(self.X), 1)
        while len(self.mu) < self.K:
            self._dist_from_centers()
            self.mu.append(self._choose_next_center())
    def get_ak(self,k, Nd):
        # Recursive weight factor a_k used by the f(K) statistic; Nd is the
        # dimensionality of the data points.
        if k == 2:
            return( 1 - 3.0 / (4.0 * Nd ) )
        else:
            previous_a = self.get_ak(k-1, Nd)
            return ( previous_a + (1.0-previous_a)/6.0 )
    def fK(self, thisk, Skm1=0):
        # Evaluate f(thisk): cluster with k-means++ initialisation and
        # compare the total within-cluster scatter Sk with the previous
        # level's Skm1.
        X = self.X
        Nd = len(X[0])
        self.find_centers(thisk, method='++')
        mu, clusters = self.mu, self.clusters
        Sk = sum([np.linalg.norm(mu[i]-c)**2 \
            for i in range(thisk) for c in clusters[i]])
        # f(1) is defined as 1; also guard against dividing by a zero
        # previous scatter.
        if thisk == 1:
            fs = 1
        elif Skm1 == 0:
            fs = 1
        else:
            fs = Sk/(self.get_ak(thisk,Nd)*Skm1)
        return fs, Sk
    def run(self, maxk):
        # Compute f(k) for k = 1 .. maxk-1 and store the values in self.fs.
        # NOTE(review): range(1, maxk) excludes maxk itself, and Wks, Wkbs,
        # sks are allocated below but never used.
        ks = range(1,maxk)
        fs = zeros(len(ks))
        Wks,Wkbs,sks = zeros(len(ks)+1),zeros(len(ks)+1),zeros(len(ks)+1)
        # Special case K=1
        self.init_centers(1)
        fs[0], Sk = self.fK(1)
        # Rest of Ks
        for k in ks[1:]:
            self.init_centers(k)
            fs[k-1], Sk = self.fK(k, Skm1=Sk)
        self.fs = fs
And then run it on your data:
# Each VariableN list holds one coordinate of all 50 points, so stack them
# and transpose to get the (n_points, n_features) = (50, 5) layout that the
# clustering expects -- without .T this would cluster 5 "points" of
# dimension 50 instead of 50 five-dimensional points.
X = np.array([Variable1, Variable2, Variable3, Variable4, Variable5]).T
# BUGFIX: KMeansFK is defined in this script, not in a `kmeans` module.
km = KMeansFK(2, X)
km.run(5)
Now km.clusters has the result.

Categories

Resources