Python package for Bayesian optimization among a given set of samples

I have a set of samples and their corresponding target values. Below is the visualization:
I am trying to use Bayesian optimization to find the sample with the maximum target value. Does anyone know of a Python package that does this? I was looking at the BayesianOptimization package, and it seems that it can perform optimization over an interval, but not necessarily only among the given samples.
Edit: Here is a sample code:
import matplotlib.pyplot as plt
import numpy as np
from bayes_opt import BayesianOptimization

mean = (1, 2)
cov = [[1, 0], [0, 1]]
my_x, my_y = np.random.multivariate_normal(mean, cov, 1000).T
cmap_data = -my_x ** 2 - (my_y - 1) ** 2 + 1

search = []

def black_box_function(x, y):
    a = np.hstack((x, y))
    search.append(a)
    plt.scatter(my_x, my_y, marker='.', s=1, c=np.array(cmap_data))
    plt.axis('equal')
    # plt.scatter(np.array(search)[:, 0], np.array(search)[:, 1], marker='.', s=20, c='black')
    plt.plot(np.array(search)[:, 0], np.array(search)[:, 1], marker='.', c='black', linewidth=0.5)
    plt.show()
    return -x ** 2 - (y - 1) ** 2 + 1

# -- optimization
pbounds = {'x': (min(my_x), max(my_x)), 'y': (min(my_y), max(my_y))}
optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds=pbounds,
    random_state=1,
)
for i in range(1000):
    optimizer.probe(
        params=[my_x[i], my_y[i]],
        lazy=True,
    )
optimizer.maximize(
    init_points=0,
    n_iter=0,
)
But it seems optimizer.probe just evaluates the given points (registering them for the surrogate to use in future iterations); it does not perform an actual optimization iteration restricted to my samples.
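One workaround is to run the same Gaussian-process-plus-acquisition loop yourself, but maximize the acquisition only over the candidate set. Below is a minimal sketch using scikit-learn directly rather than bayes_opt; the Matern kernel, UCB acquisition, kappa = 2, and the starting indices are all illustrative assumptions, not anything prescribed by either library:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern

def f(x, y):                      # stand-in for the black-box objective above
    return -x ** 2 - (y - 1) ** 2 + 1

rng = np.random.default_rng(1)
candidates = rng.multivariate_normal((1, 2), [[1, 0], [0, 1]], 1000)

evaluated = [0, 1, 2]             # start from a few arbitrary samples
targets = [f(*candidates[i]) for i in evaluated]

gp = GaussianProcessRegressor(kernel=Matern(nu=2.5), normalize_y=True)
for _ in range(30):               # Bayesian optimization over the discrete set
    gp.fit(candidates[evaluated], targets)
    mu, sigma = gp.predict(candidates, return_std=True)
    ucb = mu + 2.0 * sigma        # upper-confidence-bound acquisition (kappa = 2)
    ucb[evaluated] = -np.inf      # only unseen samples may be chosen next
    best = int(np.argmax(ucb))
    evaluated.append(best)
    targets.append(f(*candidates[best]))

print(candidates[evaluated[int(np.argmax(targets))]], max(targets))

Because the argmax of the acquisition is taken over the candidate array, every probed point is guaranteed to be one of the original samples.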

Related

The result of least_squares is different depending on the environment

The same version of scipy, installed from the same conda channel, is used in both environments, yet the result of least_squares differs between them. The only difference is the environment: the PCs are different.
version: 1.9.1 py39h316f440_0
channel: conda-forge
environment: Windows
I've attached the source code I ran. If the conditions are the same except for the environment, I would like to get the same results. What causes the difference, and how can I make the results match?
Thank you.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import least_squares
import random
random.seed(134)
np.random.seed(134)
def report_params(fit_params_values, fit_param_names):
    for each in range(len(fit_param_names)):
        print(fit_param_names[each], 'is', fit_params_values[each])

# define your modules
def pCon1():
    # This is the module for a specific instantiation of a constitutive promoter
    # the input is nothing
    # the output is a protein production amount per time unit
    pCon1_production_rate = 100
    return pCon1_production_rate
def pLux1(LuxR, AHL):
    # This is the module for a specific instantiation of a lux promoter
    # the input is a LuxR amount and an AHL amount
    # the output is a protein production amount per time unit
    # For every promoter there is some function that determines what the promoter's
    # maximal and basal expression are based on the amount of transcription factor
    # floating around in the cell. These numbers are empirically determined, and
    # for demonstration purposes are fictionally and arbitrarily filled in here.
    # These functions take the form of Hill functions.
    basal_n = 2
    basal_basal = 2
    basal_max = 2
    basal_kd = 2
    basal_expression_rate = basal_basal + (basal_max * (LuxR**basal_n / (LuxR**basal_n + basal_kd)))
    max_n = 2
    max_max = 2
    max_kd = 2
    maximal_expression_rate = (LuxR**max_n / (LuxR**max_n + max_kd))
    pLux1_n = 2
    pLux1_kd = 10
    pLux1_production_rate = basal_expression_rate + maximal_expression_rate*(AHL**pLux1_n / (pLux1_kd + AHL**pLux1_n))
    return pLux1_production_rate
def simulation_set_of_equations(y, t, *args):
    # Args are strictly for parameters we want to eventually estimate.
    # Everything else must be hardcoded below. Sorry for the inconvenience.
    # Unpack your parameters
    k_pCon_express = args[0]  # A summation of transcription and translation from a pCon promoter
    k_pLux_express = args[1]  # A summation of transcription and translation from a pLux promoter
    k_loss = args[2]  # A summation of dilution and degradation
    # Unpack your current amount of each species
    LuxR, GFP, AHL = y
    # Determine the change in each species
    dLuxR = pCon1() - k_loss*LuxR
    dGFP = pLux1(LuxR, AHL)*k_pLux_express - k_loss*GFP
    dAHL = 0  # for now we're assuming AHL was added exogenously and never degrades
    # Return the change in each species; make sure same order as your init values
    # scipy.odeint will take these values and apply them to the current value of each species in the next time step for you
    return [dLuxR, dGFP, dAHL]
# Parameters
k_pCon_express = 101
k_pLux_express = 50
k_loss = 0.1
params = (k_pCon_express, k_pLux_express, k_loss)
param_names = ['k_pCon_express', 'k_pLux_express', 'k_loss'] # somehow this is honestly necessary in Python?!
# Initial Conditions
# LuxR, GFP, AHL
init_P = [1000, 0, 11]
# Timesteps
n_steps = 500
t = np.linspace(0, 30, n_steps)
num_P = odeint(simulation_set_of_equations, init_P, t, args = (params))
plt.plot(t, num_P[:,0], c='b', label = 'LuxR')
plt.plot(t, num_P[:,1], c='g', label = 'GFP')
plt.plot(t, num_P[:,2], c='r', label = 'AHL')
plt.xlabel('Time')
plt.ylabel('Concentration')
plt.legend(loc = 'best')
plt.grid()
plt.yscale('log')
plt.show()
noise = np.random.normal(0, 10, num_P.shape)
exp_P = num_P + noise
exp_t = t[::10]
exp_P = exp_P[::10]
# Create experimental data. Just take the regular simulation data and add some gaussian noise to it.
def residuals(params):
    params = tuple(params)
    sim_P = odeint(simulation_set_of_equations, init_P, exp_t, args = params)
    res = sim_P - exp_P
    return res.flatten()
initial_guess = (100, 100, 100)
low_bounds = [0, 0, 0]
up_bounds = [1000, 1000, 1000]
fitted_params = least_squares(residuals, initial_guess, bounds=(low_bounds, up_bounds)).x
# small reminder: .x is the fitted parameters attribute of the least_squares output
# With least_squares function, unlike, say, curve_fit, it does not compute the covariance matrix for you
# TODO calculate standard deviation of parameter estimation
# (will this ever be used other than sanity checking?)
print(params)
report_params(fitted_params, param_names)
(101, 50, 0.1)
k_pCon_express is 100.0
k_pLux_express is 49.9942246627
k_loss is 0.100037839987
plt.plot(t, odeint(simulation_set_of_equations, init_P, t, args = tuple(params))[:,1], c='r', label='GFP - Given Param Simulation')
plt.scatter(exp_t, exp_P[:,1], c='b', label='GFP - Fake Experimental Data')
plt.plot(t, odeint(simulation_set_of_equations, init_P, t, args = tuple(fitted_params))[:,1], c='g', label='GFP - Fitted Param Simulation')
plt.legend(loc = 'best')
plt.xlabel('Time')
plt.ylabel('Concentration')
plt.grid()
plt.yscale('log')
plt.show()
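On the TODO in the code above: least_squares does not compute a covariance matrix for you, but a standard Gauss-Newton approximation can be built from the Jacobian at the solution. This is a sketch reusing the names defined above, not part of the original post:

# Estimate parameter standard deviations from the least_squares result.
result = least_squares(residuals, initial_guess, bounds=(low_bounds, up_bounds))
J = result.jac                           # Jacobian at the solution
dof = result.fun.size - result.x.size    # number of residuals minus number of parameters
s_sq = 2 * result.cost / dof             # residual variance (cost = 0.5 * sum(fun**2))
cov = np.linalg.inv(J.T @ J) * s_sq      # Gauss-Newton covariance approximation
perr = np.sqrt(np.diag(cov))             # one-sigma parameter uncertainties
report_params(perr, [name + ' std' for name in param_names])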

How to plot 2 trendlines on a single scatterplot? (python)

I want to plot 2 trendlines for one scatterplot using Matplotlib in Python but I don't know how. The graph should be similar to this target plot (from here, fig.2).
I managed to plot 1 trendline on a scatterplot here but can't figure out how to plot another trend.
Below is what I have tried so far. It worked acceptably for other parameters I plotted, but not in this case, which leads me to believe it isn't quite correct.
X = vO2.reshape(-1, 1)
Y = ve.reshape(-1, 1)
linear_regressor = LinearRegression()
linear_regressor.fit(X, Y)
y_pred = linear_regressor.predict(X)
x_pred = linear_regressor.predict(Y)
plt.scatter(X, Y)
plt.plot(X, y_pred, '-*',label="O2")
plt.plot(x_pred, Y, '-*',label="vent")
plt.xlabel("VO2 (L/min)")
plt.ylabel("VE (L/min)")
plt.show()
and also
z1 = np.polyfit(vO2, ve, 1)
p1 = np.poly1d(z1)
z2 = np.polyfit(ve, vO2, 1)
p2 = np.poly1d(z2)
plt.scatter(vO2, ref_vent, label='original')
plt.plot(vO2, p1(vO2), label='trendline')
plt.plot(ve, p2(ve), label='trendline')
plt.show()
which also didn't look similar to the target plot.
I don't know how to continue. Thanks in advance!
example dataset:
vo2 = [1.673925 1.9015125 1.981775 2.112875 2.1112625 2.086375 2.13475
2.1777 2.176975 2.1857125 2.258925 2.2718375 2.3381 2.3330875
2.353725 2.4879625 2.448275 2.4829875 2.5084375 2.511275 2.5511
2.5678375 2.5844625 2.6101875 2.6457375 2.6602125 2.6939875 2.7210625
2.720475 2.767025 2.751375 2.7771875 2.776025 2.7319875 2.564
2.3977625 2.4459125 2.42965 2.401275 2.387175 2.3544375]
ve = [ 3.93125 7.1975 9.04375 14.06125 14.11875 13.24375
14.6625 15.3625 15.2 15.035 17.7625 17.955
19.2675 19.875 21.1575 22.9825 23.75625 23.30875
25.9925 25.6775 27.33875 27.7775 27.9625 29.35
31.86125 32.2425 33.7575 34.69125 36.20125 38.6325
39.4425 42.085 45.17 47.18 42.295 37.5125
38.84375 37.4775 34.20375 33.18 32.67708333]
OK, so you need to find the point where the slope of the line changes. I tried the 2nd derivative, but it was noisy and I couldn't find the right spot.
Another way is to try all possible split points, calculate left and right regression lines, and find the pair with the best fit (product of r2 coefficients). Give this code a try. It is not complete: I do not know how to force the regression lines to pass through the split point in the middle. And it might be better to work with interpolated data if there are not enough datapoints.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
vo2 = [1.673925,1.9015125,1.981775,2.112875,2.1112625,2.086375,2.13475,2.1777,2.176975,2.1857125,2.258925,2.2718375,2.3381,2.3330875,2.353725,2.4879625,2.448275,2.4829875,2.5084375,2.511275,2.5511,2.5678375,2.5844625,2.6101875,2.6457375,2.6602125,2.6939875,2.7210625,2.720475,2.767025,2.751375,2.7771875,2.776025,2.7319875,2.564,2.3977625,2.4459125,2.42965,2.401275,2.387175,2.3544375]
ve = [ 3.93125,7.1975,9.04375,14.06125,14.11875,13.24375,14.6625,15.3625,15.2,15.035,17.7625,17.955,19.2675,19.875,21.1575,22.9825,23.75625,23.30875,25.9925,25.6775,27.33875,27.7775,27.9625,29.35,31.86125,32.2425,33.7575,34.69125,36.20125,38.6325,39.4425,42.085,45.17,47.18,42.295,37.5125,38.84375,37.4775,34.20375,33.18,32.67708333]
x = np.array(vo2)
y = np.array(ve)
sort_idx = x.argsort()
x = x[sort_idx]
y = y[sort_idx]
assert len(x) == len(y)
def fit(x, y):
    p = np.polyfit(x, y, 1)
    f = np.poly1d(p)
    r2 = r2_score(y, f(x))
    return p, f, r2

skip = 5  # minimal length of split data
r2 = [0] * len(x)
funcs = {}
for i in range(len(x)):
    if i < skip or i > len(x) - skip:
        continue
    _, f_left, r2_left = fit(x[:i], y[:i])
    _, f_right, r2_right = fit(x[i:], y[i:])
    r2[i] = r2_left * r2_right
    funcs[i] = (f_left, f_right)
split_ix = np.argmax(r2) # index of split
f_left,f_right = funcs[split_ix]
print(f"split point index: {split_ix}, x: {x[split_ix]}, y: {y[split_ix]}")
xd = np.linspace(min(x), max(x), 100)
plt.plot(x, y, "o")
plt.plot(xd, f_left(xd))
plt.plot(xd, f_right(xd))
plt.plot(x[split_ix], y[split_ix], "x")
plt.show()
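On the open point about forcing the two lines to meet: one option (not in the answer above, so treat it as a sketch) is to fit a continuous piecewise-linear model with scipy.optimize.curve_fit, making the breakpoint itself a free parameter. It reuses x, y, np, and plt from the snippet above; the initial guess p0 is an illustrative assumption:

from scipy.optimize import curve_fit

def piecewise_linear(x, x0, y0, k1, k2):
    # two segments that meet at (x0, y0), so continuity is built in
    return np.where(x < x0, y0 + k1 * (x - x0), y0 + k2 * (x - x0))

p, _ = curve_fit(piecewise_linear, x, y, p0=[2.5, 30.0, 20.0, 60.0])
xd = np.linspace(x.min(), x.max(), 200)
plt.plot(x, y, "o")
plt.plot(xd, piecewise_linear(xd, *p))
plt.show()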

How to loop through multiple sklearn classification models?

I'm trying to figure out how to feed my data set into several scikit classification models.
When I run the code I get the following error:
Traceback (most recent call last):
File "<ipython-input-515-9a3302837c99>", line 3, in <module>
X, y = dataset
ValueError: too many values to unpack (expected 2)
Here is my code.
X = np.asarray([np.asarray(df['LRMScore']),np.asarray(df['Spread'])]).T
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
    'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
    'SpectralClustering', 'Ward', 'AgglomerativeClustering',
    'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)
plot_num = 1
datasets = [X]
for i_dataset, dataset in enumerate(datasets):
    X, y = dataset
    # normalize dataset for easier parameter selection
    X = StandardScaler().fit_transform(X)
    # estimate bandwidth for mean shift
    bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
    # connectivity matrix for structured Ward
    connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
    # make connectivity symmetric
    connectivity = 0.5 * (connectivity + connectivity.T)
    # create clustering estimators
    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    two_means = cluster.MiniBatchKMeans(n_clusters=2)
    ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
                                           connectivity=connectivity)
    spectral = cluster.SpectralClustering(n_clusters=2,
                                          eigen_solver='arpack',
                                          affinity="nearest_neighbors")
    dbscan = cluster.DBSCAN(eps=.2)
    affinity_propagation = cluster.AffinityPropagation(damping=.9,
                                                       preference=-200)
    average_linkage = cluster.AgglomerativeClustering(
        linkage="average", affinity="cityblock", n_clusters=2,
        connectivity=connectivity)
    birch = cluster.Birch(n_clusters=2)
    clustering_algorithms = [
        two_means, affinity_propagation, ms, spectral, ward, average_linkage,
        dbscan, birch]
    for name, algorithm in zip(clustering_names, clustering_algorithms):
        # predict cluster memberships
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        if hasattr(algorithm, 'labels_'):
            y_pred = algorithm.labels_.astype(np.int)
        else:
            y_pred = algorithm.predict(X)
        # plot
        plt.subplot(4, len(clustering_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
        if hasattr(algorithm, 'cluster_centers_'):
            centers = algorithm.cluster_centers_
            center_colors = colors[:len(centers)]
            plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
        plt.xlim(-2, 2)
        plt.ylim(-2, 2)
        plt.xticks(())
        plt.yticks(())
        plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1
plt.show()
My X variable consists of two columns of a dataframe, and it looks like this.
array([[ 8. , 0.06],
[ 8. , 0.06],
[ 8. , 0.06],
...,
[10. , 0.01],
[ 8. , 0.03],
[ 9.75, 0.06]])
These datasets consist of two arrays: X and Y.
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
                                      noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
My dataset consists of one array; that's the problem. I guess my setup has to be done slightly differently, but I'm not sure how that would look.
I got the code from the link below.
https://scikit-learn.org/0.18/auto_examples/cluster/plot_cluster_comparison.html
Since your X array has two columns you need to transpose it in order to use value unpacking:
x, y = dataset.T
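To see why the transpose matters, here is a tiny check added for illustration (not part of the original answer):

import numpy as np

dataset = np.array([[8.0, 0.06], [10.0, 0.01], [9.75, 0.06]])
# X, y = dataset   # ValueError: too many values to unpack (expected 2),
#                  # because unpacking iterates over the 3 rows
x, y = dataset.T   # transposing yields 2 rows: column 0 and column 1
print(x)           # [ 8.   10.    9.75]
print(y)           # [0.06 0.01 0.06]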
That did it! Thanks parsa. Here is my final working solution.
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
np.random.seed(0)
pd.set_option('display.max_columns', 500)
df = pd.read_csv('C:\\your_path_here\\test.csv')
print('done!')
df = df[:10000]
df = df.fillna(0)
df = df.dropna()
X = df[['RatingScore',
        'Par',
        'Term',
        'TimeToMaturity',
        'LRMScore',
        'Coupon',
        'Price']]
#select your target variable
y = df[['Spread']]
#train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
    'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
    'SpectralClustering', 'Ward', 'AgglomerativeClustering',
    'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)
plot_num = 1
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
                                       connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
                                      eigen_solver='arpack',
                                      affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
                                                   preference=-200)
average_linkage = cluster.AgglomerativeClustering(
    linkage="average", affinity="cityblock", n_clusters=2,
    connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
    two_means, affinity_propagation, ms, spectral, ward, average_linkage,
    dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
    # predict cluster memberships
    t0 = time.time()
    algorithm.fit(X)
    t1 = time.time()
    if hasattr(algorithm, 'labels_'):
        y_pred = algorithm.labels_.astype(int)
    else:
        y_pred = algorithm.predict(X)
    # plot
    plt.subplot(4, len(clustering_algorithms), plot_num)
    plt.title(name, size=18)
    plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
    if hasattr(algorithm, 'cluster_centers_'):
        centers = algorithm.cluster_centers_
        center_colors = colors[:len(centers)]
        plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
    plt.xlim(-2, 2)
    plt.ylim(-2, 2)
    plt.xticks(())
    plt.yticks(())
    plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
             transform=plt.gca().transAxes, size=15,
             horizontalalignment='right')
    plot_num += 1
plt.show()

Example for NARX in python - both training and prediction

Is there any end-to-end example of how to train and then predict/run inference with a NARX model in Python?
There is the PyNeurgen library, which implements NARX, but its documentation isn't very complete.
This OP seems to have written a Keras implementation, but the code lacks an implementation for inference/prediction: NARX implementation using keras
I think you are looking for the equivalents of model.fit() and model.predict(), which are net.learn() and net.test() in pyneurgen. Based on this tutorial and this example, I formulated this demo (in Python 2.7) to give you an idea of how it is done. I hope it helps.
import math
import numpy as np
import matplotlib.pylab as plt
from pyneurgen.neuralnet import NeuralNet
from pyneurgen.recurrent import NARXRecurrent
def plot_results(x, y, y_true):
    plt.subplot(3, 1, 1)
    plt.plot([i[1] for i in population])
    plt.title("Population")
    plt.grid(True)

    plt.subplot(3, 1, 2)
    plt.plot(x, y, 'bo', label='targets')
    plt.plot(x, y_true, 'ro', label='actuals')
    plt.grid(True)
    plt.legend(loc='lower left', numpoints=1)
    plt.title("Test Target Points vs Actual Points")

    plt.subplot(3, 1, 3)
    plt.plot(range(1, len(net.accum_mse) + 1, 1), net.accum_mse)
    plt.xlabel('epochs')
    plt.ylabel('mean squared error')
    plt.grid(True)
    plt.title("Mean Squared Error by Epoch")
    plt.show()
def generate_data():
    # all samples are drawn from this population
    pop_len = 200
    factor = 1.0 / float(pop_len)
    population = [[i, math.sin(float(i) * factor * 10.0)] for i in range(pop_len)]
    population_shuffle = population[:]
    all_inputs = []
    all_targets = []
    np.random.shuffle(population_shuffle)
    for position, target in population_shuffle:
        all_inputs.append([position * factor])
        all_targets.append([target])
        # print(all_inputs[-1], all_targets[-1])
    return population, all_inputs, all_targets
# generate data
population, all_inputs, all_targets = generate_data()
# NARXRecurrent
input_nodes, hidden_nodes, output_nodes = 1, 10, 1
output_order, incoming_weight_from_output = 3, .6
input_order, incoming_weight_from_input = 2, .4
# init neural network
net = NeuralNet()
net.init_layers(input_nodes, [hidden_nodes], output_nodes,
                NARXRecurrent(output_order, incoming_weight_from_output,
                              input_order, incoming_weight_from_input))
net.randomize_network()
net.set_halt_on_extremes(True)
# set constrains and rates
net.set_random_constraint(.5)
net.set_learnrate(.1)
# set inputs and outputs
net.set_all_inputs(all_inputs)
net.set_all_targets(all_targets)
# set lengths
length = len(all_inputs)
learn_end_point = int(length * .8)
# set ranges
net.set_learn_range(0, learn_end_point)
net.set_test_range(learn_end_point + 1, length - 1)
# add activation to layer 1
net.layers[1].set_activation_type('tanh')
# fit data to model
net.learn(epochs=150, show_epoch_results=True, random_testing=False)
# define mean squared error
mse = net.test()
print "Testing mse = ", mse
# define data
x = [item[0][0] * 200.0 for item in net.get_test_data()]
y = [item[0][0] for item in net.test_targets_activations]
y_true = [item[1][0] for item in net.test_targets_activations]
# plot results
plot_results(x, y, y_true)
This results in the following plot:
For more examples, take a look at the following links:
Air_Quality_ppm_prediction/NARXImproved.ipynb
NARX-Prediction-Model/NARX_Neural_Network.ipynb
CS230project/NARX_PyNeurGen.ipynb
TimeSeriesNotes/narx.py

Extract decision boundary with scikit-learn linear SVM

I have a very simple 1D classification problem: a list of values [0, 0.5, 2] and their associated classes [0, 1, 2]. I would like to get the classification boundaries between those classes.
Adapting the iris example (for visualization purposes), getting rid of the non-linear models:
X = np.array([[x, 1] for x in [0, 0.5, 2]])
Y = np.array([1, 0, 2])
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, Y)
lin_svc = svm.LinearSVC(C=C).fit(X, Y)
Gives the following result:
LinearSVC is returning junk (why?), but the SVC with a linear kernel works okay. So I would like to get the boundary values, which you can guess graphically: ~0.25 and ~1.25.
That's where I'm lost: svc.coef_ returns
array([[ 0.5       ,  0.        ],
       [-1.33333333,  0.        ],
       [-1.        ,  0.        ]])
while svc.intercept_ returns array([-0.125 , 1.66666667, 1. ]).
This is not explicit.
I must be missing something silly: how do I obtain those values? They seem obvious to compute; it would be ridiculous to iterate over the x-axis to find the boundary...
I had the same question and eventually found the solution in the sklearn documentation.
Given the weights W=svc.coef_[0] and the intercept I=svc.intercept_ , the decision boundary is the line
y = a*x - b
with
a = -W[0]/W[1]
b = I[0]/W[1]
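Applied to the question above (a sketch I'm adding for illustration, not from the original answer): with three classes, SVC trains one-vs-one, so coef_ and intercept_ hold one row per class pair. Since the second feature is the constant 1, each pairwise boundary in x solves coef[0]*x + coef[1]*1 + intercept = 0:

# One boundary per class pair, in one-vs-one order: (0 vs 1), (0 vs 2), (1 vs 2).
for w, b in zip(svc.coef_, svc.intercept_):
    print(-(b + w[1]) / w[0])

With the coef_ and intercept_ values printed above, this yields 0.25, 1.25, and 1.0, matching the boundaries guessed graphically.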
Exact boundary calculated from coef_ and intercept_
I think this is a great question and haven't been able to find a general answer to it anywhere in the documentation. This site really needs Latex, but anyway, I'll try to do my best without...
In general, a hyperplane is defined by its unit normal and an offset from the origin. So we hope to find some decision function of the form: x dot n + d > 0 (where the > may of course be replaced with >=).
In the case of the SVM Margins Example, we can manipulate the equation they start with to clarify its conceptual significance. First, let's establish the notational convenience of writing coef to represent coef_[0] and intercept to represent intercept_[0], since these arrays only have 1 value. Then some simple substitution yields the equation:
y + coef[0]*x/coef[1] + intercept/coef[1] = 0
Multiplying through by coef[1], we obtain
coef[1]*y + coef[0]*x + intercept = 0
And so we see that the coefficients and intercept function roughly as their names would imply. Applying one quick generalization of notation should make the answer clear - we will replace x and y with a single vector x.
coef[0]*x[0] + coef[1]*x[1] + intercept = 0
In general, the coef_ and intercept_ members of the svm classifier will have dimension matching the data set it was trained on, so we can extrapolate this equation to data of arbitrary dimension. And to avoid leading anyone astray, here is the final generalized decision boundary using the original variable names from the svm:
coef_[0][0]*x[0] + coef_[0][1]*x[1] + coef_[0][2]*x[2] + ... + coef_[0][n-1]*x[n-1] + intercept_[0] = 0
where the dimension of the data is n.
Or more tersely:
sum(coef_[0][i]*x[i]) + intercept_[0] = 0
where i sums over the range of the dimension of the input data.
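As a quick sanity check of this formula, here is a self-contained sketch (assuming a binary linear SVC on made-up points): for a linear kernel, sum(coef_[0][i]*x[i]) + intercept_[0] is exactly what decision_function computes.

import numpy as np
from sklearn import svm

X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 0.]])
y = [0, 0, 1, 1]
clf = svm.SVC(kernel='linear').fit(X, y)
w, b = clf.coef_[0], clf.intercept_[0]
# manual w.x + b matches the library's decision_function for a linear kernel
print(np.allclose(X @ w + b, clf.decision_function(X)))  # True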
Get decision line from SVM, demo 1
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_blobs
# we create 40 separable points
X, y = make_blobs(n_samples=40, centers=2, random_state=6)
# fit the model, don't regularize for illustration purposes
clf = svm.SVC(kernel='linear', C=1000)
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired)
# plot the decision function
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# create grid to evaluate model
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = clf.decision_function(xy).reshape(XX.shape)
# plot decision boundary and margins
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
           linestyles=['--', '-', '--'])
# plot support vectors
ax.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=100,
           linewidth=1, facecolors='none')
plt.show()
Prints:
Approximate the separating n-1 dimensional hyperplane of an SVM, Demo 2
import numpy as np
from sklearn import svm
import matplotlib.pyplot as plt

np.random.seed(0)
mean1, cov1, n1 = [1, 5], [[1, 1], [1, 2]], 200     # 200 samples of class 1
x1 = np.random.multivariate_normal(mean1, cov1, n1)
y1 = np.ones(n1, dtype=int)
mean2, cov2, n2 = [2.5, 2.5], [[1, 0], [0, 1]], 300  # 300 samples of class 0
x2 = np.random.multivariate_normal(mean2, cov2, n2)
y2 = 0 * np.ones(n2, dtype=int)
X = np.concatenate((x1, x2), axis=0)  # concatenate the class 1 and class 0 samples
y = np.concatenate((y1, y2))
clf = svm.SVC()
#fit the hyperplane between the clouds of data, should be fast as hell
clf.fit(X, y)
# In a notebook, clf.fit(X, y) echoes the fitted estimator:
# SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
#     decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
#     max_iter=-1, probability=False, random_state=None, shrinking=True,
#     tol=0.001, verbose=False)
production_point = [1., 2.5]
answer = clf.predict([production_point])
print("Answer: " + str(answer))
plt.plot(x1[:,0], x1[:,1], 'ob', x2[:,0], x2[:,1], 'or', markersize = 5)
colormap = ['r', 'b']
color = colormap[answer[0]]
plt.plot(production_point[0], production_point[1], 'o' + str(color), markersize=20)
#I want to draw the decision lines
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xx = np.linspace(xlim[0], xlim[1], 30)
yy = np.linspace(ylim[0], ylim[1], 30)
YY, XX = np.meshgrid(yy, xx)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = clf.decision_function(xy).reshape(XX.shape)
ax.contour(XX, YY, Z, colors='k', levels=[-1, 0, 1], alpha=0.5,
           linestyles=['--', '-', '--'])
plt.show()
Prints:
These hyperplanes are all straight as an arrow; they're just straight in higher dimensions and can't be comprehended by mere mortals confined to 3-dimensional space. These hyperplanes are cast into higher dimensions with the creative kernel functions, then flattened back into the visible dimension for your viewing pleasure. Here is a video trying to impart some intuition of what is going on in demo 2: https://www.youtube.com/watch?v=3liCbRZPrZA
