I am trying to visualise an SVM with code from https://scikit-learn.org/stable/auto_examples/svm/plot_iris_svc.html and want to be able to specify the colours for each class. For that I create a custom colormap using LinearSegmentedColormap.from_list. This works as intended for 6 classes or fewer, but for more than 6 classes the colours of the contour plot are often wrong.
How can I specify the colours for more than 6 classes?
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from sklearn import svm, preprocessing
# create dummy data
use = (
    ((1,9),(.2,.1),'b'),
    ((4,0),(.2,.4),'r'),
    ((1,5),(.2,.3),'g'),
    ((6,3),(.3,.2),'m'),
    ((5,6),(.1,.2),'c'),
    ((4,2),(.1,.1),'xkcd:orange'),
    ((5,4),(.3,.2),'xkcd:peach'),
    ((3,1),(.1,.4),'xkcd:bright pink'),
    ((2,1),(.2,.1),'xkcd:crimson'),
)
sx = [np.random.normal(loc=u[0][0], scale=u[1][0], size=(20,)) for u in use]
sy = [np.random.normal(loc=u[0][1], scale=u[1][1], size=(20,)) for u in use]
X = np.array([[ix[i], iy[i]] for ix, iy in zip(sx, sy) for i in range(20)])
y = np.array([i for i in range(len(use)) for n in range(20)])
# scale the data
Scaler = preprocessing.StandardScaler().fit(X)
X = Scaler.transform(X)
# color map
cm = LinearSegmentedColormap.from_list('use', [u[2] for u in use], N=len(use))
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in
    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional
    Returns
    -------
    xx, yy : ndarray
    """
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.
    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z/Z.max()  # normalise the class labels to [0, 1]
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
# we create an instance of SVM and fit our data.
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C, decision_function_shape='ovo'),
          svm.SVC(kernel='sigmoid', C=C, decision_function_shape='ovo'),
          svm.SVC(kernel='rbf', gamma=0.7, C=C, decision_function_shape='ovo'),
          svm.SVC(kernel='poly', degree=3, gamma='auto', C=C, decision_function_shape='ovo'))
models = (clf.fit(X, y) for clf in models)
# title for the plots
titles = ('SVC with linear kernel',
          'SVC with sigmoid kernel',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
# plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy, cmap=cm, alpha=0.7)
    ax.scatter(X0, X1, c=y, cmap=cm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
To enforce colours like this, the levels have to be passed to contourf explicitly as a list-like object. The list should contain $n+1$ entries marking the borders between the classes, where $n$ is the number of classes. With the classes equalling range(len(use)), this is [i - .5 for i in range(len(use) + 1)], hence one can use the following to get the desired output:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from sklearn import svm, preprocessing
# create dummy data
use = (
((1,9),(.2,.1),'b'),
((4,0),(.2,.4),'r'),
((1,5),(.2,.3),'g'),
((6,3),(.3,.2),'m'),
((5,6),(.1,.2),'c'),
((4,2),(.1,.1),'xkcd:orange'),
((5,4),(.3,.2),'xkcd:peach'),
((3,1),(.1,.4),'xkcd:bright pink'),
((2,1),(.2,.1),'xkcd:crimson'),
)
sx = [np.random.normal(loc=u[0][0], scale=u[1][0], size=(20,)) for u in use]
sy = [np.random.normal(loc=u[0][1], scale=u[1][1], size=(20,)) for u in use]
X = np.array([[ix[i], iy[i]] for ix, iy in zip(sx, sy) for i in range(20)])
y = np.array([i for i in range(len(use)) for n in range(20)])
# scale the data
Scaler = preprocessing.StandardScaler().fit(X)
X = Scaler.transform(X)
# color map
cm = LinearSegmentedColormap.from_list('use', [u[2] for u in use], N=len(use))
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in
    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional
    Returns
    -------
    xx, yy : ndarray
    """
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.
    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
# we create an instance of SVM and fit our data.
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C, decision_function_shape='ovo'),
          svm.SVC(kernel='sigmoid', C=C, decision_function_shape='ovo'),
          svm.SVC(kernel='rbf', gamma=0.7, C=C, decision_function_shape='ovo'),
          svm.SVC(kernel='poly', degree=3, gamma='auto', C=C, decision_function_shape='ovo'))
models = (clf.fit(X, y) for clf in models)
# title for the plots
titles = ('SVC with linear kernel',
          'SVC with sigmoid kernel',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
# plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=cm, alpha=0.7,
                  levels=[i - .5 for i in range(len(use) + 1)])
    ax.scatter(X0, X1, c=y, cmap=cm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
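As a quick check, the levels passed to contourf for the nine classes above evaluate to the half-integer borders around the integer class labels:
n = 9  # len(use)
levels = [i - .5 for i in range(n + 1)]
print(levels)
# [-0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5]
Each class label i then falls strictly inside its own bin (i - .5, i + .5), so contourf maps class i to the i-th of the N=len(use) colours in the colormap.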
Related
I am working with scikit-learn's breast cancer dataset, consisting of 30 features.
Following this tutorial for the much less depressing iris dataset, I figured out how to plot the decision surface separating the "benign" and "malignant" categories, when considering the dataset's first two features (mean radius and mean texture).
This is what I get:
But how can I represent the hyperplane computed when using all features in the dataset?
I am aware that I cannot plot a graph in 30 dimensions, but I would like to "project" the hyperplane created when running svm.SVC(kernel='linear', C=1).fit(X_train, y_train) onto the 2D scatter plot showing mean texture against mean radius.
I read about using PCA to reduce dimensionality, but I suspect that fitting a "reduced" dataset is not the same as projecting the hyperplane computed over all 30 features onto a 2D plot.
Here is my code so far:
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm
import numpy as np
#Load dataset
cancer = datasets.load_breast_cancer()
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, test_size=0.3,random_state=109) # 70% training and 30% test
h = .02 # mesh step
C = 1.0 # Regularisation
clf = svm.SVC(kernel='linear', C=C).fit(X_train[:,:2], y_train) # Linear Kernel
x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
scat=plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
legend1 = plt.legend(*scat.legend_elements(),
loc="upper right", title="diagnostic")
plt.xlabel('mean_radius')
plt.ylabel('mean_texture')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.show()
You cannot visualize the decision surface for a lot of features: the dimensions are too many, and there is no way to visualize an N-dimensional surface.
I have also written an article about this here:
https://towardsdatascience.com/support-vector-machines-svm-clearly-explained-a-python-tutorial-for-classification-problems-29c539f3ad8?source=friends_link&sk=80f72ab272550d76a0cc3730d7c8af35
However, you can use 2 features and plot nice decision surfaces as follows.
Case 1: 2D plot for 2 features and using the iris dataset
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
def make_meshgrid(x, y, h=.02):
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
model = svm.SVC(kernel='linear')
clf = model.fit(X, y)
fig, ax = plt.subplots()
# title for the plots
title = ('Decision surface of linear SVC ')
# Set-up grid for plotting.
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_ylabel('y label here')
ax.set_xlabel('x label here')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax.legend()
plt.show()
Case 2: 3D plot for 3 features and using the iris dataset
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from mpl_toolkits.mplot3d import Axes3D
iris = datasets.load_iris()
X = iris.data[:, :3] # we only take the first three features.
Y = iris.target
# make it a binary classification problem
X = X[np.logical_or(Y==0,Y==1)]
Y = Y[np.logical_or(Y==0,Y==1)]
model = svm.SVC(kernel='linear')
clf = model.fit(X, Y)
# The equation of the separating plane is given by all x such that np.dot(clf.coef_[0], x) + clf.intercept_[0] = 0.
# Solve for w3 (z)
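# i.e. w0*x + w1*y + w2*z + intercept = 0  =>  z = -(intercept + w0*x + w1*y) / w2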
z = lambda x,y: (-clf.intercept_[0]-clf.coef_[0][0]*x -clf.coef_[0][1]*y) / clf.coef_[0][2]
tmp = np.linspace(-5,5,30)
x,y = np.meshgrid(tmp,tmp)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot3D(X[Y==0,0], X[Y==0,1], X[Y==0,2],'ob')
ax.plot3D(X[Y==1,0], X[Y==1,1], X[Y==1,2],'sr')
ax.plot_surface(x, y, z(x,y))
ax.view_init(30, 60)
plt.show()
You can't plot the 30-dimensional data without first transforming it to 2-D.
https://github.com/tmadl/highdimensional-decision-boundary-plot
What is a Voronoi Tessellation?
Given a set P := {p1, ..., pn} of sites, a Voronoi Tessellation is a subdivision of the space into n cells, one for each site in P, with the property that a point q lies in the cell corresponding to a site pi iff d(pi, q) < d(pj, q) for i distinct from j. The segments in a Voronoi Tessellation correspond to all points in the plane equidistant to the two nearest sites. Voronoi Tessellations have applications in computer science. - https://philogb.github.io/blog/2010/02/12/voronoi-tessellation/
In geometry, a centroidal Voronoi tessellation (CVT) is a special type of Voronoi tessellation or Voronoi diagram. A Voronoi tessellation is called centroidal when the generating point of each Voronoi cell is also its centroid, i.e. the arithmetic mean or center of mass. It can be viewed as an optimal partition corresponding to an optimal distribution of generators. A number of algorithms can be used to generate centroidal Voronoi tessellations, including Lloyd's algorithm for K-means clustering or Quasi-Newton methods like BFGS. - Wiki
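For intuition, an exact Voronoi diagram of a handful of sites can be drawn with scipy (a small sketch, separate from the 1-NN approximation used in the code below):
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi, voronoi_plot_2d
pts = np.random.rand(15, 2)  # 15 random sites in the unit square
vor = Voronoi(pts)           # exact tessellation: one cell per site
voronoi_plot_2d(vor)
plt.show()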
import numpy as np, matplotlib.pyplot as plt
# public import paths (the private submodule paths were removed in newer scikit-learn)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.manifold import TSNE
from sklearn import svm
bcd = load_breast_cancer()
X,y = bcd.data, bcd.target
X_Train_embedded = TSNE(n_components=2).fit_transform(X)
print(X_Train_embedded.shape)
h = .02 # mesh step
C = 1.0 # Regularisation
clf = svm.SVC(kernel='linear', C=C) # Linear Kernel
clf = clf.fit(X,y)
y_predicted = clf.predict(X)
resolution = 100 # 100x100 background pixels
X2d_xmin, X2d_xmax = np.min(X_Train_embedded[:,0]), np.max(X_Train_embedded[:,0])
X2d_ymin, X2d_ymax = np.min(X_Train_embedded[:,1]), np.max(X_Train_embedded[:,1])
xx, yy = np.meshgrid(np.linspace(X2d_xmin, X2d_xmax, resolution), np.linspace(X2d_ymin, X2d_ymax, resolution))
# approximate Voronoi tessellation on a resolution x resolution grid using 1-NN
background_model = KNeighborsClassifier(n_neighbors=1).fit(X_Train_embedded, y_predicted)
voronoiBackground = background_model.predict(np.c_[xx.ravel(), yy.ravel()])
voronoiBackground = voronoiBackground.reshape((resolution, resolution))
#plot
plt.contourf(xx, yy, voronoiBackground)
plt.scatter(X_Train_embedded[:,0], X_Train_embedded[:,1], c=y)
plt.show()
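Note that the coloured background here is an approximation: the SVM is trained and evaluated in the original 30-feature space, and its predictions are then painted onto the 2-D t-SNE embedding through the 1-NN background model, so the regions show the classifier's predicted labels rather than its true decision surface.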
From my wine dataset, I am trying to plot a decision boundary between 2 columns, described by the snippet:
X0, X1 = X[:, 10], Y
I have taken the following code from the scikit-learn SVM plot tutorial and modified it to use my variable names/indices. However, when I run the code, I get an error saying:
ValueError: X.shape[1] = 2 should be equal to 11, the number of features at training time
with error stack as:
Traceback (most recent call last):
File "test-wine.py", line 120, in <module>
cmap=plt.cm.coolwarm, alpha=0.8)
File "test-wine.py", line 96, in plot_contours
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
File "/home/suhail/anaconda3/envs/ml/lib/python3.5/site-packages/sklearn/svm/base.py", line 548, in predict
y = super(BaseSVC, self).predict(X)
File "/home/suhail/anaconda3/envs/ml/lib/python3.5/site-packages/sklearn/svm/base.py", line 308, in predict
X = self._validate_for_predict(X)
File "/home/suhail/anaconda3/envs/ml/lib/python3.5/site-packages/sklearn/svm/base.py", line 459, in _validate_for_predict
(n_features, self.shape_fit_[1]))
ValueError: X.shape[1] = 2 should be equal to 11, the number of features at training time
I cannot understand the reason for the above error. Here is the code that I have modified.
import pandas as pd
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
data = pd.read_csv('winequality-red.csv').values
x_data_shape = data.shape[0]
y_data_shape = data.shape[1]
X = data[:, 0:y_data_shape-1]
Y = data[:, y_data_shape-1]
############### PLOT DECISION BOUNDARY SVM #############
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in
    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional
    Returns
    -------
    xx, yy : ndarray
    """
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.
    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
C = 1.0  # SVM regularization parameter
models = (SVC(kernel='linear', C=C),
          SVC(kernel='rbf', gamma=0.7, C=C),
          SVC(kernel='poly', degree=3, C=C))
models = (clf.fit(X, Y) for clf in models)
titles = ('SVC with linear kernel',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 10], Y
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=Y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Alcohol Content')
    ax.set_ylabel('Quality')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
What could be the reason for this error?
You trained the classifiers with all 11 features, but you provide only 2 features when the classifier is evaluated, which happens when Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) is called from within the plot_contours function.
To evaluate a classifier trained with 11 features, you need to provide all 11 features. This is what your error message indicates.
So in order to make the snippet work for you, you should limit yourself to two features (otherwise plotting two-dimensional decision boundaries does not make sense anyway), e.g. by using
X = data[:, :2]
Y = data[:, y_data_shape-1]
when reading your data.
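Putting it together, a minimal sketch of the corrected flow (assuming the same winequality-red.csv layout, with the quality labels in the last column):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
data = pd.read_csv('winequality-red.csv').values
X = data[:, :2]  # train on the same two features we will plot
Y = data[:, -1]  # quality labels
clf = SVC(kernel='linear', C=1.0).fit(X, Y)
h = .02  # mesh step over the two training features
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# the mesh now has 2 columns, matching the 2 features seen at training time
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
plt.show()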
Note that the example you referred to also uses only two features:
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
Using an SVM with the sklearn library, I would like to plot the data with each label shown in its own colour. I don't want to colour just the points; I want to fill the areas with colour.
I have now:
d_pred, d_train_std, d_test_std, l_train, l_test
d_pred contains the predicted labels.
I want to plot d_pred against d_train_std, which has shape (70000, 2), with the first column on the X-axis and the second column on the Y-axis.
Thank you.
You cannot visualize the decision surface for a lot of features: the dimensions are too many, and there is no way to visualize an N-dimensional surface. However, you can use 2 features and plot nice decision surfaces. The code is identical to the Case 1 (2D plot for 2 features) and Case 2 (3D plot for 3 features) iris examples in the earlier answer above, so it is not repeated here. (The same article applies: https://towardsdatascience.com/support-vector-machines-svm-clearly-explained-a-python-tutorial-for-classification-problems-29c539f3ad8?source=friends_link&sk=80f72ab272550d76a0cc3730d7c8af35)
It can be difficult to plot the learned function in 3D. An easy way to get a visualization is to generate a large number of points covering your feature space, run them through your learned function (my_model.predict), keep the points that land inside the function, and visualize those. The more points you add, the more defined the boundary will be.
Here's my code that does what @Christian Tuchez describes:
outputs = my_clf.predict(d_test_std)  # d_test_std: the standardised test features from the question (the original answer had "1_test", which is not a valid Python name)
hits = []
for i in range(outputs.size):
    if outputs[i] == 1:
        hits.append(i)  # save the index where it's 1
This saves the indices of all the points that land inside the function (stored in the "hits" list). You can probably accomplish this without a loop; I just found this easiest.
Then to display just those points, you'd write something like this:
ax.scatter(d_test_std[hits, 0], d_test_std[hits, 1], d_test_std[hits, 2], c="cyan", s=2, edgecolor=None)
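For reference, the loop above can be replaced by a one-liner (a sketch, assuming outputs is a NumPy array):
import numpy as np
hits = np.flatnonzero(outputs == 1)  # indices where the prediction equals 1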
I am currently performing multi-class SVM with a linear kernel using Python's scikit-learn library.
The sample training data and testing data are as given below:
Model data:
x = [[20,32,45,33,32,44,0],[23,32,45,12,32,66,11],[16,32,45,12,32,44,23],[120,2,55,62,82,14,81],[30,222,115,12,42,64,91],[220,12,55,222,82,14,181],[30,222,315,12,222,64,111]]
y = [0,0,0,1,1,2,2]
I want to plot the decision boundary and visualize the datasets. Can someone please help me plot this type of data?
The data given above is just mock data, so feel free to change the values.
It would be helpful if you could at least suggest the steps to follow.
Thanks in advance.
You have to choose only 2 features to do this. The reason is that you cannot draw a 7-dimensional plot. After selecting the 2 features, use only these for the visualization of the decision surface.
(I have also written an article about this here: https://towardsdatascience.com/support-vector-machines-svm-clearly-explained-a-python-tutorial-for-classification-problems-29c539f3ad8?source=friends_link&sk=80f72ab272550d76a0cc3730d7c8af35)
Now, the next question you would ask: how can I choose these 2 features? Well, there are a lot of ways. You could do a univariate F-value (feature ranking) test and see which features/variables are the most important, then use these for the plot, as sketched below. Also, we could reduce the dimensionality from 7 to 2 using PCA, for example.
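A minimal sketch of the univariate F-test route, using scikit-learn's SelectKBest with f_classif and the iris data as a stand-in for the 7-feature mock data:
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif
X, y = load_iris(return_X_y=True)
selector = SelectKBest(score_func=f_classif, k=2).fit(X, y)
print(selector.get_support(indices=True))  # indices of the two highest-scoring features
X2 = selector.transform(X)                 # (n_samples, 2) matrix to use for the plot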
2D plot for 2 features and using the iris dataset: the code is the same as the Case 1 example in the earlier answer above (take the first two iris features, fit svm.SVC(kernel='linear'), and draw the mesh with make_meshgrid/plot_contours), so it is not repeated here.
EDIT: Apply PCA to reduce dimensionality.
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.decomposition import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2)
Xreduced = pca.fit_transform(X)
def make_meshgrid(x, y, h=.02):
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
model = svm.SVC(kernel='linear')
clf = model.fit(Xreduced, y)
fig, ax = plt.subplots()
# title for the plots
title = ('Decision surface of linear SVC ')
# Set-up grid for plotting.
X0, X1 = Xreduced[:, 0], Xreduced[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_ylabel('PC2')
ax.set_xlabel('PC1')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title('Decision surface using the PCA transformed/projected features')
ax.legend()
plt.show()
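Note that this fits the SVC on the two PCA-projected features, so the plotted surface is the decision boundary in PCA space; it is not the original higher-dimensional hyperplane projected onto the plot.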
EDIT 1 (April 15th, 2020): for a 3D plot with 3 features (binary iris problem), the code is the same as the Case 2 example in the earlier answer above, so it is not repeated here.
You can use mlxtend. It's quite clean.
First do a pip install mlxtend, and then:
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions
svm = SVC(C=0.5, kernel='linear')
svm.fit(X, y)
plot_decision_regions(X, y, clf=svm, legend=2)
plt.show()
Where X is a two-dimensional data matrix, and y is the associated vector of training labels.
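To make this runnable end to end, X and y can come, for example, from the iris dataset used in the other answers (a sketch; any two-column feature matrix with integer labels will do):
from sklearn import datasets
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions
iris = datasets.load_iris()
X = iris.data[:, :2]  # two features, so the regions can be drawn in 2D
y = iris.target       # integer class labels, as plot_decision_regions expects
svm = SVC(C=0.5, kernel='linear')
svm.fit(X, y)
plot_decision_regions(X, y, clf=svm, legend=2)
plt.show()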
I tried to plot SVM classifiers on the Iris dataset; the starting code can be found here. I extended a pandas DataFrame with four extra columns that I want to plot in the same manner.
I made the four extra columns with this code:
iris = (iris
        .assign(SepalRatio=iris['SepalLengthCm'] / iris['SepalWidthCm'])
        .assign(PetalRatio=iris['PetalLengthCm'] / iris['PetalWidthCm'])
        .assign(SepalMultiplied=iris['SepalLengthCm'] * iris['SepalWidthCm'])
        .assign(PetalMultiplied=iris['PetalLengthCm'] * iris['PetalWidthCm']))
I also made an extra SpecieID column:
d = {"Iris-setosa" : 0, "Iris-versicolor": 1, "Iris-virginica": 2}
iris['SpecieID'] = iris['Species'].map(d).fillna(-1)
Then I extracted some columns to plot from the DataFrame, but when plotting I get the error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-49-9724675f32fa> in <module>()
77 xx, yy = make_meshgrid(X0, X1)
78
---> 79 for clf, title, ax in zip(models, titles, sub.flatten()):
80 plot_contours(ax, clf, xx, yy,
81 cmap=plt.cm.coolwarm, alpha=0.8)
<ipython-input-49-9724675f32fa> in <genexpr>(.0)
62 svm.SVC(kernel='rbf', gamma=0.7, C=C),
63 svm.SVC(kernel='poly', degree=3, C=C))
---> 64 models = (clf.fit(X, y) for clf in models)
65
66 # title for the plots
C:\Users\masc\AppData\Local\Continuum\Anaconda3\lib\site-packages\sklearn\svm\base.py in fit(self, X, y, sample_weight)
150
151 X, y = check_X_y(X, y, dtype=np.float64, order='C', accept_sparse='csr')
--> 152 y = self._validate_targets(y)
153
154 sample_weight = np.asarray([]
C:\Users\masc\AppData\Local\Continuum\Anaconda3\lib\site-packages\sklearn\svm\base.py in _validate_targets(self, y)
518 def _validate_targets(self, y):
519 y_ = column_or_1d(y, warn=True)
--> 520 check_classification_targets(y)
521 cls, y = np.unique(y_, return_inverse=True)
522 self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
C:\Users\masc\AppData\Local\Continuum\Anaconda3\lib\site-packages\sklearn\utils\multiclass.py in check_classification_targets(y)
170 if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
171 'multilabel-indicator', 'multilabel-sequences']:
--> 172 raise ValueError("Unknown label type: %r" % y_type)
173
174
ValueError: Unknown label type: 'unknown'
My modified code is:
from sklearn import svm
iris = pd.read_csv("Iris.csv") # the iris dataset is now a Pandas DataFrame
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in
    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional
    Returns
    -------
    xx, yy : ndarray
    """
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.
    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
# import some data to play with
#iris = datasets.load_iris()
iris_numpy_array = iris.as_matrix(columns=None)
print (iris_numpy_array)
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris_numpy_array[:, [1, 2]]
print (X)
y = iris_numpy_array[:, [10]]
y = y.ravel()
print (y)
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
          svm.LinearSVC(C=C),
          svm.SVC(kernel='rbf', gamma=0.7, C=C),
          svm.SVC(kernel='poly', degree=3, C=C))
models = (clf.fit(X, y) for clf in models)
# title for the plots
titles = ('SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
The contents of X and y are the same in my code as in the code example; the only difference is that they're extracted from the pandas DataFrame.
The original code is:
print(__doc__)
iris = pd.read_csv("Iris.csv") # the iris dataset is now a Pandas DataFrame
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in
    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional
    Returns
    -------
    xx, yy : ndarray
    """
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.
    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
          svm.LinearSVC(C=C),
          svm.SVC(kernel='rbf', gamma=0.7, C=C),
          svm.SVC(kernel='poly', degree=3, C=C))
models = (clf.fit(X, y) for clf in models)
# title for the plots
titles = ('SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
I solved the problem by using another template that also makes an SVM plot:
from sklearn import svm
from mlxtend.plotting import plot_decision_regions
X = iris[['SepalLengthCm', 'SepalWidthCm']]
y = iris['SpecieID']
clf = svm.SVC(decision_function_shape = 'ovo')
clf.fit(X.values, y.values)
# Plot Decision Region using mlxtend's awesome plotting function
plot_decision_regions(X=X.values,
                      y=y.values,
                      clf=clf,
                      legend=2)
# Update plot object with X/Y axis labels and Figure Title
plt.xlabel(X.columns[0], size=14)
plt.ylabel(X.columns[1], size=14)
plt.title('SVM Decision Region Boundary', size=16)
This code gives the plot:
I also tried to do this, and finally (after a lot of hours 😅) here is my code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
iris = load_iris()
# show data
# print(iris)
# show data columns
# print(iris['feature_names'])
# ========================================
# create dataframe
df = pd.DataFrame(iris['data'], columns=iris['feature_names'])
# print(df.head())
# add a 'target' column => it reflects the species
df['target'] = iris['target']
# print(df.head())
# target maps to the species names in target_names
# print(iris['target_names'])
# add the species names to the dataframe (the column name 'spesies' is kept, since it is used again below)
df['spesies'] = df['target'].apply(lambda x: iris['target_names'][x])
# print(df)
# ========================================
# split df by species
df0 = df[df['target'] == 0] # setosa
df1 = df[df['target'] == 1] # versicolor
df2 = df[df['target'] == 2] # virginica
print(df0.head())
print(df1.head())
print(df2.head())
# =======================================
# plot data
fig = plt.figure('Iris Data', figsize=(14,7))
# plot data sepal length vs sepal width
plt.subplot(121)
plt.scatter(df0['sepal length (cm)'], df0['sepal width (cm)'], color='r', marker ='o')
plt.scatter(df1['sepal length (cm)'], df1['sepal width (cm)'], color='y', marker ='o')
plt.scatter(df2['sepal length (cm)'], df2['sepal width (cm)'], color='b', marker ='o')
plt.xlabel('sepal length (cm)')
plt.ylabel('sepal width (cm)')
plt.title('Sepal width vs sepal length')
plt.legend(['0 Setosa', '1 Versicolor', '2 Virginica'])
plt.grid(True)
# plot data petal length vs petal width
plt.subplot(122)
plt.scatter(df0['petal length (cm)'], df0['petal width (cm)'], color='r', marker ='o')
plt.scatter(df1['petal length (cm)'], df1['petal width (cm)'], color='y', marker ='o')
plt.scatter(df2['petal length (cm)'], df2['petal width (cm)'], color='b', marker ='o')
plt.xlabel('petal length (cm)')
plt.ylabel('petal width (cm)')
plt.title('Petal width vs petal length')
plt.legend(['0 Setosa', '1 Versicolor', '2 Virginica'])
plt.grid(True)
plt.show()
# ===========================================
# split dataset into test & train
from sklearn.model_selection import train_test_split
x = df.drop(['target', 'spesies'], axis='columns') # feature data
y = df['target'] # target data
# split the dataset with test fraction = 0.2
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2)
print(len(x_train)) # 120 = 80%
print(len(x_test)) # 30 = 20% (result of test_size = .2)
# ============================================
# svm
from sklearn.svm import SVC
# model = SVC()
model = SVC(gamma='auto') # avoid warning
# train data
model.fit(x_train, y_train)
# accuracy
print(model.score(x_test, y_test))
# ===========================================
# prediction
print(model.predict([[5.1, 3.5, 1.4, 0.2]])) # output = [0] = species setosa
print(model.predict([[7.0, 3.2, 4.7, 1.4]])) # output = [1] = species versicolor
print(model.predict([[5.9, 3.0, 5.1, 1.8]])) # output = [2] = species virginica
# ===========================================
# plot svm
def make_meshgrid(x, y, h=.02):
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(
        np.arange(x_min, x_max, h),
        np.arange(y_min, y_max, h)
    )
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
iris = load_iris()
X = iris['data'][:, :2]
print(X)
y = iris['target']
print(y)
C = 1.0 # SVM regularization parameter
model = SVC(gamma = 'auto')
model = model.fit(X, y)
fig = plt.figure()
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
ax = plt.subplot()
plot_contours(ax, model, xx, yy, cmap='coolwarm', alpha=0.8)
ax.scatter(X0, X1, c=y, cmap='coolwarm', s=50, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('Sepal length (cm)')
ax.set_ylabel('Sepal width (cm)')
ax.set_title('Support Vector Machine')
plt.show()
And here is the final result: