I am trying to use this example from the sklearn documentation. I am not entirely sure what the code is doing, and I assume I am passing my dataset in the wrong way, because I recently got this error:
<ipython-input-26-3c3c0763766b> in <module>()
49 for ds in datasets:
50 # preprocess dataset, split into training and test part
---> 51 X, y = ds
52 X = StandardScaler().fit_transform(X)
53 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
ValueError: too many values to unpack
Any ideas as to how I can modify the code to work with my dataset (which is a multidimensional numpy array from a pandas dataframe) and fix the error?
dataURL = "peridotites_clean_complete.csv"
pd_data = pd.read_csv(dataURL)
rock_names = pd_data['ROCK NAME']
rock_compositions = pd_data.columns[1:]
rock_data = np.vstack([pd_data[x] for x in rock_compositions])
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    AdaBoostClassifier(),
    GaussianNB(),
    LDA(),
    QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [rock_data]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
The thing is, ds is a sequence with more than two values, like the one shown below:
>>> ds=['rockatr1','rockatr2','rockatr','rocktype']
>>> X,y=ds
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: too many values to unpack
You have to specify which part is X and which is y, as shown below. In classification data the last column is usually the label, which is what I assumed here.
>>> X,y=ds[:-1],ds[-1]
>>> X
['rockatr1', 'rockatr2', 'rockatr']
>>> y
'rocktype'
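For the asker's data specifically, a minimal sketch of the same idea (assuming the 'ROCK NAME' column holds the class labels and all remaining columns are numeric features, which is how the question's code reads the CSV) would build each dataset as an (X, y) pair so that X, y = ds unpacks cleanly:

import numpy as np
import pandas as pd

pd_data = pd.read_csv("peridotites_clean_complete.csv")
feature_cols = pd_data.columns[1:]         # composition columns
X = pd_data[feature_cols].values           # shape (n_samples, n_features)
y = pd_data['ROCK NAME'].values            # shape (n_samples,)

# each entry of datasets must be a 2-tuple, not a bare feature matrix
datasets = [(X, y)]

Note that np.vstack([pd_data[x] for x in rock_compositions]) from the question produces an array of shape (n_features, n_samples); it would need a transpose before it could be used as X.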
I am trying to visualize the effect of different kernels of a support vector machine on a dataset, but I have a problem reshaping an array:
The dataset I am using is the following:
https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones
df_train_X = pd.read_csv("X_train.txt", header=None, delim_whitespace=True)
df_train_y = pd.read_csv("y_train.txt", header=None, delim_whitespace=True)
X_train = df_train_X.values
y_train = df_train_y[0]
X = X_train
y = y_train
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for kernel in ('linear', 'rbf', 'poly'):
    clf = SVC(kernel=kernel, gamma=10)
    clf.fit(X_train, y_train)
    plt.figure()
    plt.clf()
    plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired,
                edgecolor='k', s=20)
    # Circle out the test data
    plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none',
                zorder=10, edgecolor='k')
    plt.axis('tight')
    x_min = X[:, 0].min()
    x_max = X[:, 0].max()
    y_min = X[:, 1].min()
    y_max = X[:, 1].max()
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
                linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
    plt.title(kernel)
plt.show()
The problem is with Z = Z.reshape(XX.shape), since I get the following error that stops execution: cannot reshape array of size 240000 into shape (200,200)
I don't know how to fix it so I can produce the visualization.
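One likely cause (an assumption on my part, not stated in the post): the HAR labels contain six activity classes, so clf.decision_function returns one column per class under SVC's default one-vs-rest shape, giving an array of shape (40000, 6), and 40000 × 6 = 240000, which cannot be reshaped to (200, 200). A minimal sketch of a workaround is to plot the predicted class label instead, which yields a single value per grid point:

# sketch: use the predicted class instead of the multi-column decision function
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)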
I'm trying to plot the decision boundary of an SVM classifier with a precomputed Laplacian kernel (code below), along the lines of this scikit-learn post. I'm taking the mesh grid values (xx, yy) as test points, just as in the post, and X and y as training points. I'm able to fit the SVM on the precomputed kernel for the training points.
import numpy as np
#from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.metrics.pairwise import laplacian_kernel
#Load the iris data
iris_data = load_iris()
#Split the data and target
X = iris_data.data[:, :2]
y = iris_data.target
#Step size in mesh plot
h = 0.02
#Convert X and y to a numpy array
X = np.array(X)
y = np.array(y)
#Using Laplacian kernel - https://scikit-learn.org/stable/modules/metrics.html#laplacian-kernel
K = np.array(laplacian_kernel(X, gamma=.5))
svm = SVC(kernel='precomputed').fit(K, np.ravel(y))
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
#plt.subplot(2, 2, i + 1)
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
# Calculate the gram matrix for test points. Here is where the error is coming. xx- test, X-train.
K_test = np.array(laplacian_kernel(xx, X, gamma=.5))
#Predict using the gram matrix for test
Z = svm.predict(np.c_[K_test])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title('SVC with Laplace kernel')
plt.show()
However, when I try to plot the decision boundary on graph for grid points, I get the below error.
Traceback (most recent call last):
File "/home/user/Src/laplce.py", line 37, in <module>
K_test = np.array(laplacian_kernel(xx, X, gamma=.5))
File "/home/user/.local/lib/python3.9/site-packages/sklearn/metrics/pairwise.py", line 1136, in laplacian_kernel
X, Y = check_pairwise_arrays(X, Y)
File "/home/user/.local/lib/python3.9/site-packages/sklearn/utils/validation.py", line 63, in inner_f
return f(*args, **kwargs)
File "/home/user/.local/lib/python3.9/site-packages/sklearn/metrics/pairwise.py", line 160, in check_pairwise_arrays
raise ValueError("Incompatible dimension for X and Y matrices: "
ValueError: Incompatible dimension for X and Y matrices: X.shape[1] == 280 while Y.shape[1] == 2
So, how do I resolve the error and plot the decision boundary for the iris data? Thanks in advance.
The issue is getting your mesh grid into the same shape as the training matrix before applying the Laplacian kernel. So if we run the code below to fit the SVM:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.metrics.pairwise import laplacian_kernel
iris_data = load_iris()
X = iris_data.data[:, :2]
y = iris_data.target
h = 0.02
K = laplacian_kernel(X,gamma=.5)
svm = SVC(kernel='precomputed').fit(K, y)
Create the grid like you did:
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
Your original input to the Laplacian kernel had shape (150, 2), so you need to stack xx and yy into 2 columns:
x_test = np.vstack([xx.ravel(),yy.ravel()]).T
K_test = laplacian_kernel(x_test, X, gamma=.5)
Z = svm.predict(K_test)
Z = Z.reshape(xx.shape)
Then plot:
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
The predictions are more or less correct; you can see the model does not separate classes 1 and 2 very well:
pd.crosstab(y,svm.predict(K))
col_0   0   1   2
row_0
0      49   1   0
1       0  35  15
2       0  11  39
Sorry if this is a very simple question, but I'm a newcomer to the field.
My specific question is this: I have trained an XGBoost classifier in Python. After training, how can I get the samples in my training data that are closer than a fixed value to the decision boundary of the model?
Thanks
I don't think xgboost has a built-in method for that, or that there is a closed-form expression for its decision boundary like there is for SVC. This visualization could help, though, for 2D feature spaces:
import numpy as np
import xgboost as xgb
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split

def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # plot all samples, coloured by class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)
    # highlight test samples
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0],
                    X_test[:, 1],
                    facecolors='none',
                    edgecolors='black',
                    alpha=1.0,
                    linewidths=1,
                    marker='o',
                    s=55, label='test set')

X, y = make_moons(noise=0.3, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
xgb_clf = xgb.XGBClassifier()
xgb_clf = xgb_clf.fit(X_train, y_train)
plot_decision_regions(X_test, y_test, xgb_clf)
plt.show()
The plot_decision_regions function is from the Python Machine Learning book, available on its public GitHub here.
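If an approximate notion of closeness is acceptable, one common workaround (an assumption on my part, not an exact geometric distance to the boundary) is to use the predicted class probabilities: in a binary problem, samples whose predicted probability is near 0.5 lie close to the decision boundary. A minimal sketch, reusing the fitted xgb_clf above and a hypothetical threshold value:

import numpy as np

threshold = 0.05  # hypothetical cut-off; tune to taste
proba = xgb_clf.predict_proba(X_train)[:, 1]          # P(class 1) for each training sample
near_boundary = X_train[np.abs(proba - 0.5) < threshold]
print(near_boundary)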
I was using "svm" classifier to classify it was a bike or car.
So, my features were 0,1,2 columns and dependents was 3rd column.I can able to clearly see the classification,but i don't know how to print all the points based on classification in diagram.
import numpy as np
import operator
import pandas as pd
from matplotlib import pyplot as plt
from sklearn import svm
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.svm import SVC

dataframe = pd.read_csv(DATASET_PATH)
dataframe = dataframe.dropna(how='any', axis=0)
SVM_Trained_Model = preprocessing.LabelEncoder()
train_data = dataframe[0:len(dataframe)]
le = preprocessing.LabelEncoder()
col = dataframe.columns[START_TRAIN_COLUMN:].astype('U')
col_name = ["no_of_wheels", "dimensions", "windows", "vehicle_type"]
for i in range(0, len(col_name)):
    train_data[col_name[i]] = le.fit_transform(train_data[col_name[i]])
train_column = np.array(train_data[col]).astype('U')
data = train_data.iloc[:, [0, 1, 2]].values
target = train_data.iloc[:, 3].values

# split into train and test sets
data_train, data_test, target_train, target_test = train_test_split(
    data, target, test_size=0.30, random_state=0)

# classifier model
svc_model = SVC(kernel='rbf', probability=True)
svc_model.fit(data_train, target_train)
all_labels = svc_model.predict(data_test)

X_set, y_set = data_train, target_train
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
# pad the grid with a zero third feature so it matches the 3-column training data
Xpred = np.array([X1.ravel(), X2.ravel()] +
                 [np.repeat(0, X1.ravel().size) for _ in range(1)]).T
pred = svc_model.predict(Xpred).reshape(X1.shape)

plt.contourf(X1, X2, pred, alpha=0.75, cmap=ListedColormap(('white', 'orange', 'pink')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
colors = ['red', 'yellow', 'cyan', 'blue']
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap((colors[i]))(i), label=j)
plt.title('Multiclass Classifier')
plt.xlabel('Features')
plt.ylabel('Dependents')
plt.legend()
plt.show()
[Image: decision regions plotted in white, orange and pink with the training points overlaid]
So here is my diagram. I need to print the points that fall in the pink and white regions of the diagram using Python's print(). Please help me get these points.
You need to select and use only 2 features in order to make a 2D surface plot.
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
def make_meshgrid(x, y, h=.02):
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    return xx, yy

def plot_contours(ax, clf, xx, yy, **params):
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
model = svm.SVC(kernel='linear')
clf = model.fit(X, y)
fig, ax = plt.subplots()
# title for the plots
title = ('Decision surface of linear SVC ')
# Set-up grid for plotting.
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_ylabel('y label here')
ax.set_xlabel('x label here')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax.legend()
plt.show()
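To actually print the points that fall in each coloured region, one option (a sketch, under the assumption that each region simply corresponds to a predicted class) is to predict a class for every sample and print the samples grouped by prediction:

# sketch: list the samples belonging to each predicted class / region
preds = clf.predict(X)
for cls in np.unique(preds):
    print("points predicted as class", cls)
    print(X[preds == cls])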
I'm confused about SVC with a kernel method, e.g. rbf. My understanding is that when SVC with an rbf kernel is fitted on (x, y), it computes the rbf kernel matrix K of (x, x), whose shape is [n_samples, n_samples], and then fits a mapping from this kernel matrix K to y with the hinge loss.
Following this intuition, I use sklearn.svm.SVC and sklearn.metrics.pairwise.rbf_kernel to compare the results of:
svc(kernel='rbf').fit(x,y)
# and
svc(kernel='precomputed').fit(rbf_kernel(x,x),y)
# and
svc(kernel='linear').fit(rbf_kernel(x,x),y)
I think these are supposed to give the same classification results, but there are some differences between the three.
More specifically, if you run the code below, svc(kernel='precomputed').fit(rbf_kernel(x,x),y) performs the same as svc(kernel='rbf').fit(x,y), but svc(kernel='linear').fit(rbf_kernel(x,x),y) does not perform as well as the other two methods.
Could anyone help me to figure out the reason? Thanks.
from sklearn.datasets import make_moons, make_circles, make_classification
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
h = .02 # step size in the mesh
names = [
    "RBF SVM",
    # "RP Ridge",
    "RBF-Precomp SVM",
    "RBF-Linear SVM",
]
classifiers = [
    SVC(gamma=1, C=1),
    SVC(kernel='precomputed', C=1, gamma=1),
    SVC(kernel="linear", C=1),
]
datasets = [
    make_moons(n_samples=200, noise=0, random_state=0),
    make_moons(n_samples=200, noise=0.2, random_state=0),
    make_circles(n_samples=200, noise=0, factor=0.5, random_state=0),
    make_circles(n_samples=200, noise=0.2, factor=0.5, random_state=0),
]
figure = plt.figure(figsize=(int((len(classifiers)+1)*3), int(len(datasets)*3)))
i=1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=.2, random_state=42)
    K_train = rbf_kernel(X_train, X_train, gamma=1)
    K_test = rbf_kernel(X_test, X_train, gamma=1)
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    if ds_cnt == 0:
        ax.set_title("Input data")
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
               edgecolors='k')
    # Plot the testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
               edgecolors='k', marker='*')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        if "Pre" in name:
            clf.fit(K_train, y_train)
            score = clf.score(K_test, y_test)
        elif "Linear" in name:
            clf.fit(K_train, y_train)
            score = clf.score(K_test, y_test)
        else:
            clf.fit(X_train, y_train)
            score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        # create test data from the mesh grid
        mesh_data = np.c_[xx.ravel(), yy.ravel()]
        K_mesh = rbf_kernel(mesh_data, X_train, gamma=1)
        if "Pre" in name or "Linear" in name:
            Z = clf.decision_function(K_mesh)
        else:
            Z = clf.decision_function(mesh_data)
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        # draw every mesh grid cell, distinguished by colors from plt.cm.RdBu
        ax.contourf(xx, yy, Z, 66, cmap=cm, alpha=0.6)
        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                   edgecolors='k')
        # Plot the testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   edgecolors='k', alpha=0.6, marker='*')
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        if ds_cnt == 0:
            ax.set_title(name)
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1
plt.tight_layout()
plt.savefig('bench_test.png')
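For what it's worth, a possible explanation (my own reasoning, not part of the post above): with kernel='precomputed', SVC treats the entries of the supplied matrix directly as kernel evaluations, so fitting on rbf_kernel(x, x) is mathematically the same model as kernel='rbf'. With kernel='linear', the rows of rbf_kernel(x, x) are treated as ordinary feature vectors, so the effective kernel between two samples becomes the dot product of their rows of K, i.e. K Kᵀ, which is a different kernel and need not perform as well. A minimal sketch illustrating that the two implied kernel matrices differ:

# sketch: compare the kernel matrix used by 'precomputed' with the one implied
# by treating K as a feature matrix under a linear kernel
import numpy as np
from sklearn.datasets import make_moons
from sklearn.metrics.pairwise import rbf_kernel

X, y = make_moons(n_samples=200, noise=0.2, random_state=0)
K = rbf_kernel(X, X, gamma=1)
print(np.allclose(K, K @ K.T))  # False: these are different kernel matrices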