I am trying to plot multiple gaussians on one plot with different heights, widths and centers from this type of dataframe:
   height(y)  fwhm(width)  center(x)
0  24.122348     1.827472         98
1  24.828252     4.333549        186
2  26.810812     1.728494        276
3  25.997897     1.882424        373
4  24.503944     2.222210        471
5  27.488572     1.750039        604
6  31.556823     3.844592        683
7  27.920951     0.891394        792
8  27.009054     1.917744        897
Any idea on how to go about it?
We will reuse the Gaussian plotter as defined in
Creating gaussians of fixed width and std
(the code is repeated below).
Data
The following code generates the above dataframe.
import pandas as pd

data = [
(24.122348, 1.827472, 98),
(24.828252, 4.333549, 186),
(26.810812, 1.728494, 276),
(25.997897, 1.882424, 373),
(24.503944, 2.222210, 471),
(27.488572, 1.750039, 604),
(31.556823, 3.844592, 683),
(27.920951, 0.891394, 792),
(27.009054, 1.917744, 897),
]
df = pd.DataFrame(data, columns=["height", "fwhm", "center"])
Gaussian
Taken from the reference post above.
import matplotlib.cm as mpl_cm
import matplotlib.colors as mpl_colors
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import cdist
class Gaussian:
def __init__(self, size):
self.size = size
self.center = np.array(self.size) / 2
self.axis = self._calculate_axis()
def _calculate_axis(self):
"""
Generate a list of rows, columns over multiple axis.
Example:
Input: size=(5, 3)
Output: [array([0, 1, 2, 3, 4]), array([[0], [1], [2]])]
"""
axis = [np.arange(size).reshape(-1, *np.ones(idx, dtype=np.uint8))
for idx, size in enumerate(self.size)]
return axis
def update_size(self, size):
""" Update the size and calculate new centers and axis. """
self.size = size
self.center = np.array(self.size) / 2
self.axis = self._calculate_axis()
def create(self, dim=1, fwhm=3, center=None):
""" Generate a gaussian distribution on the center of a certain width. """
center = center if center is not None else self.center[:dim]
distance = sum((ax - ax_center) ** 2 for ax_center, ax in zip(center, self.axis))
distribution = np.exp(-4 * np.log(2) * distance / fwhm ** 2)
return distribution
def creates(self, dim=2, fwhm=3, centers: np.ndarray = None):
""" Combines multiple gaussian distributions based on multiple centers. """
        centers = np.array(centers if centers is not None else np.array([self.center]).T).T
indices = np.indices(self.size).reshape(dim, -1).T
distance = np.min(cdist(indices, centers, metric='euclidean'), axis=1)
distance = np.power(distance.reshape(self.size), 2)
distribution = np.exp(-4 * np.log(2) * distance / fwhm ** 2)
return distribution
    @staticmethod
def plot(distribution, show=True):
""" Plotter, in case you do not know the dimensions of your distribution, or want the same interface. """
if len(distribution.shape) == 1:
return Gaussian.plot1d(distribution, show)
if len(distribution.shape) == 2:
return Gaussian.plot2d(distribution, show)
if len(distribution.shape) == 3:
return Gaussian.plot3d(distribution, show)
raise ValueError(f"Trying to plot {len(distribution.shape)}-dimensional data, "
f"Only 1D, 2D, and 3D distributions are valid.")
    @staticmethod
def plot1d(distribution, show=True, vmin=None, vmax=None, cmap=None):
        norm = mpl_colors.Normalize(
            vmin=vmin if vmin is not None else distribution.min(),
            vmax=vmax if vmax is not None else distribution.max()
        )
cmap = mpl_cm.ScalarMappable(norm=norm, cmap=cmap or mpl_cm.get_cmap('jet'))
cmap.set_array(distribution)
c = [cmap.to_rgba(value) for value in distribution] # defines the color
fig, ax = plt.subplots()
ax.scatter(np.arange(len(distribution)), distribution, c=c)
ax.plot(distribution)
fig.colorbar(cmap)
if show: plt.show()
return fig
    @staticmethod
def plot2d(distribution, show=True):
fig, ax = plt.subplots()
img = ax.imshow(distribution, cmap='jet')
fig.colorbar(img)
if show: plt.show()
return fig
    @staticmethod
def plot3d(distribution, show=True):
m, n, c = distribution.shape
x, y, z = np.mgrid[:m, :n, :c]
out = np.column_stack((x.ravel(), y.ravel(), z.ravel(), distribution.ravel()))
x, y, z, values = np.array(list(zip(*out)))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Standalone colorbar, directly creating colorbar on fig results in strange artifacts.
img = ax.scatter([0, 0], [0, 0], [0, 0], c=[0, 1], cmap=mpl_cm.get_cmap('jet'))
        img.set_visible(False)
fig.colorbar(img)
ax.scatter(x, y, z, c=values, cmap=mpl_cm.get_cmap('jet'))
if show: plt.show()
return fig
Solution
Since it is unclear to me how you want the Gaussian distributions to interact where they overlap, I will assume that you want the maximum value.
The main logic is that we can now generate a unique Gaussian distribution for every center with the given full width at half maximum (fwhm), and take the maximum over all of the distributions.
distribution = np.zeros((1200,))
df = pd.DataFrame(data, columns=["height", "fwhm", "center"])
gaussian = Gaussian(size=distribution.shape)
for idx, row in df.iterrows():
    # Scale each peak by its height, so the curves differ in amplitude as the question asks.
    distribution = np.maximum(distribution, row.height * gaussian.create(fwhm=row.fwhm, center=[row.center]))
gaussian.plot(distribution, show=True)
Result
Edit
Since the question now asks for a different distribution, you can adjust the code in the create (and creates) method with the following to get different types of distributions:
def create(self, dim=1, fwhm=3, center=None):
""" Generate a gaussian distribution on the center of a certain width. """
center = center if center is not None else self.center[:dim]
distance = sum((ax - ax_center) for ax_center, ax in zip(center, self.axis))
distribution = sps.beta.pdf(distance / max(distance), a=3, b=100)
return distribution
Here sps.beta comes from import scipy.stats as sps, and it can be swapped for a gamma distribution as well, e.g. distribution = sps.gamma.pdf(distance, 10, 40).
Note that the distance is no longer squared, and that the argument fwhm could be replaced by the parameters required for the chosen distribution.
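For a quick feel for the shape, here is a minimal self-contained sketch of the gamma variant (the center 100, the axis length 1200, and the parameters 10 and 40 are illustrative only, taken from the inline example above):
import numpy as np
import scipy.stats as sps
import matplotlib.pyplot as plt

# Illustrative only: one gamma-shaped peak on a 1200-point axis.
axis = np.arange(1200)
distance = axis - 100                            # unsquared, signed distance, as in the modified create above
distribution = sps.gamma.pdf(distance, 10, 40)   # shape a=10, loc=40 (example values)
plt.plot(axis, distribution)
plt.show()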
I need to plot the decision boundary for KNN without using sklearn. I have implemented the classifier, but I am not able to plot the decision boundary. The plot should be as described in the book "The Elements of Statistical Learning: Data Mining, Inference, and Prediction" (Second Edition) by Trevor Hastie, Robert Tibshirani & Jerome Friedman (ElemStatLearn). The plot required is shown below:
KNN k=15 classifier Original
So far, I have been able to plot only the image below:
KNN k=15 classifier Plot produced so far
I have calculated the grid points and the predictions on those points. I also tried to collect boundary points wherever a grid point's prediction differs from the previous grid point's prediction, and then sorted those points. But when I plot them, they don't look like the boundary that is required.
import numpy as np
from numpy import arange, meshgrid, hstack
from matplotlib.pyplot import figure, axis, scatter, plot, show

def get_grid(X):
# Creating grids for decision surface
## Define bounds of the surface
min1, max1 = X[:, 0].min() - 0.2, X[:, 0].max() + 0.2
min2, max2 = X[:, 1].min() - 0.2, X[:, 1].max() + 0.2
## Define the x and y points
x1grid = arange(min1, max1, 0.1)
x2grid = arange(min2, max2, 0.1)
## Create all of the lines and rows of the grid
xx, yy = meshgrid(x1grid, x2grid)
## Flatten each grid to a vector
r1, r2 = xx.flatten(), yy.flatten()
r1, r2 = r1.reshape((len(r1), 1)), r2.reshape((len(r2), 1))
## Horizontally stack vectors to create x1, x2 input for the model
grid_X = hstack((r1, r2))
return grid_X
X, y = data[:, :-1], data[:, -1].astype(int)
# Custom class defined
model = KNNClassifier(num_neighbors = 5)
model.fit(X, y)
y_pred = model.predict(X)
grid_X = get_grid(X)
grid_yhat = model.predict(grid_X)
boundary = []
for i in range(1, len(grid_X)):
if grid_yhat[i] != grid_yhat[i-1]:
boundary.append((grid_X[i] + grid_X[i-1]) * 0.5)
boundary_x = [b[0] for b in boundary]
boundary_y = [b[1] for b in boundary]
order = np.argsort(boundary_x)
boundary_x = np.array(boundary_x)[order]
boundary_y = np.array(boundary_y)[order]
def plot_decision_surface(X, y, boundary_X, boundary_y, grid_X, grid_yhat):
figure(figsize=(10,10))
axis('off')
# Plot the ground truth data points in the 2D feature space
X_pos, X_neg = split_X(X, y)
scatter(X_pos[:, 0], X_pos[:, 1], facecolors='none', edgecolors='orange', marker='o', linewidth=3, s=60)
scatter(X_neg[:, 0], X_neg[:, 1], facecolors='none', edgecolors='blue', marker='o', linewidth=3, s=60)
grid_pos, grid_neg = split_X(grid_X, grid_yhat)
# Plot and color the grid of x, y values with class
scatter(grid_pos[:, 0], grid_pos[:, 1], color='orange', marker='.', linewidth=0.05)
scatter(grid_neg[:, 0], grid_neg[:, 1], color='blue', marker='.', linewidth=0.05)
# Plot the decision boundary for the classification
scatter(boundary_X, boundary_y, color='k')
plot(boundary_X, boundary_y, color='k')
# Plot Info
show()
plot_decision_surface(X, y, boundary_X, boundary_y, grid_X, grid_yhat)
Failed attempt to plot the boundary is shown below:
Failed attempt to plot the boundary
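(A hedged sketch of the usual alternative, for reference: rather than sorting boundary midpoints, reshape the flat grid predictions back onto the meshgrid and let plt.contour trace the class change. The 0.1 grid step and binary 0/1 labels are assumed from the code above.)
import numpy as np
import matplotlib.pyplot as plt

# Recover the grid shape produced by get_grid (meshgrid rows vary along x2).
n1 = len(np.arange(X[:, 0].min() - 0.2, X[:, 0].max() + 0.2, 0.1))
n2 = len(np.arange(X[:, 1].min() - 0.2, X[:, 1].max() + 0.2, 0.1))
xx = grid_X[:, 0].reshape(n2, n1)
yy = grid_X[:, 1].reshape(n2, n1)
zz = grid_yhat.reshape(n2, n1)
plt.contour(xx, yy, zz, levels=[0.5], colors='k')  # boundary between classes 0 and 1
plt.show()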
I'm trying to find the points inside an ellipse. It is not an 'ordinary' ellipse; it is based on the mean and standard deviation, which is much easier than calculating eigenvalues in order to find a confidence interval.
The function is not written by me; here are the sources:
https://matplotlib.org/devdocs/gallery/statistics/confidence_ellipse.html
https://carstenschelp.github.io/2018/09/14/Plot_Confidence_Ellipse_001.html
Here is the code:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
x = np.array([21.5,16.3,13.7,20.0,17.4,10.4,16.9,7.0,13.8,15.2,13.8,8.2,18.0,9.4,13.2,7.2,21.2,30.2,13.5,29.8,18.3,20.2,31.1,21.5,29.8,18.0,13.1,24.1,32.5,15.4,16.1,15.0,25.9,3.0,17.0,23.6,17.6,-11.8,22.2,26.6,17.8,20.6,23.0,28.0,25.3,22.1,22.4,16.3,22.0,12.1])
y = np.array([92.4,98.2,97.6,95.9,96.5,92.1,89.6,89.4,89.2,89.4,90.2,86.7,89.5,89.9,90.2,87.6,104.0,87.3,99.4,85.4,92.8,92.0,87.9,96.2,94.1,95.2,95.6,86.3,87.6,89.5,95.0,97.1,93.0,87.8,98.9,98.2,100.1,45.4,92.1,91.6,94.7,93.9,91.4,91.1,95.7,93.8,96.4,94.1,94.0,89.1])
#function obtained from matplotlib documentation
#https://matplotlib.org/devdocs/gallery/statistics/confidence_ellipse.html
def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):
"""
Create a plot of the covariance confidence ellipse of *x* and *y*.
Parameters
----------
x, y : array-like, shape (n, )
Input data.
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
The number of standard deviations to determine the ellipse's radiuses.
**kwargs
Forwarded to `~matplotlib.patches.Ellipse`
Returns
-------
matplotlib.patches.Ellipse
"""
if x.size != y.size:
raise ValueError("x and y must be the same size")
cov = np.cov(x, y)
pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])
# Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,
facecolor=facecolor, **kwargs)
    # Calculating the standard deviation of x from
    # the square root of the variance and multiplying
    # with the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = np.mean(x)
    # calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = np.mean(y)
transf = transforms.Affine2D() \
.rotate_deg(45) \
.scale(scale_x, scale_y) \
.translate(mean_x, mean_y)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse)
#implementation
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.scatter(x,y,s=5)
ellipse = confidence_ellipse(x,y,ax,n_std=2,edgecolor='red')
plt.show()
Afterwards I tried to get the center coordinates and the points inside the ellipse, as below:
ellipse.get_center()
Out:(0,0)
ellipse.contains_point([21.5,92.4])#first points in x,y arrays
Out:False
ellipse.contains_point([0,0])#get_center() result
Out:False
The ellipse plot works fine, but I need the coordinates of every point inside the ellipse.
What am I doing wrong? I already checked similar questions, but they didn't work either.
The confidence_ellipse example function only returns a patch for drawing, and contains_point checks the point in display (pixel) coordinates rather than data coordinates, so it will not tell you whether a data point lies inside the ellipse.
What you probably want is something like:
import math
import numpy as np

class distribution():
    def __init__(self, x, y):
        self.cov = np.cov(x, y)
        self.mean = np.array([np.mean(x), np.mean(y)])

    def dist(self, x, y):
        # Mahalanobis distance of the point (x, y) from the data mean
        diff = self.mean - np.array([x, y])
        return math.sqrt(diff @ np.linalg.inv(self.cov) @ diff)

    def is_inside(self, x, y, nstd=2.0):
        return self.dist(x, y) < nstd
Then you can do:
d = distribution(x,y)
d.is_inside(24.1,86.3)
which returns True.
Then, for all the points:
points = np.array(list(zip(x, y)))
points_in = list(filter(lambda p: d.is_inside(p[0],p[1]), points))
points_out = list(filter(lambda p: not d.is_inside(p[0],p[1]), points))
x_in = [ x[0] for x in points_in]
y_in = [ x[1] for x in points_in]
x_out = [ x[0] for x in points_out]
y_out = [ x[1] for x in points_out]
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 8))
ax2.scatter(x_in,y_in,s=5, facecolor="green")
ax2.scatter(x_out,y_out, s=5, facecolor="red")
ellipse = confidence_ellipse(x,y,ax2,n_std=2,edgecolor='red') # this presupposes that confidence_ellipse is still defined
plt.show()
And your output should look something like this:
Where the red points are more than 2 standard deviations away, and the green ones are inside.
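A vectorized alternative sketch (same Mahalanobis math as the class above, computing all the distances in one shot):
import numpy as np

pts = np.column_stack((x, y))
diff = pts - [np.mean(x), np.mean(y)]
icov = np.linalg.inv(np.cov(x, y))
md = np.sqrt(np.einsum('ij,jk,ik->i', diff, icov, diff))  # Mahalanobis distance per point
inside = md < 2.0   # boolean mask, same rule as is_inside with nstd=2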
You can plot all the (x, y) labels on the plot.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
x = np.array([21.5,16.3,13.7,20.0,17.4,10.4,16.9,7.0,13.8,15.2,13.8,8.2,18.0,9.4,13.2,7.2,21.2,30.2,13.5,29.8,18.3,20.2,31.1,21.5,29.8,18.0,13.1,24.1,32.5,15.4,16.1,15.0,25.9,3.0,17.0,23.6,17.6,-11.8,22.2,26.6,17.8,20.6,23.0,28.0,25.3,22.1,22.4,16.3,22.0,12.1])
y = np.array([92.4,98.2,97.6,95.9,96.5,92.1,89.6,89.4,89.2,89.4,90.2,86.7,89.5,89.9,90.2,87.6,104.0,87.3,99.4,85.4,92.8,92.0,87.9,96.2,94.1,95.2,95.6,86.3,87.6,89.5,95.0,97.1,93.0,87.8,98.9,98.2,100.1,45.4,92.1,91.6,94.7,93.9,91.4,91.1,95.7,93.8,96.4,94.1,94.0,89.1])
#function obtained from matplotlib documentation
#https://matplotlib.org/devdocs/gallery/statistics/confidence_ellipse.html
def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):
"""
Create a plot of the covariance confidence ellipse of *x* and *y*.
Parameters
----------
x, y : array-like, shape (n, )
Input data.
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
The number of standard deviations to determine the ellipse's radiuses.
**kwargs
Forwarded to `~matplotlib.patches.Ellipse`
Returns
-------
matplotlib.patches.Ellipse
"""
if x.size != y.size:
raise ValueError("x and y must be the same size")
cov = np.cov(x, y)
pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])
# Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,
facecolor=facecolor, **kwargs)
    # Calculating the standard deviation of x from
    # the square root of the variance and multiplying
    # with the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = np.mean(x)
    # calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = np.mean(y)
transf = transforms.Affine2D() \
.rotate_deg(45) \
.scale(scale_x, scale_y) \
.translate(mean_x, mean_y)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse)
#implementation
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
ax.scatter(x,y,s=15)
ellipse = confidence_ellipse(x,y,ax,n_std=2,edgecolor='red')
# zip joins x and y coordinates in pairs
for xi, yi in zip(x, y):   # renamed loop variables to avoid shadowing the x and y arrays
    label = f"({xi},{yi})"
    #label = "{:.2f}".format(yi) # plot just the y-value of the point
    # print(label) # uncomment if you want to print the points for reference
    plt.annotate(label, # this is the text
                 (xi, yi), # this is the point to label
                 textcoords="offset points", # how to position the text
                 xytext=(0,10), # distance from text to points (x,y)
                 ha='center') # horizontal alignment can be left, right or center
plt.show()
P.S.: You need to adjust xytext accordingly; it just sets where each label is placed relative to its point in the figure.
You can also print those values for your reference: just uncomment print(label) and it will print all the points for you.
(21.5,92.4)
(16.3,98.2)
(13.7,97.6)
(20.0,95.9)
(17.4,96.5)
(10.4,92.1)
(16.9,89.6)
(7.0,89.4)
(13.8,89.2)
(15.2,89.4)
(13.8,90.2)
(8.2,86.7)
(18.0,89.5)
(9.4,89.9)
(13.2,90.2)
(7.2,87.6)
(21.2,104.0)
(30.2,87.3)
(13.5,99.4)
(29.8,85.4)
(18.3,92.8)
(20.2,92.0)
(31.1,87.9)
(21.5,96.2)
(29.8,94.1)
(18.0,95.2)
(13.1,95.6)
(24.1,86.3)
(32.5,87.6)
(15.4,89.5)
(16.1,95.0)
(15.0,97.1)
(25.9,93.0)
(3.0,87.8)
(17.0,98.9)
(23.6,98.2)
(17.6,100.1)
(-11.8,45.4)
(22.2,92.1)
(26.6,91.6)
(17.8,94.7)
(20.6,93.9)
(23.0,91.4)
(28.0,91.1)
(25.3,95.7)
(22.1,93.8)
(22.4,96.4)
(16.3,94.1)
(22.0,94.0)
(12.1,89.1)
I have two variables that I have plotted using the matplotlib scatter function.
I would like to show the 68% confidence region by highlighting it in the plot. I know how to show it in a histogram, but I don't know how to do it for a 2D plot like this (x vs y). In my case, the x is Mass and y is Ngal Mstar+2.
An example image of what I am looking for looks like this:
Here they have showed the 68% confidence region using dark blue and 95% confidence region using light blue.
Can it be achieved using one of the scipy.stats modules?
To plot a region between two curves, you could use pyplot.fill_between().
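For instance, a minimal sketch of fill_between (the curves here are made up purely for illustration):
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 200)
lower, upper = np.sin(x) - 0.5, np.sin(x) + 0.5  # illustrative band edges
plt.plot(x, np.sin(x), color='black')
plt.fill_between(x, lower, upper, color='blue', alpha=0.3)
plt.show()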
As for your confidence region, I was not sure what you wanted to achieve, so I exemplified with simultaneous confidence bands, by modifying the code from:
https://en.wikipedia.org/wiki/Confidence_and_prediction_bands#cite_note-2
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
## Sample size.
n = 50
## Predictor values.
XV = np.random.uniform(low=-4, high=4, size=n)
XV.sort()
## Design matrix.
X = np.ones((n,2))
X[:,1] = XV
## True coefficients.
beta = np.array([0, 1.], dtype=np.float64)
## True response values.
EY = np.dot(X, beta)
## Observed response values.
Y = EY + np.random.normal(size=n)*np.sqrt(20)
## Get the coefficient estimates.
u,s,vt = np.linalg.svd(X,0)
v = np.transpose(vt)
bhat = np.dot(v, np.dot(np.transpose(u), Y)/s)
## The fitted values.
Yhat = np.dot(X, bhat)
## The MSE and RMSE.
MSE = ((Y-EY)**2).sum()/(n-X.shape[1])
s = np.sqrt(MSE)
## These multipliers are used in constructing the intervals.
XtX = np.dot(np.transpose(X), X)
V = [np.dot(X[i,:], np.linalg.solve(XtX, X[i,:])) for i in range(n)]
V = np.array(V)
## The F quantile used in constructing the Scheffe interval.
QF = sp.fdtri(X.shape[1], n-X.shape[1], 0.95)
QF_2 = sp.fdtri(X.shape[1], n-X.shape[1], 0.68)
## The lower and upper bounds of the Scheffe band.
D = s*np.sqrt(X.shape[1]*QF*V)
LB,UB = Yhat-D,Yhat+D
D_2 = s*np.sqrt(X.shape[1]*QF_2*V)
LB_2,UB_2 = Yhat-D_2,Yhat+D_2
## Make the plot.
plt.clf()
plt.plot(XV, Y, 'o', ms=3, color='grey')
## plt.hold was removed in Matplotlib 3; figures hold by default, so no call is needed here.
a = plt.plot(XV, EY, '-', color='black', zorder = 4)
plt.fill_between(XV, LB_2, UB_2, where = UB_2 >= LB_2, facecolor='blue', alpha= 0.3, zorder = 0)
b = plt.plot(XV, LB_2, '-', color='blue', zorder=1)
plt.plot(XV, UB_2, '-', color='blue', zorder=1)
plt.fill_between(XV, LB, UB, where = UB >= LB, facecolor='blue', alpha= 0.3, zorder = 2)
b = plt.plot(XV, LB, '-', color='blue', zorder=3)
plt.plot(XV, UB, '-', color='blue', zorder=3)
d = plt.plot(XV, Yhat, '-', color='red',zorder=4)
plt.ylim([-8,8])
plt.xlim([-4,4])
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
The output looks like this:
First of all, thank you @snake_charmer for your answer, but I have found a simpler way of solving the issue, using curve_fit from scipy.optimize.
I fit my data sample using curve_fit, which gives me my best-fit parameters. It also gives me the estimated covariance of the parameters; its diagonal provides the variances of the parameter estimates. To compute one-standard-deviation errors on the parameters, we can use np.sqrt(np.diag(pcov)), where pcov is the covariance matrix.
from scipy.optimize import curve_fit

def fitfunc(M, p1, p2):
    N = p1 + (M * p2)
    return N
The above is the fit function I use for the data.
Now, to fit the data using curve_fit:
popt_1, pcov_1 = curve_fit(fitfunc, logx, logy, p0=(10.0, 1.0), maxfev=2000)
p1_1 = popt_1[0]
p1_2 = popt_1[1]
sigma1 = [np.sqrt(pcov_1[0,0]),np.sqrt(pcov_1[1,1])] #THE 1 SIGMA CONFIDENCE INTERVALS
residuals1 = (logy) - fitfunc((logx),p1_1,p1_2)
xi_sq_1 = sum(residuals1**2) #THE CHI-SQUARE OF THE FIT
curve_y_1 = fitfunc((logx),p1_1,p1_2)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(logx,logy,c='r',label='$0.0<z<0.5$')
ax1.plot(logx,curve_y_1,'y')
ax1.plot(logx,fitfunc(logx,p1_1+sigma1[0],p1_2+sigma1[1]),'m',label='68% conf limits')
ax1.plot(logx,fitfunc(logx,p1_1-sigma1[0],p1_2-sigma1[1]),'m')
So, just by using the square root of the diagonal elements of the covariance matrix, I can obtain the 1-sigma confidence lines.
I could really use a tip to help me plot a decision boundary to separate two classes of data. I created some sample data (from a Gaussian distribution) via Python NumPy. In this case, every data point is a 2D coordinate, i.e., a 1-column vector consisting of 2 rows. E.g.,
[ 1
2 ]
Let's assume I have 2 classes, class1 and class2, and I created 100 data points for class1 and 100 data points for class2 via the code below (assigned to the variables x1_samples and x2_samples).
mu_vec1 = np.array([0,0])
cov_mat1 = np.array([[2,0],[0,2]])
x1_samples = np.random.multivariate_normal(mu_vec1, cov_mat1, 100)
mu_vec1 = mu_vec1.reshape(1,2).T # to 1-col vector
mu_vec2 = np.array([1,2])
cov_mat2 = np.array([[1,0],[0,1]])
x2_samples = np.random.multivariate_normal(mu_vec2, cov_mat2, 100)
mu_vec2 = mu_vec2.reshape(1,2).T
When I plot the data points for each class, it looks like this:
Now, I came up with an equation for a decision boundary to separate both classes and would like to add it to the plot. However, I am not really sure how to plot this function:
def decision_boundary(x_vec, mu_vec1, mu_vec2):
g1 = (x_vec-mu_vec1).T.dot((x_vec-mu_vec1))
g2 = 2*( (x_vec-mu_vec2).T.dot((x_vec-mu_vec2)) )
return g1 - g2
I would really appreciate any help!
EDIT:
Intuitively (if I did my math right), I would expect the decision boundary to look somewhat like this red line when I plot the function...
Your question is more complicated than a simple plot: you need to draw the contour that maximizes the inter-class distance. Fortunately it's a well-studied field, particularly for SVM machine learning.
The easiest method is to download the scikit-learn module, which provides a lot of cool methods to draw boundaries: scikit-learn: Support Vector Machines
Code:
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import scipy
from sklearn import svm
mu_vec1 = np.array([0,0])
cov_mat1 = np.array([[2,0],[0,2]])
x1_samples = np.random.multivariate_normal(mu_vec1, cov_mat1, 100)
mu_vec1 = mu_vec1.reshape(1,2).T # to 1-col vector
mu_vec2 = np.array([1,2])
cov_mat2 = np.array([[1,0],[0,1]])
x2_samples = np.random.multivariate_normal(mu_vec2, cov_mat2, 100)
mu_vec2 = mu_vec2.reshape(1,2).T
fig = plt.figure()
plt.scatter(x1_samples[:,0],x1_samples[:,1], marker='+')
plt.scatter(x2_samples[:,0],x2_samples[:,1], c= 'green', marker='o')
X = np.concatenate((x1_samples,x2_samples), axis = 0)
Y = np.array([0]*100 + [1]*100)
C = 1.0 # SVM regularization parameter
clf = svm.SVC(kernel='linear', C=C)  # gamma is ignored by the linear kernel
clf.fit(X, Y)
Linear Plot
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, 'k-')
Non-linear Plot (RBF kernel)
C = 1.0 # SVM regularization parameter
clf = svm.SVC(kernel = 'rbf', gamma=0.7, C=C )
clf.fit(X, Y)
h = .02 # step size in the mesh
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contour(xx, yy, Z, cmap=plt.cm.Paired)
Implementation
If you want to implement it yourself, you need to solve the standard SVM dual quadratic program (see the Wikipedia article on support vector machines):
maximize over alpha_i:   sum_i alpha_i - (1/2) * sum_{i,j} alpha_i alpha_j y_i y_j K(x_i, x_j)
subject to:              0 <= alpha_i <= C   and   sum_i alpha_i y_i = 0
Unfortunately, for non-linear boundaries like the one you drew, this is a difficult problem relying on the kernel trick, and there isn't a clear-cut closed-form solution.
Based on the way you've written decision_boundary you'll want to use the contour function, as Joe noted above. If you just want the boundary line, you can draw a single contour at the 0 level:
f, ax = plt.subplots(figsize=(7, 7))
c1, c2 = "#3366AA", "#AA3333"
ax.scatter(*x1_samples.T, c=c1, s=40)
ax.scatter(*x2_samples.T, c=c2, marker="D", s=40)
x_vec = np.linspace(*ax.get_xlim())
ax.contour(x_vec, x_vec,
decision_boundary(x_vec, mu_vec1, mu_vec2),
levels=[0], cmap="Greys_r")
Which makes:
Those were some great suggestions, thanks a lot for your help! I ended up solving the equation analytically, and this is the solution I ended up with (I just want to post it for future reference):
# 2-category classification with random 2D-sample data
# from a multivariate normal distribution
import numpy as np
from matplotlib import pyplot as plt
def decision_boundary(x_1):
""" Calculates the x_2 value for plotting the decision boundary."""
return 4 - np.sqrt(-x_1**2 + 4*x_1 + 6 + np.log(16))
# Generating a Gaussian dataset:
# creating random vectors from the multivariate normal distribution
# given mean and covariance
mu_vec1 = np.array([0,0])
cov_mat1 = np.array([[2,0],[0,2]])
x1_samples = np.random.multivariate_normal(mu_vec1, cov_mat1, 100)
mu_vec1 = mu_vec1.reshape(1,2).T # to 1-col vector
mu_vec2 = np.array([1,2])
cov_mat2 = np.array([[1,0],[0,1]])
x2_samples = np.random.multivariate_normal(mu_vec2, cov_mat2, 100)
mu_vec2 = mu_vec2.reshape(1,2).T # to 1-col vector
# Main scatter plot and plot annotation
f, ax = plt.subplots(figsize=(7, 7))
ax.scatter(x1_samples[:,0], x1_samples[:,1], marker='o', color='green', s=40, alpha=0.5)
ax.scatter(x2_samples[:,0], x2_samples[:,1], marker='^', color='blue', s=40, alpha=0.5)
plt.legend(['Class1 (w1)', 'Class2 (w2)'], loc='upper right')
plt.title('Densities of 2 classes with 100 bivariate random patterns each')
plt.ylabel('x2')
plt.xlabel('x1')
ftext = 'p(x|w1) ~ N(mu1=(0,0)^t, cov1=2I)\np(x|w2) ~ N(mu2=(1,2)^t, cov2=I)'
plt.figtext(.15,.8, ftext, fontsize=11, ha='left')
# Adding decision boundary to plot
x_1 = np.arange(-5, 5, 0.1)
bound = decision_boundary(x_1)
plt.plot(x_1, bound, 'r--', lw=3)
plt.show()
And the code can be found here
EDIT:
I also have a convenience function for plotting decision regions for classifiers that implement fit and predict methods, e.g., the classifiers in scikit-learn, which is useful if the solution cannot be found analytically. A more detailed description of how it works can be found here.
You can create your own equation for the boundary. Writing the boundary in polar coordinates around a center (x0, y0) as a truncated Fourier series,
r(theta) = sum_{i=0..n} [ a_i*sin(i*theta) + b_i*cos(i*theta) ],
you have to find the position (x0, y0) as well as the constants a_i and b_i for the radius equation. So, you have 2*(n+1)+2 variables. Using scipy.optimize.leastsq is straightforward for this type of problem.
The code attached below builds the residual for leastsq, penalizing points outside the boundary. The result for your problem, obtained with:
x, y = find_boundary(x2_samples[:,0], x2_samples[:,1], n)
ax.plot(x, y, '-k', lw=2.)
x, y = find_boundary(x1_samples[:,0], x1_samples[:,1], n)
ax.plot(x, y, '--k', lw=2.)
using n=1:
using n=2:
using n=5:
using n=7:
import numpy as np
from numpy import sin, cos, pi
from scipy.optimize import leastsq
def find_boundary(x, y, n, plot_pts=1000):
def sines(theta):
ans = np.array([sin(i*theta) for i in range(n+1)])
return ans
def cosines(theta):
ans = np.array([cos(i*theta) for i in range(n+1)])
return ans
def residual(params, x, y):
x0 = params[0]
y0 = params[1]
c = params[2:]
r_pts = ((x-x0)**2 + (y-y0)**2)**0.5
thetas = np.arctan2((y-y0), (x-x0))
m = np.vstack((sines(thetas), cosines(thetas))).T
r_bound = m.dot(c)
delta = r_pts - r_bound
delta[delta>0] *= 10
return delta
# initial guess for x0 and y0
x0 = x.mean()
y0 = y.mean()
params = np.zeros(2 + 2*(n+1))
params[0] = x0
params[1] = y0
params[2:] += 1000
    popt, ier = leastsq(residual, x0=params, args=(x, y),
                        ftol=1.e-12, xtol=1.e-12)
thetas = np.linspace(0, 2*pi, plot_pts)
m = np.vstack((sines(thetas), cosines(thetas))).T
c = np.array(popt[2:])
r_bound = m.dot(c)
x_bound = popt[0] + r_bound*cos(thetas)
y_bound = popt[1] + r_bound*sin(thetas)
return x_bound, y_bound
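For completeness, a hedged usage sketch (x1_samples and x2_samples as generated in the question; n is the number of Fourier terms, so higher n hugs the points more closely):
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.scatter(x1_samples[:, 0], x1_samples[:, 1], marker='+')
ax.scatter(x2_samples[:, 0], x2_samples[:, 1], marker='o')
n = 2  # illustrative choice
x, y = find_boundary(x1_samples[:, 0], x1_samples[:, 1], n)
ax.plot(x, y, '--k', lw=2.)
plt.show()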
I like the mglearn library to draw decision boundaries. Here is one example from the book "Introduction to Machine Learning with Python" by A. Mueller:
import matplotlib.pyplot as plt
import mglearn
from sklearn.neighbors import KNeighborsClassifier

X, y = mglearn.datasets.make_forge()  # the two-class toy dataset used in the book

fig, axes = plt.subplots(1, 3, figsize=(10, 3))
for n_neighbors, ax in zip([1, 3, 9], axes):
clf = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y)
mglearn.plots.plot_2d_separator(clf, X, fill=True, eps=0.5, ax=ax, alpha=.4)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
ax.set_title("{} neighbor(s)".format(n_neighbors))
ax.set_xlabel("feature 0")
ax.set_ylabel("feature 1")
axes[0].legend(loc=3)
If you want to use scikit-learn, you can write your code like this:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
# read data
data = pd.read_csv('ex2data1.txt', header=None)
X = data[[0,1]].values
y = data[2]
# use LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X, y)
# Coefficient of the features in the decision function. (from theta 1 to theta n)
parameters = log_reg.coef_[0]
# Intercept (a.k.a. bias) added to the decision function. (theta 0)
parameter0 = log_reg.intercept_
# Plotting the decision boundary
fig = plt.figure(figsize=(10,7))
x_values = [np.min(X[:, 0]) - 5, np.max(X[:, 0]) + 5]
# calculate the corresponding y values on the boundary line
y_values = np.dot((-1. / parameters[1]), (np.dot(parameters[0], x_values) + parameter0))
colors=['red' if l==0 else 'blue' for l in y]
plt.scatter(X[:, 0], X[:, 1], label='Logistics regression', color=colors)
plt.plot(x_values, y_values, label='Decision Boundary')
plt.show()
see: Building-a-Logistic-Regression-with-Scikit-learn
I just solved a very similar problem with a different approach (root finding) and wanted to post this alternative as an answer here for future reference:
import numpy as np
import scipy.optimize

def discr_func(x, y, cov_mat, mu_vec):
"""
Calculates the value of the discriminant function for a dx1 dimensional
sample given covariance matrix and mean vector.
Keyword arguments:
x_vec: A dx1 dimensional numpy array representing the sample.
cov_mat: numpy array of the covariance matrix.
mu_vec: dx1 dimensional numpy array of the sample mean.
Returns a float value as result of the discriminant function.
"""
x_vec = np.array([[x],[y]])
W_i = (-1/2) * np.linalg.inv(cov_mat)
assert(W_i.shape[0] > 1 and W_i.shape[1] > 1), 'W_i must be a matrix'
w_i = np.linalg.inv(cov_mat).dot(mu_vec)
assert(w_i.shape[0] > 1 and w_i.shape[1] == 1), 'w_i must be a column vector'
omega_i_p1 = (((-1/2) * (mu_vec).T).dot(np.linalg.inv(cov_mat))).dot(mu_vec)
omega_i_p2 = (-1/2) * np.log(np.linalg.det(cov_mat))
    omega_i = omega_i_p1 + omega_i_p2  # both terms enter with the same (negative) sign
assert(omega_i.shape == (1, 1)), 'omega_i must be a scalar'
g = ((x_vec.T).dot(W_i)).dot(x_vec) + (w_i.T).dot(x_vec) + omega_i
return float(g)
#g1 = discr_func(x, y, cov_mat=cov_mat1, mu_vec=mu_vec_1)
#g2 = discr_func(x, y, cov_mat=cov_mat2, mu_vec=mu_vec_2)
x_est50 = list(np.arange(-6, 6, 0.1))
y_est50 = []
for i in x_est50:
y_est50.append(scipy.optimize.bisect(lambda y: discr_func(i, y, cov_mat=cov_est_1, mu_vec=mu_est_1) - \
discr_func(i, y, cov_mat=cov_est_2, mu_vec=mu_est_2), -10,10))
y_est50 = [float(i) for i in y_est50]
Here is the result:
(blue: the quadratic case; red: the linear case with equal variances)
I know this question has been answered in a very thorough way analytically. I just wanted to share a possible 'hack' for the problem. It is unwieldy but gets the job done.
Start by building a mesh grid of the 2D area, and then, based on the classifier, build a class map of the entire space. Subsequently, detect row-wise changes in the decision, store the edge points in a list, and scatter plot the points (see the sketch after the code below).
def disc(x): # returns the class of the point based on location x = [x,y]
temp = 0.5 + 0.5*np.sign(disc0(x)-disc1(x))
# disc0() and disc1() are the discriminant functions of the respective classes
return 0*temp + 1*(1-temp)
num = 200
a = np.linspace(-4,4,num)
b = np.linspace(-6,6,num)
X,Y = np.meshgrid(a,b)
def decColor(x,y):
temp = np.zeros((num,num))
    print(x.shape, np.size(x, axis=0))
for l in range(num):
for m in range(num):
p = np.array([x[l,m],y[l,m]])
#print p
temp[l,m] = disc(p)
return temp
boundColorMap = decColor(X,Y)
group = 0
boundary = []
for x in range(num):
group = boundColorMap[x,0]
for y in range(num):
if boundColorMap[x,y]!=group:
boundary.append([X[x,y],Y[x,y]])
group = boundColorMap[x,y]
boundary = np.array(boundary)
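A sketch of the final scatter step described above (boundary as computed in the loop):
import matplotlib.pyplot as plt

plt.scatter(boundary[:, 0], boundary[:, 1], s=2, c='k')
plt.show()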
Sample Decision Boundary for a simple bivariate gaussian classifier
Given two bi-variate normal distributions, you can use Gaussian Discriminant Analysis (GDA) to come up with a decision boundary as the difference between the log of the 2 pdf's.
Here's a way to do it using scipy multivariate_normal (the code is not optimized):
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from numpy.linalg import norm
from numpy.linalg import inv
from scipy.spatial.distance import mahalanobis
def normal_scatter(mean, cov, p):
size = 100
sigma_x = cov[0,0]
sigma_y = cov[1,1]
mu_x = mean[0]
mu_y = mean[1]
x_ps, y_ps = np.random.multivariate_normal(mean, cov, size).T
x,y = np.mgrid[mu_x-3*sigma_x:mu_x+3*sigma_x:1/size, mu_y-3*sigma_y:mu_y+3*sigma_y:1/size]
grid = np.empty(x.shape + (2,))
grid[:, :, 0] = x; grid[:, :, 1] = y
z = p*multivariate_normal.pdf(grid, mean, cov)
return x_ps, y_ps, x,y,z
# Dist 1
mu_1 = np.array([1, 1])
cov_1 = .5*np.array([[1, 0], [0, 1]])
p_1 = .5
x_ps, y_ps, x,y,z = normal_scatter(mu_1, cov_1, p_1)
plt.plot(x_ps,y_ps,'x')
plt.contour(x, y, z, cmap='Blues', levels=3)
# Dist 2
mu_2 = np.array([2, 1])
#cov_2 = np.array([[2, -1], [-1, 1]])
cov_2 = cov_1
p_2 = .5
x_ps, y_ps, x,y,z = normal_scatter(mu_2, cov_2, p_2)
plt.plot(x_ps,y_ps,'.')
plt.contour(x, y, z, cmap='Oranges', levels=3)
# Decision Boundary
X = np.empty(x.shape + (2,))
X[:, :, 0] = x; X[:, :, 1] = y
g = np.log(p_1*multivariate_normal.pdf(X, mu_1, cov_1)) - np.log(p_2*multivariate_normal.pdf(X, mu_2, cov_2))
plt.contour(x, y, g, [0])
plt.grid()
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot([mu_1[0], mu_2[0]], [mu_1[1], mu_2[1]], 'k')
plt.show()
If the covariance matrices differ, you get a non-linear (quadratic) boundary; with equal covariance matrices, unequal priors p_1 != p_2 merely shift the linear boundary. The decision boundary is given by g above.
Then to plot the decision hyper-plane (a line in 2D), you need to evaluate g on a 2D mesh and draw its zero contour, which gives the separating line.
If you assume equal covariance matrices for both distributions, the boundary is linear, and you can replace the calculation of g in the above code with the following:
W = inv(cov_1).dot(mu_1-mu_2)
# Point on the boundary for the equal-covariance case (Duda & Hart); note that
# scipy's mahalanobis expects the *inverse* covariance, and the formula uses the squared distance.
x_0 = 1/2*(mu_1+mu_2) - np.log(p_1/p_2) * (mu_1-mu_2) / mahalanobis(mu_1, mu_2, inv(cov_1))**2
X = np.empty(x.shape + (2,))
X[:, :, 0] = x; X[:, :, 1] = y
g = (X-x_0).dot(W)
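As before, plt.contour(x, y, g, [0]) then draws this linear boundary.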
I use this method from the book Python Machine Learning, 2nd Edition (python-machine-learning-2nd.pdf):
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=cl,
edgecolor='black')
# highlight test samples
if test_idx:
# plot all samples
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0],
X_test[:, 1],
                    c='none',
edgecolor='black',
alpha=1.0,
linewidth=1,
marker='o',
s=100,
label='test set')
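For example, a hedged usage sketch (the two-feature Iris subset and the logistic-regression classifier here are illustrative, not from the book excerpt above):
from sklearn import datasets
from sklearn.linear_model import LogisticRegression

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]  # petal length and petal width
y = iris.target
clf = LogisticRegression().fit(X, y)
plot_decision_regions(X, y, classifier=clf)
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.legend(loc='upper left')
plt.show()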
Since version 1.1, sklearn has a function for this:
https://scikit-learn.org/stable/modules/generated/sklearn.inspection.DecisionBoundaryDisplay.html#sklearn.inspection.DecisionBoundaryDisplay
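A minimal sketch of that API (requires scikit-learn >= 1.1; the Iris subset and the KNN classifier are illustrative):
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.neighbors import KNeighborsClassifier

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target  # two features, so the boundary is 2D
clf = KNeighborsClassifier(n_neighbors=15).fit(X, y)

disp = DecisionBoundaryDisplay.from_estimator(clf, X, response_method="predict", alpha=0.4)
disp.ax_.scatter(X[:, 0], X[:, 1], c=y, edgecolor="k")
plt.show()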