Plotting and ranking logistic regression coefficients in Python?

Hi, I am a beginner in Python and machine learning, and I am trying to learn what goes on under the hood of logistic regression by implementing it from scratch in Python. I have been tasked with plotting and ranking the weights/coefficients of the logistic regression below in order to remove the features with the least impact. Whilst I've added a basic plot, it doesn't help me rank the coefficients/thetas. I was initially going to try seaborn's sns.coefplot(), but it has been deprecated. Any help pointing me in the right direction would be appreciated.
This uses the Wisconsin breast cancer dataset (https://www.kaggle.com/uciml/breast-cancer-wisconsin-data).
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split

df = pd.read_csv("cancerdata.csv")

X = df.values[:, 2:-1].astype('float64')
X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
X = np.hstack([np.ones((X.shape[0], 1)), X])   # add a bias column
X = MinMaxScaler().fit_transform(X)
Y = df["diagnosis"].map({'M': 1, 'B': 0})
Y = np.array(Y)

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)

def Sigmoid(z):
    return 1 / (1 + np.exp(-z))

def Hypothesis(theta, x):
    return Sigmoid(x @ theta)

def Cost_Function(X, Y, theta, m):
    hi = Hypothesis(theta, X)
    _y = Y.reshape(-1, 1)
    J = 1 / float(m) * np.sum(-_y * np.log(hi) - (1 - _y) * np.log(1 - hi))
    return J

def Cost_Function_Derivative(X, Y, theta, m, alpha):
    hi = Hypothesis(theta, X)
    _y = Y.reshape(-1, 1)
    J = alpha / float(m) * X.T @ (hi - _y)
    return J

def Gradient_Descent(X, Y, theta, m, alpha):
    new_theta = theta - Cost_Function_Derivative(X, Y, theta, m, alpha)
    return new_theta

def Accuracy(theta):
    length = len(X_test)
    prediction = (Hypothesis(theta, X_test) > 0.5)
    _y = Y_test.reshape(-1, 1)
    correct = prediction == _y
    my_accuracy = (np.sum(correct) / length) * 100
    print('LR Accuracy %: ', my_accuracy)

def Logistic_Regression(X, Y, alpha, theta, num_iters):
    m = len(Y)
    for x in range(num_iters):
        theta = Gradient_Descent(X, Y, theta, m, alpha)
        if x % 100 == 0:
            # print('theta: ', theta)
            Accuracy(theta)
    x = np.linspace(-6, 6, 50)
    y = -(theta[0] + theta[1] * x) / theta[2]
    plt.plot(x, y)
    plt.plot(theta)
    plt.show()
    return theta   # return the fitted weights so they can be ranked and plotted

ep = .012
initial_theta = np.random.rand(X_train.shape[1], 1) * 2 * ep - ep
alpha = 0.5
iterations = 2000
theta = Logistic_Regression(X_train, Y_train, alpha, initial_theta, iterations)
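One way to rank the coefficients is sketched below; it is not part of the original post and assumes the fitted theta returned by Logistic_Regression above, with the feature names taken from the same df columns used to build X. It pairs each weight with its feature name, sorts by absolute value, and draws a horizontal bar chart:
# Hedged sketch: rank the learned weights by absolute value.
feature_names = ['bias'] + list(df.columns[2:-1])            # same columns as df.values[:, 2:-1]
coefs = pd.Series(theta.ravel(), index=feature_names)
ranked = coefs.loc[coefs.abs().sort_values(ascending=False).index]
print(ranked)                                                # largest-magnitude weights first
ranked.iloc[::-1].plot(kind='barh')                          # largest bars at the top
plt.xlabel('weight (theta)')
plt.tight_layout()
plt.show()
The features at the bottom of this ranking would be the first candidates to drop.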

Related

Implement gradient descent in python

I am trying to implement gradient descent in Python. Though my code returns a result, I think the results I am getting are completely wrong.
Here is the code I have written:
import numpy as np
import pandas

dataset = pandas.read_csv(r'D:\ML Data\house-prices-advanced-regression-techniques\train.csv')

X = np.empty((0, 1), int)
Y = np.empty((0, 1), int)
for i in range(dataset.shape[0]):
    X = np.append(X, dataset.at[i, 'LotArea'])
    Y = np.append(Y, dataset.at[i, 'SalePrice'])
X = np.c_[np.ones(len(X)), X]
Y = Y.reshape(len(Y), 1)

def gradient_descent(X, Y, theta, iterations=100, learningRate=0.000001):
    m = len(X)
    for i in range(iterations):
        prediction = np.dot(X, theta)
        theta = theta - (1 / m) * learningRate * (X.T.dot(prediction - Y))
    return theta

theta = np.random.randn(2, 1)
theta = gradient_descent(X, Y, theta)
print('theta', theta)
The result I get after running this program is:
theta [[-5.23237458e+228]
[-1.04560188e+233]]
These are very high values. Can someone point out the mistake I have made in the implementation?
Also, a second problem is that I have to set the learning rate very low (in this case 0.000001) for it to work; otherwise the program throws an error.
Please help me diagnose the problem.
Try reducing the learning rate with each iteration; otherwise it won't be able to reach the optimum. Try this:
import numpy as np
import pandas

dataset = pandas.read_csv('start.csv')

X = np.empty((0, 1), int)
Y = np.empty((0, 1), int)
for i in range(dataset.shape[0]):
    X = np.append(X, dataset.at[i, 'R&D Spend'])
    Y = np.append(Y, dataset.at[i, 'Profit'])
X = np.c_[np.ones(len(X)), X]
Y = Y.reshape(len(Y), 1)

def gradient_descent(X, Y, theta, iterations=50, learningRate=0.01):
    m = len(X)
    for i in range(iterations):
        prediction = np.dot(X, theta)
        theta = theta - (1 / m) * learningRate * (X.T.dot(prediction - Y))
        learningRate /= 10
    return theta

theta = np.random.randn(2, 1)
theta = gradient_descent(X, Y, theta)
print('theta', theta)
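As an alternative to decaying the learning rate, the hedged sketch below standardizes the feature first. The divergence and the need for a tiny learning rate mostly come from the raw scale of LotArea (tens of thousands); with a zero-mean, unit-variance column, the question's original gradient_descent (the version without the decay) converges with a much larger rate. The column name and dataset are the ones from the question, not this answer.
# Hedged sketch, assuming the question's dataset and its gradient_descent (no decay).
X_raw = dataset['LotArea'].to_numpy(dtype=float)
X_std = (X_raw - X_raw.mean()) / X_raw.std()        # zero mean, unit variance
X_scaled = np.c_[np.ones(len(X_std)), X_std]        # add the intercept column back
theta = gradient_descent(X_scaled, Y, np.random.randn(2, 1),
                         iterations=500, learningRate=0.1)
print('theta with scaled feature:', theta)
Note that theta is then expressed in units of the standardized feature, so it has to be transformed back if you want coefficients on the original scale.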

My vectorization implementation of gradient descent does not get me the right answer

I'm currently working on Andrew Ng's gradient descent exercise in Python, but it keeps giving me the wrong optimal theta. I followed this vectorization cheat sheet for gradient descent: https://medium.com/ml-ai-study-group/vectorized-implementation-of-cost-functions-and-gradient-vectors-linear-regression-and-logistic-31c17bca9181.
Here is my code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def cost_func(X, Y, theta):
    m = len(X)
    H = X.dot(theta)
    J = 1 / (2 * m) * (H - Y).T.dot(H - Y)
    return J

def gradient_descent(X, Y, alpha=0.01, iterations=1500):
    # initialize theta as a zero vector
    theta = np.zeros(X.shape[1])
    # initialize the list of cost function values
    J_list = [cost_func(X, Y, theta)]
    m = len(X)
    while iterations > 0:
        H = X.dot(theta)
        delta = (1 / m) * X.T.dot(H - Y)
        theta = theta - alpha * delta
        iterations -= 1
        J_list.append(cost_func(X, Y, theta))
    return theta, J_list

def check_convergence(J_list):
    plt.plot(range(len(J_list)), J_list)
    plt.xlabel('Iterations')
    plt.ylabel('Cost J')
    plt.show()

file_name_1 = 'https://raw.githubusercontent.com/kaleko/CourseraML/master/ex1/data/ex1data1.txt'
df1 = pd.read_csv(file_name_1, header=None)

X = df1.values[:, 0]
Y = df1.values[:, 1]
m = len(X)
X = np.column_stack((np.ones(m), X))

theta_optimal, J_list = gradient_descent(X, Y, 0.01, 1500)
print(theta_optimal)
check_convergence(J_list)
My theta output is [-3.63029144 1.16636235], which is incorrect.
Here is my cost function graph (figure not shown). As you can see, it converges way too quickly.
The correct graph should look like this (figure not shown).
Thank you.
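One quick sanity check, sketched below under the assumption that X, Y and theta_optimal are exactly the arrays from the code above, is to compare the gradient-descent result with the closed-form least-squares solution; if the two agree, the descent itself is working and the expectation (or the reference graph) needs another look.
# Hedged sketch: compare gradient descent against the closed-form solution.
theta_closed, *_ = np.linalg.lstsq(X, Y, rcond=None)
print('gradient descent:', theta_optimal)
print('closed form     :', theta_closed)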

How to calculate error in Polynomial Linear Regression?

I am trying to calculate the error rate on the training data I'm using.
I believe I'm calculating the error incorrectly. The error formula is errTrain = (1/N) * sum_n (t_n - y(x_n))^2,
where the prediction y(x) is the fitted polynomial y(x) = w_0 + w_1*x + ... + w_M*x^M.
I am calculating this in the function fitPoly(M) at line 49. I believe I am incorrectly calculating y(x_n), but I don't know what else to do.
Below is a Minimal, Complete, and Verifiable example.
import numpy as np
import matplotlib.pyplot as plt

dataTrain = [[2.362761180904257019e-01, -4.108125266714775847e+00],
             [4.324296163702689988e-01, -9.869308732049049127e+00],
             [6.023323504115264404e-01, -6.684279243433971729e+00],
             [3.305079685397107614e-01, -7.897042003779912278e+00],
             [9.952423271981121200e-01, 3.710086310489402628e+00],
             [8.308127402955634011e-02, 1.828266768673480147e+00],
             [1.855495407116576345e-01, 1.039713135916495501e+00],
             [7.088332047815845138e-01, -9.783208407540947560e-01],
             [9.475723071629885697e-01, 1.137746192425550085e+01],
             [2.343475721257285427e-01, 3.098019704040922750e+00],
             [9.338350584099475160e-02, 2.316408265530458976e+00],
             [2.107903139601833287e-01, -1.550451474833406396e+00],
             [9.509966727520677843e-01, 9.295029459100994984e+00],
             [7.164931165416982273e-01, 1.041025972594300075e+00],
             [2.965557300301902011e-03, -1.060607693351102121e+01]]

def strip(L, xt):
    ret = []
    for i in L:
        ret.append(i[xt])
    return ret

x1 = strip(dataTrain, 0)
y1 = strip(dataTrain, 1)

# HELP HERE
def getY(m, w, D):
    y = w[0]
    y += np.sum(w[1:] * D[:m])
    return y
# HELP ABOVE

def dataMatrix(X, M):
    Z = []
    for x in range(len(X)):
        row = []
        for m in range(M + 1):
            row.append(X[x][0] ** m)
        Z.append(row)
    return Z

def fitPoly(M):
    t = []
    for i in dataTrain:
        t.append(i[1])
    w, _, _, _ = np.linalg.lstsq(dataMatrix(dataTrain, M), t)
    w = w[::-1]
    errTrain = np.sum(np.subtract(t, getY(M, w, x1)) ** 2) / len(x1)
    print('errTrain: %s' % (errTrain))
    return [w, errTrain]

# fitPoly(8)

def plotPoly(w):
    plt.ylim(-15, 15)
    x, y = zip(*dataTrain)
    plt.plot(x, y, 'bo')
    xw = np.arange(0, 1, .001)
    yw = np.polyval(w, xw)
    plt.plot(xw, yw, 'r')

# plotPoly(fitPoly(3)[0])

def bestPoly():
    m = 0
    plt.figure(1)
    plt.xlim(0, 16)
    plt.ylim(0, 250)
    plt.xlabel('M')
    plt.ylabel('Error')
    plt.suptitle('Question 3: training and test error')
    while m < 16:
        plt.figure(0)
        plt.subplot(4, 4, m + 1)
        plotPoly(fitPoly(m)[0])
        plt.figure(1)
        plt.plot(fitPoly(m)[1])
        # plt.plot(fitPoly(m)[2])
        m += 1
    plt.figure(3)
    plt.xlabel('t')
    plt.ylabel('x')
    plt.suptitle('Question 3: best-fitting polynomial (degree = 8)')
    plotPoly(fitPoly(8)[0])
    print('Best M: %d\nBest w: %s\nTraining error: %s' % (8, fitPoly(8)[0], fitPoly(8)[1]))

bestPoly()
Updated: this solution uses NumPy's np.interp, which connects the points as a kind of "best fit". We then use your error function to measure the difference between this interpolated line and the predicted y values for each polynomial degree.
import numpy as np
import matplotlib.pyplot as plt
import itertools

dataTrain = [
    [2.362761180904257019e-01, -4.108125266714775847e+00],
    [4.324296163702689988e-01, -9.869308732049049127e+00],
    [6.023323504115264404e-01, -6.684279243433971729e+00],
    [3.305079685397107614e-01, -7.897042003779912278e+00],
    [9.952423271981121200e-01, 3.710086310489402628e+00],
    [8.308127402955634011e-02, 1.828266768673480147e+00],
    [1.855495407116576345e-01, 1.039713135916495501e+00],
    [7.088332047815845138e-01, -9.783208407540947560e-01],
    [9.475723071629885697e-01, 1.137746192425550085e+01],
    [2.343475721257285427e-01, 3.098019704040922750e+00],
    [9.338350584099475160e-02, 2.316408265530458976e+00],
    [2.107903139601833287e-01, -1.550451474833406396e+00],
    [9.509966727520677843e-01, 9.295029459100994984e+00],
    [7.164931165416982273e-01, 1.041025972594300075e+00],
    [2.965557300301902011e-03, -1.060607693351102121e+01]
]

data = np.array(dataTrain)
data = data[data[:, 0].argsort()]
X, y = data[:, 0], data[:, 1]

fig, ax = plt.subplots(4, 4)
indices = list(itertools.product([0, 1, 2, 3], repeat=2))

for i, loc in enumerate(indices, start=1):
    xx = np.linspace(X.min(), X.max(), 1000)
    yy = np.interp(xx, X, y)
    w = np.polyfit(X, y, i)
    y_pred = np.polyval(w, xx)
    ax[loc].scatter(X, y)
    ax[loc].plot(xx, y_pred)
    ax[loc].plot(xx, yy, 'r--')
    error = np.square(yy - y_pred).sum() / X.shape[0]
    print(error)

plt.show()
This prints out:
2092.19807848
1043.9400277
1166.94550318
252.238810889
225.798905379
155.785478366
125.662973726
143.787869281
6553.66570273
10805.6609259
15577.8686283
13536.1755299
108074.871771
213513916823.0
472673224393.0
1.01198058355e+12
Visually, it plots out like this (figure not shown):
From here, it's just a matter of saving those errors to a list and finding the minimum.
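A small follow-up sketch (same assumptions as the loop above: X, y, xx and yy already defined) that keeps each degree's error in a list and picks the best degree with np.argmin:
# Hedged sketch: collect the per-degree errors and pick the smallest one.
errors = []
for degree in range(1, 17):
    w = np.polyfit(X, y, degree)
    y_pred = np.polyval(w, xx)
    errors.append(np.square(yy - y_pred).sum() / X.shape[0])
best_degree = int(np.argmin(errors)) + 1     # +1 because degrees start at 1
print('best degree:', best_degree, 'with error', errors[best_degree - 1])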
I may contribute:
def pol_y(x, w):
    y = 0
    power = 0
    for i in w:
        y += i * (x ** power)
        power += 1
    return y
M is included implicitly because it is the final index of w. So if w = [0, 0, 1], then pol_y(x, w) is the same as f(x) = x^2.
If you want to map the first column of dataTrain:
get_Y = [pol_y(i, w) for i in x1]
The error may then be calculated by:
vec_error = [(y1[i] - get_Y[i]) ** 2 for i in range(0, len(y1))]
train_error = np.sum(vec_error) / len(y1)
Hope this helps.

Python: Gibbs sampler for regression model

I am trying to write a function for a Gibbs sampler in the Bayesian framework. I got the code from this website, which implements a straightforward regression model. However, I am tackling a more complicated model: y = beta0 + beta1*x + x^gamma * sigma * epsilon, where sigma is the variance of the model. That means I need to estimate p(beta0 | y, x, beta1, sigma, gamma) and so on in the Gibbs sampler. My question is how I should modify the code to sample beta0, beta1 and the other variables, as there are extra variables to condition on.
My code is:
import numpy as np
import pymc as pm
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd

plt.rcParams['figure.figsize'] = (10, 5)

# conda install -c conda-forge pymc3=3.0   (shell command, not Python)

def sample_beta_0(y, x, beta_1, sigma, gamma, mu_0, tau_0):
    N = len(y)
    assert len(x) == N
    tau_i = 1 / ((x ** gamma) * sigma) ** 2
    precision = tau_0 + sum(tau_i)
    mean = tau_0 * mu_0 + np.sum((y - beta_1 * x) * tau_i)
    mean /= precision
    return np.random.normal(mean, 1 / np.sqrt(precision))

def sample_beta_1(y, x, beta_0, sigma, mu_1, sigma_1):
    N = len(y)
    assert len(x) == N
    precision = sigma_1 + sigma * np.sum(x * x)
    mean = sigma_1 * mu_1 + sigma * np.sum((y - beta_0) * x)
    mean /= precision
    return np.random.normal(mean, 1 / np.sqrt(precision))

def sample_sigma(y, x, beta_0, beta_1, alpha, beta):
    N = len(y)
    alpha_new = alpha + N / 2
    resid = y - beta_0 - beta_1 * x
    beta_new = beta + np.sum(resid * resid) / 2
    return np.random.gamma(alpha_new, 1 / beta_new)

beta_0_true = -1
beta_1_true = 2
sigma_true = 1

N = 50
x = np.random.uniform(low=0, high=4, size=N)
y = np.random.normal(beta_0_true + beta_1_true * x, 1 / np.sqrt(sigma_true))

synth_plot = plt.plot(x, y, "o")
plt.xlabel("x")
plt.ylabel("y")
# print('Y are', y)
# print('X are', x)
plt.show()

"""GIBBS Sampler"""
# specify initial values
init = {"beta_0": 0,
        "beta_1": 0,
        "sigma": 2}

# specify hyperparameters
hypers = {"mu_0": 0,
          "sigma_0": 1,
          "mu_1": 0,
          "sigma_1": 1,
          "alpha": 2,
          "beta": 1}

def gibbs(y, x, iters, init, hypers):
    assert len(y) == len(x)
    beta_0 = init["beta_0"]
    beta_1 = init["beta_1"]
    sigma = init["sigma"]
    trace = np.zeros((iters, 3))  # trace to store values of beta_0, beta_1, sigma
    for it in range(iters):
        beta_0 = sample_beta_0(y, x, beta_1, sigma, hypers["mu_0"], hypers["sigma_0"])
        beta_1 = sample_beta_1(y, x, beta_0, sigma, hypers["mu_1"], hypers["sigma_1"])
        sigma = sample_sigma(y, x, beta_0, beta_1, hypers["alpha"], hypers["beta"])
        trace[it, :] = np.array((beta_0, beta_1, sigma))
    trace = pd.DataFrame(trace)
    trace.columns = ['beta_0', 'beta_1', 'sigma']
    print(trace)
    return trace

iters = 1000
trace = gibbs(y, x, iters, init, hypers)

traceplot = trace.plot()
traceplot.set_xlabel("Iteration")
traceplot.set_ylabel("Parameter value")

trace_burnt = trace[500:999]
hist_plot = trace_burnt.hist(bins=30, layout=(1, 3))
print(trace_burnt.median())
print(trace_burnt.std())
I know this is really long, but please help!
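A structural sketch only, not a derivation: because the conditional for gamma in y = beta0 + beta1*x + x^gamma * sigma * epsilon has no standard form, one common choice is a Metropolis step inside the existing Gibbs loop, with gamma then passed into the other conditional samplers as an extra argument. Everything below (flat prior on gamma, random-walk proposal, treating sigma as a noise scale) is an assumption, not the original model's derivation.
# Hedged sketch: Metropolis-within-Gibbs update for gamma.
def log_post_gamma(gamma, y, x, beta_0, beta_1, sigma):
    scale = (x ** gamma) * sigma                      # per-observation noise scale
    resid = y - beta_0 - beta_1 * x
    # Gaussian log-likelihood up to a constant; flat prior on gamma assumed
    return np.sum(-np.log(scale) - 0.5 * (resid / scale) ** 2)

def sample_gamma(y, x, beta_0, beta_1, sigma, gamma, prop_sd=0.1):
    proposal = gamma + np.random.normal(0, prop_sd)   # random-walk proposal
    log_ratio = (log_post_gamma(proposal, y, x, beta_0, beta_1, sigma)
                 - log_post_gamma(gamma, y, x, beta_0, beta_1, sigma))
    if np.log(np.random.rand()) < log_ratio:
        return proposal                               # accept
    return gamma                                      # reject, keep the current value

# Inside gibbs(), after the three existing updates, the extra step would be:
#     gamma = sample_gamma(y, x, beta_0, beta_1, sigma, gamma)
# with gamma also passed into sample_beta_0, sample_beta_1 and sample_sigma.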

Simple regression works with randn but not with random

Last night I wrote some simple binary logistic regression Python code.
It seems to be working correctly (the likelihood increases with each iteration, and I get good classification results).
My problem is that I can only initialize my weights with W = np.random.randn(n+1, 1), i.e. a normal distribution.
But I don't want a normal distribution, I want a uniform distribution. When I do that, I get the error:
"RuntimeWarning: divide by zero encountered in log
return np.dot(Y.T, np.log(predictions)) + np.dot((onesVector - Y).T, np.log(onesVector - predictions))"
This is my code:
import numpy as np
import matplotlib.pyplot as plt

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def predict(X, W):
    return sigmoid(np.dot(X, W))

def logLikelihood(X, Y, W):
    m = X.shape[0]
    predictions = predict(X, W)
    onesVector = np.ones((m, 1))
    return np.dot(Y.T, np.log(predictions)) + np.dot((onesVector - Y).T, np.log(onesVector - predictions))

def gradient(X, Y, W):
    return np.dot(X.T, Y - predict(X, W))

def successRate(X, Y, W):
    m = Y.shape[0]
    predictions = predict(X, W) > 0.5
    correct = (Y == predictions)
    return 100 * np.sum(correct) / float(correct.shape[0])

trX = np.load("binaryMnistTrainX.npy")
trY = np.load("binaryMnistTrainY.npy")
teX = np.load("binaryMnistTestX.npy")
teY = np.load("binaryMnistTestY.npy")

m, n = trX.shape
trX = np.concatenate((trX, np.ones((m, 1))), axis=1)
teX = np.concatenate((teX, np.ones((teX.shape[0], 1))), axis=1)

W = np.random.randn(n + 1, 1)
learningRate = 0.00001
numIter = 500

likelihoodArray = np.zeros((numIter, 1))
for i in range(0, numIter):
    W = W + learningRate * gradient(trX, trY, W)
    likelihoodArray[i, 0] = logLikelihood(trX, trY, W)

print("train success rate is %lf" % (successRate(trX, trY, W)))
print("test success rate is %lf" % (successRate(teX, teY, W)))

plt.plot(likelihoodArray)
plt.show()
If I initialize my W to zeros or with randn, then it works.
If I initialize it with random (uniform, not normal) or with ones, then I get the divide-by-zero issue.
Why does this happen, and how can I fix it?
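A hedged sketch of one likely explanation (not the original author's fix): with an all-positive uniform draw such as np.random.rand(n+1, 1), the products X.dot(W) on MNIST-sized inputs are large enough that sigmoid() returns exactly 1.0 in float64, so log(1 - predictions) hits log(0). Two workarounds, assuming the predict function and n from the code above, are a symmetric uniform initialization and clipping the predictions before taking the log:
# Hedged sketch: symmetric uniform init plus a numerically safe log-likelihood.
eps = 0.001
W = np.random.uniform(-eps, eps, size=(n + 1, 1))          # uniform, centered at zero

def safeLogLikelihood(X, Y, W):
    predictions = np.clip(predict(X, W), 1e-12, 1 - 1e-12)  # keep log() finite
    onesVector = np.ones((X.shape[0], 1))
    return np.dot(Y.T, np.log(predictions)) + np.dot((onesVector - Y).T, np.log(onesVector - predictions))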
