Issue using backpropagation with perceptron - Python

I've been reading Bishop's book on machine learning, and I'm trying to implement the backpropagation algorithm for a neural network, but it's not finding a solution. The code is below. I've broken it down into the network code and the testing code.
import numpy as np
from collections import namedtuple
import matplotlib.pyplot as plt
import scipy.optimize as opt
# Network code
def tanh(x):
    return np.tanh(x)

def dtanh(x):
    return 1 - np.tan(x)**2

def identity(x):
    return x

def unpack_weights(w, D, M, K):
    """
    len(w) = (D + 1)*M + (M + 1)*K, where
    D = number of inputs, excluding bias
    M = number of hidden units, excluding bias
    K = number of output units
    """
    UnpackedWeights = namedtuple("UpackedWeights", ["wHidden", "wOutput"])
    cutoff = M*(D + 1)
    wHidden = w[:cutoff].reshape(M, D + 1)
    wOutput = w[cutoff:].reshape(K, M + 1)
    return UnpackedWeights(wHidden=wHidden, wOutput=wOutput)

def compute_output(x, weights, fcnHidden=tanh, fcnOutput=identity):
    NetworkResults = namedtuple("NetworkResults", ["hiddenAct", "hiddenOut", "outputAct", "outputOut"])
    xBias = np.vstack((1., x))
    hiddenAct = weights.wHidden.dot(xBias)
    hiddenOut = np.vstack((1., fcnHidden(hiddenAct)))
    outputAct = weights.wOutput.dot(hiddenOut)
    outputOut = fcnOutput(outputAct)
    return NetworkResults(hiddenAct=hiddenAct, hiddenOut=hiddenOut, outputAct=outputAct,
                          outputOut=outputOut)

def backprop(t, x, M, fcnHidden=tanh, fcnOutput=identity, dFcnHidden=dtanh):
    maxIter = 10000
    learningRate = 0.2
    N, K = t.shape
    N, D = x.shape
    nParams = (D + 1)*M + (M + 1)*K
    w0 = np.random.uniform(-0.1, 0.1, nParams)
    for _ in xrange(maxIter):
        sse = 0.
        for n in xrange(N):
            weights = unpack_weights(w0, D, M, K)
            # Compute net output
            netResults = compute_output(x=x[n].reshape(-1, 1), weights=weights,
                                        fcnHidden=fcnHidden, fcnOutput=fcnOutput)
            # Compute derivatives of error function wrt wOutput
            outputDelta = netResults.outputOut - t[n].reshape(K, 1)
            outputDerivs = outputDelta.dot(netResults.hiddenOut.T)
            # Compute derivatives of error function wrt wHidden
            hiddenDelta = dFcnHidden(netResults.hiddenAct)*(weights.wOutput[:, 1:].T.dot(outputDelta))
            xBias = np.vstack((1., x[n].reshape(-1, 1)))
            hiddenDerivs = hiddenDelta.dot(xBias.T)
            delErr = np.hstack((np.ravel(hiddenDerivs), np.ravel(outputDerivs)))
            w1 = w0 - learningRate*delErr
            w0 = w1
            sse += np.sum(outputDelta**2)
    return w0
# Testing code
def generate_test_data():
    D, M, K, N = 1, 3, 1, 25
    x = np.sort(np.random.uniform(-1., 1., (N, D)), axis=0)
    t = 1.0 + x**2
    return D, M, K, N, x, t

def test_backprop():
    D, M, K, N, x, t = generate_test_data()
    return backprop(t, x, M)

def scipy_solution(t, x, D, M, K, N, method="BFGS"):
    def obj_fn(w):
        weights = unpack_weights(w, D, M, K)
        err = 0
        for n in xrange(N):
            netOut = compute_output(x[n], weights=weights)
            err += (netOut.outputOut[0, 0] - t[n])**2
        return err
    w0 = np.random.uniform(-1, 1, (D + 1)*M + (M + 1)*K)
    return opt.minimize(obj_fn, w0, method=method)
When I use the optimize module in scipy (i.e., the scipy_solution() function) to find the network weights, the sum of squared errors gets very close to zero, and the output of the network looks like the data I generated. When I use my backpropagation function, the sum of squared errors gets stuck between 2.0 and 3.0, and the network output looks almost linear. Moreover, when I feed the scipy solution for the weights to my backprop function as the starting value, my backprop function still doesn't find the right solution.
I've been stuck on this for a couple of days, so I'd really appreciate any tips anyone has. Thanks.

def dtanh(x):
    return 1 - np.tan(x)**2

should be

def dtanh(x):
    return 1 - np.tanh(x)**2
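A quick numerical check makes the difference obvious. This is just an illustrative sketch (not from the original post) comparing each version of dtanh against a central finite difference of tanh:

# Illustrative check (not from the original post): compare each candidate
# derivative against a central finite difference of tanh.
import numpy as np

xs = np.linspace(-2., 2., 9)
eps = 1e-6
numeric = (np.tanh(xs + eps) - np.tanh(xs - eps)) / (2 * eps)

print(np.allclose(1 - np.tan(xs)**2, numeric, atol=1e-4))   # False: the buggy version
print(np.allclose(1 - np.tanh(xs)**2, numeric, atol=1e-4))  # True: the corrected version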

Related

Numerical optimization with Gradient Descent in Python

I'm trying to solve the next numerical optimization problem: find the vector x such that minimizes the cost function 0.5 * norm(Bx - v, 2)^2, where B is matrix and v is a vector. I have implemented two gradient descent algorithms. In one of them I manually tune the step-size, and in the other I calculate it automatically with equation (2.5) from ftp://lsec.cc.ac.cn/pub/yyx/papers/p0504.pdf. The gradient of the cost function is B^T(B*x - v).
Additionally, I compare my implementations with the solve(A, B) function from numpy.linalg, noting that the solution of the optimization problem is the solution of the linear system A*x = b, where A = B^T * B and b = B^T * v. So far, I'm getting poor results: large errors and long running times. I don't know if there is an error in my implementation or if this is simply how these algorithms behave in the computational experiments I set up.
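As a quick check of this equivalence (a sketch with small random matrices, separate from the experiments below), the gradient should vanish at the solution of the normal equations:

# Sketch: at the solution of (B^T B) x = B^T v, the gradient B^T (B x - v)
# should be numerically zero.
import numpy as np

B_small = np.random.randn(50, 50)
v_small = np.random.randn(50, 1)
x_star = np.linalg.solve(np.dot(B_small.T, B_small), np.dot(B_small.T, v_small))
grad_at_solution = np.dot(B_small.T, np.dot(B_small, x_star) - v_small)
print(np.linalg.norm(grad_at_solution))  # very small, effectively zero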
In the computational experiments, I generate random "solution" vectors x, and matrices B. Then compute A and b accordingly.
Any feedback is appreciated.
This is my code:
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as LA
import time
def residue(x, B, v):
    aux = np.dot(B, x) - v
    aux = pow(LA.norm(aux, 2), 2)
    aux = aux / pow(LA.norm(v, 2), 2)
    return aux

def gradGD(x, B, v):
    aux = np.dot(B, x) - v
    return np.dot(B.T, aux)

def gradientDescent(B, v, alpha, tol, x0):
    A = np.dot(B.T, B)
    b = np.dot(B.T, v)
    x = x0
    while True:
        res = residue(x, B, v)
        print('Residue ', res)
        if (res < tol):
            break
        x = x - alpha * gradGD(x, B, v)
    return x

# Gradient descent with auto step-size
def gradientDescentBB(B, v, tol, x0):
    x = x0
    xpre = np.zeros((N, 1))
    flag = 0
    while True:
        res = residue(x, B, v)
        #print('Residue ', res)
        if (res < tol):
            break
        if (flag == 0):
            grad = gradGD(x, B, v)
            x = x - (1e-06) * grad
            flag = 1
            continue
        gradpre = grad
        grad = gradGD(x, B, v)
        y = grad - gradpre
        s = x - xpre
        # print('dot', np.dot(s.T, y))
        # print('||y||_2 = ', LA.norm(y, 2))
        alpha = np.dot(s.T, y) / pow(LA.norm(y, 2), 2)
        # print("alpha = ", alpha)
        xpre = x
        x = x - alpha * grad
    return x

# Solves the optimization problem via the linear system A*x = b
def solver(B, v):
    A = np.dot(B.T, B)
    b = np.dot(B.T, v)
    return np.linalg.solve(A, b)
# Main routine
N = 1000
epsilon = 1.0e-6
a = 1/N - epsilon
iter = 20
mytime_iter = []
time2_iter = []
myeabs_iter = []
myerel_iter = []
myepercent_iter = []
cgseabs_iter = []
cgserel_iter = []
cgsepercent_iter = []
# Running the experiment many times
for i in range(iter):
    print('Iteration: ', i)
    B = a * np.random.randn(N, N) + np.ones((N, N))
    #print(B)
    x0 = np.random.randn(N, 1)  # Real solution of the optimization problem
    v = np.dot(B, x0)
    mystart = time.time()
    # x = gradientDescent(B, v, alpha=1999100e-09, tol=1e-05, x0=np.zeros((N, 1)))  # Gradient Descent: Method 1
    x = gradientDescentBB(B, v, tol=1e-05, x0=np.zeros((N, 1)))  # Gradient Descent: Method 2
    myend = time.time()
    mytime = myend - mystart
    start2 = time.time()
    xalt = solver(B, v)  # Solution of the optimization problem by solving A*x = b
    end2 = time.time()
    time2 = start2 - end2
    myeabs = LA.norm(x - x0, 2)
    myerel = myeabs / LA.norm(x0, 2)
    myepercent = myerel * 100
    cgseabs = LA.norm(xalt - x0, 2)
    cgserel = cgseabs / LA.norm(x0, 2)
    cgsepercent = cgserel * 100
    mytime_iter.append(mytime)
    time2_iter.append(time2)
    myeabs_iter.append(myeabs)
    myerel_iter.append(myerel)
    myepercent_iter.append(myepercent)
    cgseabs_iter.append(cgseabs)
    cgserel_iter.append(cgserel)
    cgsepercent_iter.append(cgsepercent)
plt.figure(1)
plt.plot(mytime_iter, 'bo', label="GD")
plt.plot(time2_iter, 'ro', label="solve()")
plt.legend(loc="upper right")
plt.xlabel("# Iteration")
plt.ylabel("Time (s)")
# plt.ylim(-1.5, 2.0) --
plt.figure(2)
plt.plot(myeabs_iter, "-b", label="GD")
plt.plot(cgseabs_iter, "-r", label="solve()")
plt.legend(loc="upper right")
plt.xlabel("# Iteration")
plt.ylabel("Absolute error")
plt.figure(3)
plt.plot(myerel_iter, "-b", label="GD")
plt.plot(cgserel_iter, "-r", label="solve()")
plt.legend(loc="upper right")
plt.xlabel("# Iteration")
plt.ylabel("Relative error")
plt.figure(4)
plt.plot(myepercent_iter, "-b", label="GD")
plt.plot(cgsepercent_iter, "-r", label="solve()")
plt.legend(loc="upper right")
plt.ylabel("Relative error (%)")
plt.show()

Obtaining wrong error-curve for logistic regression (Bug in Code)

I started machine learning and wrote this code. But for some reason I am getting a zig-zag error curve instead of a decreasing logarithmic curve. For now, "form_binary_classes" does nothing but take the start and end indices of two similar datasets with different labels. The error function returns the error at every iteration (most probably this is where the bug is), and acc returns the accuracy. gradient_descent is basically used to return the trained weights and bias terms. I'm looking only for the bug, not for a more efficient method.
import numpy as np
import matplotlib.pyplot as plt

def hypothesis(x, theta, b):
    h = np.dot(x, theta) + b
    return sigmoid(h)

def sigmoid(z):
    return 1.0/(1.0+np.exp(-1.0*z))

def error(y_true, x, w, b):
    m = x.shape[0]
    err = 0.0
    for i in range(m):
        hx = hypothesis(x[i], w, b)
        if(hx==0):
            err += (1-y_true[i])*np.log2(1-hx)
        elif(hx==1):
            err += y_true[i]*np.log2(hx)
        else:
            err += y_true[i]*np.log2(hx) + (1-y_true[i])*np.log2(1-hx)
    return -err/m

def get_gradient(y_true, x, w, b):
    grad_w = np.zeros(w.shape)
    grad_b = 0.0
    m = x.shape[0]
    for i in range(m):
        hx = hypothesis(x[i], w, b)
        grad_w += (y_true[i] - hx)*x[i]
        grad_b += (y_true[i] - hx)
    grad_w /= m
    grad_b /= m
    return [grad_w, grad_b]

def gradient_descent(y_true, x, w, b, learning_rate=0.1):
    err = error(y_true, x, w, b)
    grad_w, grad_b = get_gradient(y_true, x, w, b)
    w = w + learning_rate*grad_w
    b = b + learning_rate*grad_b
    return err, w, b

def predict(x, w, b):
    confidence = hypothesis(x, w, b)
    if confidence < 0.5:
        return 0
    else:
        return 1

def get_acc(x_tst, y_tst, w, b):
    y_pred = []
    for i in range(y_tst.shape[0]):
        p = predict(x_tst[i], w, b)
        y_pred.append(p)
    y_pred = np.array(y_pred)
    return float((y_pred==y_tst).sum())/y_tst.shape[0]

def form_binary_classes(a_start, a_end, b_start, b_end):
    x = np.vstack((X[a_start:a_end], X[b_start:b_end]))
    y = np.hstack((Y[a_start:a_end], Y[b_start:b_end]))
    print("{} {}".format(x.shape, y.shape[0]))
    loss = []
    acc = []
    w = 2*np.random.random((x.shape[1],))
    b = 5*np.random.random()
    for i in range(100):
        l, w, b = gradient_descent(y, x, w, b, learning_rate=0.5)
        acc.append(get_acc(X_test, Y_test, w, b))
        loss.append(l)
    plt.plot(loss)
    plt.ylabel("Negative of Log Likelihood")
    plt.xlabel("Time")
    plt.show()
What error plot looks like:
What it SHOULD look like:
You have an issue in computing the error, and that may well be why your model is not converging.
In your code, the corner cases if hx==0 and if hx==1 both compute an error of zero, even when the prediction is wrong. For example, if hx==0 while y_true==1, we enter the first if and the error contribution is
(1-1)*log2(1) = 0, which is not correct.
You can solve this issue by modifying your first two ifs in this way:
def error(y_true, x, w, b):
    m = x.shape[0]
    err = 0.0
    for i in range(m):
        hx = hypothesis(x[i], w, b)
        if(hx==y_true[i]):  # Corner cases where we have zero error
            err += 0
        elif((hx==1 and y_true[i]==0) or (hx==0 and y_true[i]==1)):  # Corner cases where we would take log2 of zero
            err += np.iinfo(np.int32).min  # an approximation of log2(0): penalize the model with the greatest error possible
        else:
            err += y_true[i]*np.log2(hx) + (1-y_true[i])*np.log2(1-hx)
    return -err/m
In this part of the code, I assumed you have binary labels
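A common alternative, not part of the original answer, is to sidestep the corner cases by clipping the prediction away from exactly 0 and 1 before taking the logarithm; a sketch reusing the question's hypothesis function:

# Hypothetical alternative: clip hx into (0, 1) so log2 is always defined,
# instead of enumerating the corner cases explicitly.
import numpy as np

def error_clipped(y_true, x, w, b, eps=1e-12):
    m = x.shape[0]
    err = 0.0
    for i in range(m):
        hx = np.clip(hypothesis(x[i], w, b), eps, 1 - eps)
        err += y_true[i]*np.log2(hx) + (1 - y_true[i])*np.log2(1 - hx)
    return -err/m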

Different loss values with same data, same initial state, same recurrent neural network

I am writing a recurrent neural network (specifically, a ConvLSTM). Recently, I have noticed an interesting inconsistency that I cannot quite figure out. I have written this neural network from scratch using numpy (technically cupy for GPU) and a few Chainer lines (specifically their F.convolution_2d function).
When running this same network twice, for the first 4 or so training examples, the losses are EXACTLY the same. However, around the 5th training example, the losses start to fluctuate in their value.
I have ensured that each time I am running this network, they are reading from the same initial state text file (and thus have the same initial weights and biases). I have also ensured that the data they are inputting are exactly the same.
Is there some inconsistency with numpy that is the root of this problem? The only thing I can think of that is different around the 4th training example is the first use of gradient clipping. Is there some problem with numpy's linalg function? Is there some rounding error I am not familiar with? I have scanned through my code and there is no use of random numbers.
I have added my backpropagation function below:
def bptt(x2, y2, iteration):
    x = cp.asarray(x2)
    y = cp.asarray(y2)
    global connected_weights
    global main_kernel
    global bias_i
    global bias_f
    global bias_c
    global bias_o
    global bias_y
    global learning_rate
    # Perform forward prop
    prediction, pre_sigmoid_prediction, hidden_prediction, i, f, a, c, o, h = forward_prop(x)
    loss = calculate_loss(prediction, y)
    print("LOSS BEFORE: ")
    print(loss)
    # Calculate loss with respect to final layer
    dLdy_2 = loss_derivative(prediction, y)
    # Calculate loss with respect to pre sigmoid layer
    dLdy_1 = cp.multiply(sigmoid_derivative(pre_sigmoid_prediction), dLdy_2)
    # Calculate loss with respect to last layer of lstm
    dLdh = cp.zeros([T + 1, channels_hidden, M, N])
    dLdh[T - 1] = cp.reshape(cp.matmul(cp.transpose(connected_weights), dLdy_1.reshape(1, M * N)), (channels_hidden, M, N))  # reshape dLdh to the appropriate size
    dLdw_0 = cp.matmul(dLdy_1.reshape(1, M*N), hidden_prediction.transpose(1, 0))
    # Calculate loss with respect to bias y
    dLdb_y = dLdy_1
    #--------------------fully connected------------------
    bias_y = bias_y - learning_rate*dLdb_y
    connected_weights = connected_weights - learning_rate*dLdw_0
    # Initialize corresponding matrices
    dLdo = cp.zeros([T, channels_hidden, M, N])
    dLdc = cp.zeros([T + 1, channels_hidden, M, N])
    dLda = cp.zeros([T, channels_hidden, M, N])
    dLdf = cp.zeros([T, channels_hidden, M, N])
    dLdi = cp.zeros([T, channels_hidden, M, N])
    dLdI = cp.zeros([T, channels_hidden + channels_img, M, N])
    dLdW = cp.zeros([4*channels_hidden, channels_img + channels_hidden, kernel_dimension, kernel_dimension])
    # Initialize other stuff
    dLdo_hat = cp.zeros([T, channels_hidden, M, N])
    dLda_hat = cp.zeros([T, channels_hidden, M, N])
    dLdf_hat = cp.zeros([T, channels_hidden, M, N])
    dLdi_hat = cp.zeros([T, channels_hidden, M, N])
    # initialize biases
    dLdb_c = cp.empty([channels_hidden, M, N])
    dLdb_i = cp.empty([channels_hidden, M, N])
    dLdb_f = cp.empty([channels_hidden, M, N])
    dLdb_o = cp.empty([channels_hidden, M, N])
    for t in cp.arange(T - 1, -1, -1):
        dLdo[t] = cp.multiply(dLdh[t], tanh(c[t]))
        dLdc[t] += cp.multiply(cp.multiply(dLdh[t], o[t]), (cp.ones((channels_hidden, M, N)) - cp.multiply(tanh(c[t]), tanh(c[t]))))
        dLdi[t] = cp.multiply(dLdc[t], a[t])
        dLda[t] = cp.multiply(dLdc[t], i[t])
        dLdf[t] = cp.multiply(dLdc[t], c[t - 1])
        dLdc[t - 1] = cp.multiply(dLdc[t], f[t])
        dLda_hat[t] = cp.multiply(dLda[t], (cp.ones((channels_hidden, M, N)) - cp.multiply(a[t], a[t])))
        dLdi_hat[t] = cp.multiply(cp.multiply(dLdi[t], i[t]), cp.ones((channels_hidden, M, N)) - i[t])
        dLdf_hat[t] = cp.multiply(cp.multiply(dLdf[t], f[t]), cp.ones((channels_hidden, M, N)) - f[t])
        dLdo_hat[t] = cp.multiply(cp.multiply(dLdo[t], o[t]), cp.ones((channels_hidden, M, N)) - o[t])
        dLdb_c += dLda_hat[t]
        dLdb_i += dLdi_hat[t]
        dLdb_f += dLdf_hat[t]
        dLdb_o += dLdo_hat[t]
        # CONCATENATE Z IN THE RIGHT ORDER: SAME ORDER AS THE WEIGHTS
        dLdz_hat = cp.concatenate((dLdi_hat[t], dLdf_hat[t], dLda_hat[t], dLdo_hat[t]), axis=0)
        # determine convolution derivatives
        # here we will use the fact that in z = w * I, dLdW = dLdz * I
        temporary = cp.concatenate((x[t], h[t - 1]), axis=0).reshape(channels_hidden + channels_img, 1, M, N)
        dLdI[t] = cp.asarray(F.convolution_2d(dLdz_hat.reshape(1, 4*channels_hidden, M, N), main_kernel.transpose(1, 0, 2, 3), b=None, pad=1)[0].data)  # reshape into flipped kernel dimensions
        dLdW += cp.asarray((F.convolution_2d(temporary, dLdz_hat.reshape(4*channels_hidden, 1, M, N), b=None, pad=1).data).transpose(1, 0, 2, 3))  # reshape into kernel dimensions
        # gradient clipping
        if cp.amax(dLdW) > 1 or cp.amin(dLdW) < -1:
            dLdW = dLdW/cp.linalg.norm(dLdW)
        if cp.amax(dLdb_c) > 1 or cp.amin(dLdb_c) < -1:
            dLdb_c = dLdb_c/cp.linalg.norm(dLdb_c)
        if cp.amax(dLdb_i) > 1 or cp.amin(dLdb_i) < -1:
            dLdb_i = dLdb_i/cp.linalg.norm(dLdb_i)
        if cp.amax(dLdb_f) > 1 or cp.amin(dLdb_f) < -1:
            dLdb_f = dLdb_f/cp.linalg.norm(dLdb_f)
        if cp.amax(dLdb_o) > 1 or cp.amin(dLdb_o) < -1:
            dLdb_o = dLdb_o/cp.linalg.norm(dLdb_o)
        if cp.amax(dLdw_0) > 1 or cp.amin(dLdw_0) < -1:
            dLdw_0 = dLdw_0/cp.linalg.norm(dLdw_0)
        if cp.amax(dLdb_y) > 1 or cp.amin(dLdb_y) < -1:
            dLdb_y = dLdb_y/cp.linalg.norm(dLdb_y)
        print("dLdW on step: " + str(t) + " is this: " + str(dLdW[0][0][0][0]))
        #print("dLdw_0")
        #print("dLdW")
        #print(dLdW)
        #print(str(cp.amax(dLdw_0)) + " : " + str(cp.amin(dLdw_0)))
        #print("dLdW")
        #print(str(cp.amax(dLdW)) + " : " + str(cp.amin(dLdW)))
        #print("dLdb_c")
        #print(str(cp.amax(dLdb_c)) + " : " + str(cp.amin(dLdb_c)))
        dLdh[t-1] = dLdI[t][channels_img: channels_img+channels_hidden]
        #.reshape(4*channels_hidden, channels_hidden+channels_img, kernel_dimension, kernel_dimension)
    # update weights with convolution derivatives
    #----------------------------adam optimizer code-----------------------------------
    #---------------------update main kernel---------
    main_kernel = main_kernel - learning_rate*dLdW
    #--------------------update bias c-----------------------
    bias_c = bias_c - learning_rate*dLdb_c
    #--------------------update bias i-----------------------
    bias_i = bias_i - learning_rate*dLdb_i
    #--------------------update bias f-----------------------
    bias_f = bias_f - learning_rate*dLdb_f
    #--------------------update bias o-----------------------
    bias_o = bias_o - learning_rate*dLdb_o
    prediction2, pre_sigmoid_prediction2, hidden_prediction2, i2, f2, a2, c2, o2, h2 = forward_prop(x)
    print("dLdW is: " + str(dLdW[0][0][0][0]))
    loss2 = calculate_loss(prediction2, y)
    print("LOSS AFTER: ")
    print(loss2)
    print("backpropagation complete")
Wow, that took some time.
If you look at the back propagation code, look closely at these lines:
dLdb_c = cp.empty([channels_hidden, M, N])
dLdb_i = cp.empty([channels_hidden, M, N])
dLdb_f = cp.empty([channels_hidden, M, N])
dLdb_o = cp.empty([channels_hidden, M, N])
However, notice how the code proceeds to use the += operator on these empty arrays. Simply change the arrays to cp.zeros, and the code gives consistent loss.
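A minimal illustration of why this matters (shown with numpy here; cupy's empty behaves the same way):

# np.empty returns uninitialized memory, so accumulating into it with +=
# starts from whatever garbage was left there; np.zeros starts from a known state.
import numpy as np

acc_empty = np.empty(3)   # contents are undefined
acc_empty += 1.0          # result depends on the leftover values
acc_zeros = np.zeros(3)
acc_zeros += 1.0          # always [1., 1., 1.]
print(acc_empty, acc_zeros)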

Simple regression works with randn but not with random

Last night I wrote a simple binary logistic regression python code.
It seems to be working correctly (likelihood increases with each iteration, and I get good classification results).
My problem is that I can only initialize my weights with W = np.random.randn(n+1, 1), i.e. a normal distribution. But I don't want a normal distribution, I want a uniform distribution. When I do that, I get the error
"RuntimeWarning: divide by zero encountered in log
return np.dot(Y.T, np.log(predictions)) + np.dot((onesVector - Y).T, np.log(onesVector - predictions))"
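For reference, that warning just means np.log was handed an exact zero somewhere, which is easy to reproduce in isolation:

# Reproducing the warning on its own: log of an exact zero.
import numpy as np
print(np.log(np.array([0.0])))  # RuntimeWarning: divide by zero encountered in log; result is [-inf]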
This is my code:
import numpy as np
import matplotlib.pyplot as plt

def sigmoid(x):
    return 1/(1+np.exp(-x))

def predict(X, W):
    return sigmoid(np.dot(X, W))

def logLikelihood(X, Y, W):
    m = X.shape[0]
    predictions = predict(X, W)
    onesVector = np.ones((m, 1))
    return np.dot(Y.T, np.log(predictions)) + np.dot((onesVector - Y).T, np.log(onesVector - predictions))

def gradient(X, Y, W):
    return np.dot(X.T, Y - predict(X, W))

def successRate(X, Y, W):
    m = Y.shape[0]
    predictions = predict(X, W) > 0.5
    correct = (Y == predictions)
    return 100 * np.sum(correct)/float(correct.shape[0])

trX = np.load("binaryMnistTrainX.npy")
trY = np.load("binaryMnistTrainY.npy")
teX = np.load("binaryMnistTestX.npy")
teY = np.load("binaryMnistTestY.npy")

m, n = trX.shape
trX = np.concatenate((trX, np.ones((m, 1))), axis=1)
teX = np.concatenate((teX, np.ones((teX.shape[0], 1))), axis=1)

W = np.random.randn(n+1, 1)
learningRate = 0.00001
numIter = 500
likelihoodArray = np.zeros((numIter, 1))

for i in range(0, numIter):
    W = W + learningRate * gradient(trX, trY, W)
    likelihoodArray[i, 0] = logLikelihood(trX, trY, W)

print("train success rate is %lf" % (successRate(trX, trY, W)))
print("test success rate is %lf" % (successRate(teX, teY, W)))
plt.plot(likelihoodArray)
plt.show()
If I initialize W to zeros or with randn, it works.
If I initialize it with random (uniform, not normal) or with ones, then I get the division-by-zero warning.
Why does this happen and how can I fix it?

Trying to plot a simple function - python

I implemented a simple linear regression and I want to try it out by fitting a non-linear model. Specifically, I am trying to fit a model for the function y = x^3 + 5, for example.
This is my code:
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt

def predict(X, W):
    return np.dot(X, W)

def gradient(X, Y, W, regTerm=0):
    return (-np.dot(X.T, Y) + np.dot(np.dot(X.T, X), W))/(m*k) + regTerm * W/(n*k)

def cost(X, Y, W, regTerm=0):
    m, k = Y.shape
    n, k = W.shape
    Yhat = predict(X, W)
    return np.trace(np.dot(Y-Yhat, (Y-Yhat).T))/(2*m*k) + regTerm * np.trace(np.dot(W, W.T))/(2*n*k)

def Rsquared(X, Y, W):
    m, k = Y.shape
    SSres = cost(X, Y, W)
    Ybar = np.mean(Y, axis=0)
    Ybar = np.matlib.repmat(Ybar, m, 1)
    SStot = np.trace(np.dot(Y-Ybar, (Y-Ybar).T))
    return 1 - SSres/SStot

m = 10
n = 200
k = 1
trX = np.random.rand(m, n)
trX[:, 0] = 1
for i in range(2, n):
    trX[:, i] = trX[:, 1] ** i
trY = trX[:, 1] ** 3 + 5
trY = np.reshape(trY, (m, k))

W = np.random.rand(n, k)
numIter = 10000
learningRate = 0.5
for i in range(0, numIter):
    W = W - learningRate * gradient(trX, trY, W)

domain = np.linspace(0, 1, 100000)
powerDomain = np.copy(domain)
m = powerDomain.shape[0]
powerDomain = np.reshape(powerDomain, (m, 1))
powerDomain = np.matlib.repmat(powerDomain, 1, n)
for i in range(1, n):
    powerDomain[:, i] = powerDomain[:, 0] ** i

print(Rsquared(trX, trY, W))
plt.plot(trX[:, 1], trY, 'o', domain, predict(powerDomain, W), 'r')
plt.show()
The R^2 I'm getting is very close to 1, meaning I found a very good fit to the training data, but that isn't shown in the plots. When I plot the data, it usually looks like this:
It looks as if I'm underfitting the data, but with such a complex hypothesis, with 200 features (meaning I allow polynomials up to x^200) and only 10 training examples, I should very clearly be overfitting the data, so I expect the red line to pass through all the blue points and go wild between them.
This isn't what I'm getting, which is confusing to me.
What's wrong?
You forgot to set powerDomain[:, 0] = 1; that's why your plot goes wrong at 0. And yes, you are overfitting: look how quickly your plot fires up as soon as you get out of your training domain.
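Concretely, the one-line fix described above, sketched against the question's variable names:

# Set the bias column of powerDomain explicitly, mirroring trX[:, 0] = 1
# in the training data, before evaluating and plotting the model on it.
powerDomain[:, 0] = 1
plt.plot(trX[:, 1], trY, 'o', domain, predict(powerDomain, W), 'r')
plt.show()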
