This is TensorFlow code to calculate the maximum log-likelihood, from this link.
import tensorflow as tf
import numpy as np
EPS=1e-8
def gaussian_likelihood(x, mu, log_std):
    pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi))
    return tf.reduce_sum(pre_sum, axis=1)

if __name__ == '__main__':
    sess = tf.Session()
    dim = 2
    x = tf.placeholder(tf.float32, shape=(None, dim))
    mu = tf.placeholder(tf.float32, shape=(None, dim))
    log_std = tf.placeholder(tf.float32, shape=(dim,))
    true_gaussian_likelihood = gaussian_likelihood(x, mu, log_std)

    batch_size = 5
    feed_dict = {x: np.random.rand(batch_size, dim),
                 mu: np.random.rand(batch_size, dim),
                 log_std: np.random.rand(dim)}
    true_result = sess.run(true_gaussian_likelihood,
                           feed_dict=feed_dict)
    print(true_result)
In the last line of the gaussian_likelihood function, why does it sum only over axis=1? I think the maximum-likelihood result is a probability computed over all samples (here batch_size=5 for simplicity) and should be just one number. But what I get as the max-likelihood is an array with shape (5,), like below:
[-3.0391085 -3.0107908 -2.966611 -2.9552155 -3.027913 ]
Plus, these numbers aren't in the range [0, 1] as probability values should be. Is this a mistake by the OpenAI team who provided this code?
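One way to see what these values are is to check them against scipy.stats: gaussian_likelihood returns the log density of each sample, summed over its dim coordinates, so the output has shape (batch_size,) and, being a log density, is not restricted to [0, 1]. A minimal NumPy check of this (SciPy is an extra dependency here, not part of the original code):
import numpy as np
from scipy.stats import norm
dim, batch_size = 2, 5
x = np.random.rand(batch_size, dim)
mu = np.random.rand(batch_size, dim)
log_std = np.random.rand(dim)
# Per-dimension Gaussian log pdf, same expression as pre_sum above (EPS omitted).
pre_sum = -0.5 * (((x - mu) / np.exp(log_std))**2 + 2*log_std + np.log(2*np.pi))
manual = pre_sum.sum(axis=1)                                     # shape (5,)
reference = norm.logpdf(x, loc=mu, scale=np.exp(log_std)).sum(axis=1)
print(np.allclose(manual, reference))                            # True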
Related
I am trying to implement ridge regression in PyTorch, defining the loss function and plotting it over different iterations. The only issue is that I keep getting the error: mat1 and mat2 shapes cannot be multiplied (1000x10 and 1x1). I would like to convert the second matrix to 1x10 in order to complete the code, but I can't seem to get it to work.
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
n = 1000
p = 10
mean = np.zeros((p))
val = 0.8
cov = np.ones((p,p))*val
cov = cov + np.eye(p)*(1-val)
np.random.seed(10)
X = np.random.multivariate_normal(mean, cov, n)
theta_true = np.concatenate((np.ones((5,1)), np.zeros((5,1))),axis=0)
delta=0.5
Sigma = np.eye(n,n,k=-1)*0.4 + np.eye(n,n)*1 + np.eye(n,n,k=1)*0.4
mean = np.zeros(n)
e = np.random.multivariate_normal(mean, Sigma, 1)
y = X @ theta_true + delta*e.T
import torch
X_t = torch.from_numpy(X).float()
y_t = torch.from_numpy(y).float()
Sigma_t = torch.from_numpy(Sigma).float()
import torch.nn as nn
import torch.nn.functional as F
class MyLinear(nn.Module):
    def __init__(self):
        super(MyLinear, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        out = self.linear(x)
        return out

def L2_norm(model):
    return torch.sum(list(model.parameters())[0]**2)

def L1_norm(model):
    return torch.sum(torch.abs(list(model.parameters())[0]))

def ridge_loss(y_pred, y_true, model, lambda_):
    mse = F.mse_loss(y_pred, y_true)
    regularization = lambda_ * L2_norm(model)
    return mse + regularization
import matplotlib.pyplot as plt
model = MyLinear()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
lambda_ = 0.1
num_epochs = 1000
loss_values = []
for epoch in range(num_epochs):
    optimizer.zero_grad()
    y_pred = model(X_t)
    loss = ridge_loss(y_pred, y_t, model, lambda_)
    loss_values.append(loss.item())
    loss.backward()
    optimizer.step()
plt.plot(loss_values)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Ridge Regression Loss over Iterations')
plt.show()
I tried changing the theta_true definition to transform the matrix but the same error occurred.
theta_true = np.concatenate((np.ones((5,1)), np.zeros((5,1)))).reshape(10, 1)
Your Linear layer in MyLinear (line 37) is what is causing the issue.
self.linear = nn.Linear(1, 1)
means 1 input feature and 1 output feature, but x, as you have it here, has shape (1000, 10), meaning it has 10 features. So you will need to change that line to
self.linear = nn.Linear(10, 1)
That will do the trick. The image I get with that change (the loss plot over iterations) is omitted here.
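As a minimal sketch of the fix in context (reusing X_t and the names from the question; only the Linear layer changes, everything else in the training loop stays the same):
import torch.nn as nn
class MyLinear(nn.Module):
    def __init__(self):
        super(MyLinear, self).__init__()
        # 10 input features (one per column of X_t), 1 output feature.
        self.linear = nn.Linear(10, 1)
    def forward(self, x):
        return self.linear(x)
model = MyLinear()
y_pred = model(X_t)    # X_t has shape (1000, 10)
print(y_pred.shape)    # torch.Size([1000, 1]), matching y_t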
I want to define my own loss function in Keras, and multiply the bce_loss with a variable W. Actually, W has the same shape as the tensor bce_loss.
If I print the tensor bce_loss, it shows up as follows:
Tensor("loss_8/activation_14_loss/logistic_loss:0", shape=(?, 3), dtype=float32)
Now I don't know how to get the shape of bce_loss and make the variable W have the same shape as bce_loss.
My code:
def myLoss(y_true, y_pred):
    bce_loss = K.binary_crossentropy(y_true, y_pred)
    # want to get a variable W with the same shape of bce_loss
    # And W is initialized with normal distribution.
    val = np.random.normal(0, 0.05, size= bce_loss.size())
    W = keras.variable( val )
    return K.mean(self.W*bce_loss, axis = -1)
You can define your loss function like this:
from keras import backend as K
import numpy as np
def myLoss(y_true, y_pred):
    bce_loss = K.binary_crossentropy(y_true, y_pred)
    w = K.random_normal(K.shape(bce_loss), 0, 0.05)
    return K.mean(w * bce_loss, axis=-1)

y_t = K.placeholder((1,2))
y_p = K.placeholder((1,2))
loss = myLoss(y_t, y_p)
print(K.get_session().run(loss, {y_t: np.array([[1,1]]), y_p: np.array([[0.5, 0.2]])}))
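If the goal is to plug this into model training rather than evaluate it by hand, the loss function can be passed straight to compile. A sketch with a made-up two-layer model (the layer sizes and input shape are assumptions, not from the question; only the 3-unit output matches the (?, 3) loss shape above):
from keras.models import Sequential
from keras.layers import Dense
# Hypothetical model just to show the wiring; myLoss is the function above.
model = Sequential([
    Dense(16, activation='relu', input_shape=(8,)),
    Dense(3, activation='sigmoid'),
])
model.compile(optimizer='adam', loss=myLoss)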
I am trying to reproduce the results generated by TensorFlow's LSTMCell to be sure that I understand what it does.
Here is my TensorFlow code:
num_units = 3
lstm = tf.nn.rnn_cell.LSTMCell(num_units = num_units)
timesteps = 7
num_input = 4
X = tf.placeholder("float", [None, timesteps, num_input])
x = tf.unstack(X, timesteps, 1)
outputs, states = tf.contrib.rnn.static_rnn(lstm, x, dtype=tf.float32)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
x_val = np.random.normal(size = (1, 7, num_input))
res = sess.run(outputs, feed_dict = {X:x_val})
for e in res:
    print(e)
Here is its output:
[[-0.13285545 -0.13569424 -0.23993783]]
[[-0.04818152 0.05927373 0.2558436 ]]
[[-0.13818116 -0.13837864 -0.15348436]]
[[-0.232219 0.08512601 0.05254192]]
[[-0.20371495 -0.14795329 -0.2261929 ]]
[[-0.10371902 -0.0263292 -0.0914975 ]]
[[0.00286371 0.16377522 0.059478 ]]
And here is my own implementation:
n_steps, _ = X.shape
h = np.zeros(shape = self.hid_dim)
c = np.zeros(shape = self.hid_dim)
for i in range(n_steps):
    x = X[i,:]
    vec = np.concatenate([x, h])
    #vec = np.concatenate([h, x])
    gs = np.dot(vec, self.kernel) + self.bias
    g1 = gs[0*self.hid_dim : 1*self.hid_dim]
    g2 = gs[1*self.hid_dim : 2*self.hid_dim]
    g3 = gs[2*self.hid_dim : 3*self.hid_dim]
    g4 = gs[3*self.hid_dim : 4*self.hid_dim]
    I = vsigmoid(g1)
    N = np.tanh(g2)
    F = vsigmoid(g3)
    O = vsigmoid(g4)
    c = c*F + I*N
    h = O * np.tanh(c)
    print(h)
And here is its output:
[-0.13285543 -0.13569425 -0.23993781]
[-0.01461723 0.08060743 0.30876374]
[-0.13142865 -0.14921292 -0.16898363]
[-0.09892188 0.11739943 0.08772941]
[-0.15569218 -0.15165766 -0.21918869]
[-0.0480604 -0.00918626 -0.06084118]
[0.0963612 0.1876516 0.11888081]
As you might notice, I was able to reproduce the first hidden vector, but the second one and all the following ones are different. What am I missing?
I examined this link and your code is almost perfect, but you forgot to add the forget_bias value (default 1.0) in this line: F = vsigmoid(g3). It's actually F = vsigmoid(g3 + self.forget_bias), or, since in your case it's 1, F = vsigmoid(g3 + 1).
Here is my implementation with numpy:
import numpy as np
import tensorflow as tf

num_units = 3
lstm = tf.nn.rnn_cell.LSTMCell(num_units = num_units)
batch=1
timesteps = 7
num_input = 4
X = tf.placeholder("float", [batch, timesteps, num_input])
x = tf.unstack(X, timesteps, 1)
outputs, states = tf.contrib.rnn.static_rnn(lstm, x, dtype=tf.float32)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
x_val = np.reshape(range(28),[batch, timesteps, num_input])
res = sess.run(outputs, feed_dict = {X:x_val})
for e in res:
    print(e)

print("\nmy imp\n")

#my impl
def sigmoid(x):
    return 1/(1+np.exp(-x))

kernel,bias=sess.run([lstm._kernel,lstm._bias])
f_b_=lstm._forget_bias
c,h=np.zeros([batch,num_input-1]),np.zeros([batch,num_input-1])
for step in range(timesteps):
    inpt=np.split(x_val,7,1)[step][0]
    lstm_mtrx=np.matmul(np.concatenate([inpt,h],1),kernel)+bias
    i,j,f,o=np.split(lstm_mtrx,4,1)
    c=sigmoid(f+f_b_)*c+sigmoid(i)*np.tanh(j)
    h=sigmoid(o)*np.tanh(c)
    print(h)
output:
[[ 0.06964055 -0.06541953 -0.00682676]]
[[ 0.005264 -0.03234607 0.00014838]]
[[ 1.617855e-04 -1.316892e-02 8.596722e-06]]
[[ 3.9425286e-06 -5.1347450e-03 7.5078127e-08]]
[[ 8.7508155e-08 -1.9560163e-03 6.3853928e-10]]
[[ 1.8867894e-09 -7.3784427e-04 5.8551406e-12]]
[[ 4.0385355e-11 -2.7728223e-04 5.3957669e-14]]
my imp
[[ 0.06964057 -0.06541953 -0.00682676]]
[[ 0.005264 -0.03234607 0.00014838]]
[[ 1.61785520e-04 -1.31689185e-02 8.59672610e-06]]
[[ 3.94252745e-06 -5.13474567e-03 7.50781122e-08]]
[[ 8.75080644e-08 -1.95601574e-03 6.38539112e-10]]
[[ 1.88678843e-09 -7.37844070e-04 5.85513438e-12]]
[[ 4.03853841e-11 -2.77282006e-04 5.39576024e-14]]
TensorFlow uses the glorot_uniform() function to initialize the LSTM kernel, which samples weights from a uniform distribution. We need to fix the kernel values to get reproducible results:
import tensorflow as tf
import numpy as np
np.random.seed(0)
timesteps = 7
num_input = 4
x_val = np.random.normal(size = (1, timesteps, num_input))
num_units = 3
def glorot_uniform(shape):
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    return np.random.uniform(low=-limit, high=limit, size=shape)
kernel_init = glorot_uniform((num_input + num_units, 4 * num_units))
My implementation of the LSTMCell (well, actually it's just TensorFlow's code, slightly rewritten):
def sigmoid(x):
    return 1. / (1 + np.exp(-x))

class LSTMCell():
    """Long short-term memory unit (LSTM) recurrent network cell.
    """
    def __init__(self, num_units, initializer=glorot_uniform,
                 forget_bias=1.0, activation=np.tanh):
        """Initialize the parameters for an LSTM cell.
        Args:
            num_units: int, The number of units in the LSTM cell.
            initializer: The initializer to use for the kernel matrix. Default: glorot_uniform
            forget_bias: Biases of the forget gate are initialized by default to 1
                in order to reduce the scale of forgetting at the beginning of
                the training.
            activation: Activation function of the inner states. Default: np.tanh.
        """
        # Inputs must be 2-dimensional.
        self._num_units = num_units
        self._forget_bias = forget_bias
        self._activation = activation
        self._initializer = initializer

    def build(self, inputs_shape):
        input_depth = inputs_shape[-1]
        h_depth = self._num_units
        self._kernel = self._initializer(shape=(input_depth + h_depth, 4 * self._num_units))
        self._bias = np.zeros(shape=(4 * self._num_units))

    def call(self, inputs, state):
        """Run one step of LSTM.
        Args:
            inputs: input numpy array, must be 2-D, `[batch, input_size]`.
            state: a tuple of numpy arrays, both `2-D`, with column sizes `c_state` and
                `m_state`.
        Returns:
            A tuple containing:
            - A `2-D, [batch, output_dim]`, numpy array representing the output of the
              LSTM after reading `inputs` when previous state was `state`.
              Here output_dim is equal to num_units.
            - Numpy array(s) representing the new state of LSTM after reading `inputs` when
              the previous state was `state`. Same type and shape(s) as `state`.
        """
        num_proj = self._num_units
        (c_prev, m_prev) = state
        input_size = inputs.shape[-1]
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        lstm_matrix = np.hstack([inputs, m_prev]).dot(self._kernel)
        lstm_matrix += self._bias
        i, j, f, o = np.split(lstm_matrix, indices_or_sections=4, axis=0)
        # Diagonal connections
        c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
             self._activation(j))
        m = sigmoid(o) * self._activation(c)
        new_state = (c, m)
        return m, new_state

X = x_val.reshape(x_val.shape[1:])
cell = LSTMCell(num_units, initializer=lambda shape: kernel_init)
cell.build(X.shape)
state = (np.zeros(num_units), np.zeros(num_units))
for i in range(timesteps):
    x = X[i,:]
    output, state = cell.call(x, state)
    print(output)
Produces output:
[-0.21386017 -0.08401277 -0.25431477]
[-0.22243588 -0.25817422 -0.1612211 ]
[-0.2282134 -0.14207162 -0.35017249]
[-0.23286737 -0.17129192 -0.2706512 ]
[-0.11768674 -0.20717363 -0.13339118]
[-0.0599215 -0.17756104 -0.2028935 ]
[ 0.11437953 -0.19484555 0.05371994]
While your TensorFlow code, if you replace the second line with
lstm = tf.nn.rnn_cell.LSTMCell(num_units = num_units, initializer = tf.constant_initializer(kernel_init))
returns:
[[-0.2138602 -0.08401276 -0.25431478]]
[[-0.22243595 -0.25817424 -0.16122109]]
[[-0.22821338 -0.1420716 -0.35017252]]
[[-0.23286738 -0.1712919 -0.27065122]]
[[-0.1176867 -0.2071736 -0.13339119]]
[[-0.05992149 -0.177561 -0.2028935 ]]
[[ 0.11437953 -0.19484554 0.05371996]]
Here is a blog which will answer any conceptual questions related to LSTMs. It seems that a lot goes into building an LSTM from scratch!
Of course, this answer doesn't solve your question, but it gives a direction.
Considering linear algebra, it is possible that there is a dimension mismatch in the multiplication between I and N (the red circle in the referenced figure), affecting the output, given that an n x m matrix dotted with an m x p matrix gives an n x p output.
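A quick NumPy illustration of that shape rule, next to the elementwise product actually used for I*N (a generic sketch, not tied to the weights above):
import numpy as np
a = np.ones((2, 3))            # n x m
b = np.ones((3, 4))            # m x p
print((a @ b).shape)           # (2, 4): n x m dot m x p -> n x p
c = np.ones((2, 3))
print((a * c).shape)           # (2, 3): elementwise product needs matching (or broadcastable) shapes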
import tensorflow as tf
import numpy as np
xy = np.loadtxt('data-01-test-score.csv', delimiter = ',', dtype = np.float32)
# numpy array data is made for tensor
x_imp_np = xy[:,:-1]
y_imp_np = xy[:,[-1]]
x_imp_ten = tf.constant(x_imp_np)
y_imp_ten = tf.constant(y_imp_np)
# make batches for data
x_batch, y_batch = tf.train.batch([x_imp_ten, y_imp_ten], batch_size = 10)
x = tf.placeholder(tf.float32, shape = [None,3])
y = tf.placeholder(tf.float32, shape = [None,1])
w = tf.Variable(tf.random_normal([3,1]), name = 'weight')
b = tf.Variable(tf.random_normal([1]), name = 'bias')
hypothesis = tf.matmul(x,w) + b
cost = tf.reduce_mean(tf.square(hypothesis - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
x_data, y_data = sess.run([x_imp_ten,y_imp_ten])
for step in range(2001):
    x_batch_tr, y_batch_tr = sess.run([x_batch,y_batch])
    _, cost_val , hypothesis_val = sess.run([train, cost,hypothesis], feed_dict= {x: x_data, y: y_data})
    if step % 10 == 0:
        print(step, cost_val)
The above code is just a simple linear regression problem, which is from Sung Kim's lecture. I have a problem with tf.train.batch: when the queue is used it operates well, but without the queue it doesn't work. Is there any method for loading the data in batches without using a queue?
Here, it takes so much time that it is almost useless.
I just want to batch that numpy array with simple array slicing instead of tf.train.batch.
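A rough sketch of that plain-slicing approach (it assumes the placeholders x and y, the train and cost ops, and the arrays x_imp_np and y_imp_np defined above; the batch size of 10 matches the original):
batch_size = 10
num_examples = x_imp_np.shape[0]
for step in range(2001):
    # Cycle through the array with simple slicing, wrapping around at the end.
    start = (step * batch_size) % num_examples
    end = start + batch_size
    x_batch_np = x_imp_np[start:end]
    y_batch_np = y_imp_np[start:end]
    _, cost_val = sess.run([train, cost], feed_dict={x: x_batch_np, y: y_batch_np})
    if step % 10 == 0:
        print(step, cost_val)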
I wrote a neural network in TensorFlow for the XOR input. I used 1 hidden layer with 2 units and softmax classification. Each input row is of the form <1, x_1, x_2, zero, one>, where
1 is the bias
x_1 and x_2 take values near 0 or 1, covering all the combinations {00, 01, 10, 11}; they are sampled from normal distributions centered around 0 or 1
zero: is 1 if the output is zero
one: is 1 if the output is one
The accuracy is always around 0.5. What has gone wrong? Is the architecture of the neural network wrong, or is there something wrong with the code?
import tensorflow as tf
import numpy as np
from random import randint

DEBUG=True

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def model(X, weight_hidden, weight_output):
    # [1,3] x [3,n_hiddent_units] = [1,n_hiddent_units]
    hiddern_units_output = tf.nn.sigmoid(tf.matmul(X, weight_hidden))
    # [1,n_hiddent_units] x [n_hiddent_units, 2] = [1,2]
    return hiddern_units_output
    #return tf.matmul(hiddern_units_output, weight_output)

def getHiddenLayerOutput(X, weight_hidden):
    hiddern_units_output = tf.nn.sigmoid(tf.matmul(X, weight_hidden))
    return hiddern_units_output

total_inputs = 100
zeros = tf.zeros([total_inputs,1])
ones = tf.ones([total_inputs,1])
around_zeros = tf.random_normal([total_inputs,1], mean=0, stddev=0.01)
around_ones = tf.random_normal([total_inputs,1], mean=1, stddev=0.01)
batch_size = 10
n_hiddent_units = 2

X = tf.placeholder("float", [None, 3])
Y = tf.placeholder("float", [None, 2])
weight_hidden = init_weights([3, n_hiddent_units])
weight_output = init_weights([n_hiddent_units, 2])

hiddern_units_output = getHiddenLayerOutput(X, weight_hidden)
py_x = model(X, weight_hidden, weight_output)

#cost = tf.square(Y - py_x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    trX_0_0 = sess.run(tf.concat([ones, around_zeros, around_zeros, ones, zeros], axis=1))
    trX_0_1 = sess.run(tf.concat([ones, around_zeros, around_ones, zeros, ones], axis=1))
    trX_1_0 = sess.run(tf.concat([ones, around_ones, around_zeros, zeros, ones], axis=1))
    trX_1_1 = sess.run(tf.concat([ones, around_ones, around_ones, ones, zeros], axis=1))
    trX = sess.run(tf.concat([trX_0_0, trX_0_1, trX_1_0, trX_1_1], axis=0))
    trX = sess.run(tf.random_shuffle(trX))
    print(trX)
    for i in range(10):
        for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX) + 1, batch_size)):
            trY = tf.identity(trX[start:end,3:5])
            trY = sess.run(tf.reshape(trY,[batch_size, 2]))
            sess.run(train_op, feed_dict={ X: trX[start:end,0:3], Y: trY })
        start_index = randint(0, (total_inputs*4)-batch_size)
        y_0 = sess.run(py_x, feed_dict={X: trX[start_index:start_index+batch_size,0:3]})
        print("iteration :",i, " accuracy :", np.mean(np.absolute(trX[start_index:start_index+batch_size,3:5]-y_0)),"\n")
Check the comments section for the updated code
The problem was with the randomly assigned weights. Here is the modified version, obtained after a series of trial and error.