How to get number of neurons in TensorFlow layer? - python

Suppose I am trying to connect the output of a pooling layer to a dense layer. In order to do this, I need to flatten the pooled tensor. Consider the layers below:
def conv_layer(input, in_channels, out_channels, name="conv"):
    w = tf.get_variable("W", initializer=tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.1))
    b = tf.get_variable("B", initializer=tf.constant(0.1, shape=[out_channels]))
    conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding="SAME")
    act = tf.nn.relu(conv + b)
    return act

def pool_layer(input, name="pool"):
    pool = tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    return pool

def dense_layer(input, size_in, size_out, name="dense"):
    w = tf.get_variable("W", initializer=tf.truncated_normal([size_in, size_out], stddev=0.1))
    b = tf.get_variable("B", initializer=tf.constant(0.1, shape=[size_out]))
    act = tf.nn.relu(tf.matmul(input, w) + b)
    return act
I am using them to create a network:
def cnn_model(x):
    x_image = tf.reshape(x, [-1, nseries, present_window, 1])
    conv1 = conv_layer(x_image, 1, 32, "conv1")
    pool1 = pool_layer(conv1, "pool1")
    conv2 = conv_layer(pool1, 32, 64, "conv2")
    pool2 = pool_layer(conv2, "pool2")
    nflat = 17 * 15 * 64  # hard-coded
    flat = tf.reshape(pool2, [-1, nflat])
    yhat = dense_layer(flat, nflat, future_window, "dense1")
    return yhat
As you can see, I am hard-coding the variable nflat. How can I avoid this?

If it's a tensor, pool.get_shape() should work in Keras or TensorFlow.
This actually returns the size of every dimension, so you need to pick the ones you want from it; here that is every dimension except the first (batch) one.
If input is actually your raw input (with no other layer before it), why are you max-pooling? Aren't you looking for dropout?
You will indeed run into a problem if your batch size is variable, since there is no way of telling the model the size of the reshape ahead of time.
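For instance, the hard-coded nflat can be computed from the pooled tensor's static shape. A minimal sketch using the question's own names (the flatten helper and the np.prod call are mine, not from the original post):

import numpy as np
import tensorflow as tf

def flatten(tensor):
    # Static shape is [batch, height, width, channels]; skip the batch dim.
    nflat = int(np.prod(tensor.get_shape().as_list()[1:]))
    return tf.reshape(tensor, [-1, nflat]), nflat

flat, nflat = flatten(pool2)
yhat = dense_layer(flat, nflat, future_window, "dense1")

On TF 1.x, tf.layers.flatten(pool2) achieves the same thing and infers the size for you.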


How to restructure the output tensor of a cnn layer for use by a linear layer in a simple pytorch model

Given a pytorch input dataset with dimensions:
dat.shape = torch.Size([128, 3, 64, 64])
This is a supervised learning problem: we have a separate labels.txt file containing one of C classes for each input observation. The value of C is computed from the number of distinct values in the labels file and is presently in the single digits.
I could use assistance in connecting the layers of a simple network, a mix of convolutional and linear layers, that performs multiclass classification. The intent is to pass the input through:
two cnn layers with maxpooling after each
a linear "readout" layer
softmax activation before the output/labels
Here is the core of my (faulty/broken) network. I am unable to determine the proper size/shape required of:
Output of Convolutional layer -> Input of Linear [Readout] layer
class CNNClassifier(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.maxpool = nn.MaxPool2d(kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.linear1 = nn.Linear(32 * 16 * 16, C)
        self.softmax1 = nn.LogSoftmax(dim=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool(F.leaky_relu(x))
        x = self.conv2(x)
        x = self.maxpool(F.leaky_relu(x))
        x = self.linear1(x)  # Size mismatch error HERE
        x = self.softmax1(x)
        return x
Training of the model is started by:
Xout = model(dat)
This results in:
RuntimeError: size mismatch, m1: [128 x 1568], m2: [8192 x 6]
at the linear1 input. What is needed here? Note I have seen uses of wildcard input sizes, e.g. via a view:
..
x = x.view(x.size(0), -1)
x = self.linear1(x) # Size mismatch error HERE
If that is included then the error changes to
RuntimeError: size mismatch, m1: [28672 x 7], m2: [8192 x 6]
Some pointers on how to think about and calculate the cnn layer / linear layer input/output sizes would be much appreciated.
The error
You have miscalculated the output size of the convolutional stack. It is actually [batch, 32, 7, 7] instead of [batch, 32, 16, 16]: each Conv2d with kernel 3 and no padding shrinks 64 -> 62, MaxPool2d with kernel_size=3 (hence stride 3) and padding 1 gives floor((62 + 2 - 3) / 3) + 1 = 21, and the second conv/pool pair takes 21 -> 19 -> 7.
You have to use reshape (or view), as the output from Conv2d has 4 dimensions ([batch, channels, height, width]), while the input to nn.Linear is required to have 2 dimensions ([batch, features]).
Use this for nn.Linear:
self.linear1 = nn.Linear(32 * 7 * 7, C)
And this in forward:
x = self.linear1(x.view(x.shape[0], -1))
Other possibilities
Many current architectures use pooling across the spatial dimensions (usually called global pooling). In PyTorch there is torch.nn.AdaptiveAvgPool2d (or torch.nn.AdaptiveMaxPool2d). Using this approach allows variable height and width for your input image, as only one value per channel is used as input to nn.Linear. This is how it looks:
class CNNClassifier(torch.nn.Module):
    def __init__(self, C=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.maxpool = nn.MaxPool2d(kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.pooling = torch.nn.AdaptiveAvgPool2d(output_size=1)
        self.linear1 = nn.Linear(32, C)
        self.softmax1 = nn.LogSoftmax(dim=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool(F.leaky_relu(x))
        x = self.conv2(x)
        x = self.maxpool(F.leaky_relu(x))
        x = self.linear1(self.pooling(x).view(x.shape[0], -1))
        x = self.softmax1(x)
        return x
So now images of torch.Size([128, 3, 64, 64]) and torch.Size([128, 3, 128, 128]) can be passed to the network.
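A quick way to check that claim (a sketch of mine, assuming the CNNClassifier above and the usual torch imports; C=6 and the random inputs are arbitrary):

import torch

model = CNNClassifier(C=6)
# Both spatial sizes flow through the same weights thanks to the global pooling.
print(model(torch.randn(128, 3, 64, 64)).shape)    # torch.Size([128, 6])
print(model(torch.randn(128, 3, 128, 128)).shape)  # torch.Size([128, 6])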
So the issue is with the way you defined the nn.Linear. You set the input size to 32*16*16, but 16*16 is not the spatial size of the feature map at that point; the numbers 32 and 16 are the channel counts that the Conv2d layers expect as input and produce as output.
If you add print(x.shape) before the input to the fully connected layer, you will get:
torch.Size([Batch, 32, 7, 7])
So your calculation should have been 7*7*32:
self.linear1 = nn.Linear(32*7*7, C)
And then using:
x = x.view(x.size(0), -1)
x = self.linear1(x)
Will work perfectly fine. You can read about what view does in: How does the "view" method work in PyTorch?
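If you would rather not do the arithmetic by hand at all, a common trick (a sketch of mine, not from either answer) is to run a dummy tensor through the convolutional stack once and read off the flattened size:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv2d(3, 16, 3)
conv2 = nn.Conv2d(16, 32, 3)
maxpool = nn.MaxPool2d(kernel_size=3, padding=1)

with torch.no_grad():
    # One fake image at the training resolution.
    dummy = torch.zeros(1, 3, 64, 64)
    out = maxpool(F.leaky_relu(conv2(maxpool(F.leaky_relu(conv1(dummy))))))

n_features = out.numel()  # 32 * 7 * 7 = 1568
linear1 = nn.Linear(n_features, 6)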

How to plot weights attached to 3 different filters?

I'm using one convolution layer and one fully connected layer in a CNN, with two output nodes. I'm using one input channel and 3 filter channels in the convolutional layer (1D convolution). When I store the final weight matrix of the fully connected layer it has shape (36, 2), whereas a single input has 12 features. Now I want to plot the filter weights attached to the 1st, 2nd and 3rd channel separately. If I plot the first 12 weights, does that mean they correspond to class 1 of the first channel?
def weight_variable(shape):
    initial = tf.truncated_normal(shape, mean=0, stddev=0.1)
    return tf.Variable(initial)

def conv1d(input, filter):
    return tf.nn.conv1d(input, filter, stride=1, padding='SAME')

x = tf.placeholder(tf.float32, [None, FLAGS.image_width])
y_ = tf.placeholder(tf.float32, [None, 2])
input = tf.reshape(x, [-1, FLAGS.image_width, FLAGS.input_channel])
filter = weight_variable([FLAGS.filter_width, FLAGS.input_channel,
                          FLAGS.filter_channel])
conv_out = tf.nn.tanh(conv1d(input, filter))

# Fully connected layer
dim = conv_out.get_shape().as_list()
conv_re = tf.reshape(conv_out, (-1, dim[1] * dim[2]))
W_fc = weight_variable([dim[1] * dim[2], 2])
logits = tf.matmul(conv_re, W_fc)
y_prime = tf.nn.softmax(logits)

# Cross entropy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                        labels=y_)
loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.GradientDescentOptimizer(FLAGS.rLearn).minimize(loss)

# Check predictions
correct_prediction = tf.equal(tf.argmax(y_prime, axis=1), tf.argmax(y_, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
W = W_fc.eval()  # shape (36, 2)
W1 = W[0:12, 0]
W2 = W[12:24, 0]
W3 = W[24:36, 0]
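Note (my addition, since the flattening order matters here): tf.reshape flattens in row-major order, so conv_re is ordered position-by-position, not channel-by-channel; the first 12 rows of W_fc therefore do not all belong to the first channel. A small numpy sketch of the ordering and of one way to slice the weights per channel:

import numpy as np

# Toy stand-in for one conv_out sample of shape [width=12, channels=3].
idx = np.arange(36).reshape(12, 3)  # entry [w, c] gets flat index 3*w + c
print(idx.reshape(-1)[:6])          # [0 1 2 3 4 5]: channels 0..2 at position 0, then position 1, ...

# Per-channel weights for output class 0, given W of shape (36, 2):
W = np.random.randn(36, 2)          # placeholder for W_fc.eval()
W_channel = [W.reshape(12, 3, 2)[:, k, 0] for k in range(3)]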

Weights of my TensorFlow FCN all drop to 0

I am trying to implement a Fully Convolutional network (5 layers) in TensorFlow.
But after a short time of training, all my logits collapse to 0.
Has anyone had the same problem before?
Here is how I implemented my CONV-ReLU-maxPOOL layer:
def conv_relu_layer(in_data, nb_filters, filter_shape):
    nb_in_channels = int(in_data.shape[3])
    conv_shape = [filter_shape[0], filter_shape[1],
                  nb_in_channels, nb_filters]
    weights = tf.Variable(
        tf.truncated_normal(conv_shape, mean=0., stddev=.05))
    bias = tf.Variable(
        tf.truncated_normal([nb_filters], mean=0., stddev=1.))
    output = tf.nn.conv2d(in_data, weights, [1, 1, 1, 1], padding="SAME")
    output += bias
    output = tf.nn.relu(output)
    return output

def conv_relu_pool_layer(in_data, nb_filters, filter_shape, pool_shape,
                         pooling=tf.nn.max_pool):
    conv_out = conv_relu_layer(in_data, nb_filters, filter_shape)
    ksize = [1, pool_shape[0], pool_shape[1], 1]
    strides = [1, pool_shape[0], pool_shape[1], 1]
    return pooling(conv_out, ksize=ksize, strides=strides, padding="SAME")
Here is my network:
def create_network_5C(in_data, name="5C"):
    c1 = conv_relu_pool_layer(in_data, 64, [5, 5], [2, 2])
    c2 = conv_relu_pool_layer(c1, 128, [5, 5], [2, 2])
    c3 = conv_relu_pool_layer(c2, 256, [5, 5], [2, 2])
    c4 = conv_relu_pool_layer(c3, 64, [5, 5], [2, 2])
    return conv_relu_layer(c4, 2, [5, 5])
The loss function:
def loss(logits, labels, num_classes):
    with tf.name_scope('loss'):
        logits = tf.reshape(logits, (-1, num_classes))
        epsilon = tf.constant(value=1e-4)
        labels = tf.to_float(tf.reshape(labels, (-1, num_classes)))
        softmax = tf.nn.softmax(logits) + epsilon
        # (an undefined per-class weighting tensor from the original snippet
        # was dropped here so that the code runs)
        cross_entropy = -tf.reduce_sum(labels * tf.log(softmax),
                                       reduction_indices=[1])
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        tf.add_to_collection('losses', cross_entropy_mean)
        loss = tf.add_n(tf.get_collection('losses'))
    return loss
My main routine:
batch_size = 5

# Load data
x = tf.placeholder(tf.float32, [None, 416, 416, 3], name="x")
y = tf.placeholder(tf.float32, [None, 416, 416, 1], name="y")

# Contrast normalization and computation
x_gcn = tf.map_fn(lambda img: tf.image.per_image_standardization(img), x)
logits = create_network_5C(x_gcn)

# Bring the label to the same dimensions as the output
y_p = tf.nn.avg_pool(tf.sign(y),
                     ksize=[1, 16, 16, 1], strides=[1, 16, 16, 1], padding="SAME")
y_rshp = tf.reshape(y_p, [batch_size, 416 // 16, 416 // 16])
y_bin = tf.cast(y_rshp > .5, tf.int32)
y_1hot = tf.one_hot(y_bin, 2)

# Compute the error
error = loss(logits, y_1hot, 2)
optimizer = tf.train.AdamOptimizer(learning_rate=args.eta).minimize(error)

# Run the session
init_op = tf.global_variables_initializer()
with tf.Session() as session:
    session.run(init_op)
    err, _ = session.run([error, optimizer],
                         feed_dict={x: image_batch,
                                    y: label_batch})
I note that if I reduce my network to 2 layers only, the logits don't drop to 0, but it doesn't learn anything either. If I reduce it to 3 layers, they still drop to 0, but only after many iterations (whereas with 5 layers they drop to 0 within a few batches).
Can this be linked to what is called "vanishing gradients"?
If it's relevant, my setup is: Ubuntu 16.04 - Python 3.6.4 - TensorFlow 1.6.0
[EDIT] My problem really looks like dying ReLUs, as mentioned here: StackOverflow: FCN training error. However, my data is normalized (roughly between -2 and +2), and I have already tried changing the initial mean and stddev of my weights and biases.
[EDIT 2] I tried replacing the ReLUs with leaky ReLU or softplus; in both cases the logits get stuck under 0.1 and the loss stays between 0.6 and 0.7.
Using some leaky ReLU was actually enough; then I just needed to let it train for a huge amount of time.
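For reference, a minimal sketch of that swap (my own code, not from the original post; the slope alpha=0.2 and the zero bias initialization are arbitrary choices, and tf.nn.leaky_relu is available from TensorFlow 1.4 onwards):

def conv_leaky_relu_layer(in_data, nb_filters, filter_shape, alpha=0.2):
    nb_in_channels = int(in_data.shape[3])
    conv_shape = [filter_shape[0], filter_shape[1], nb_in_channels, nb_filters]
    weights = tf.Variable(tf.truncated_normal(conv_shape, mean=0., stddev=.05))
    bias = tf.Variable(tf.zeros([nb_filters]))
    output = tf.nn.conv2d(in_data, weights, [1, 1, 1, 1], padding="SAME") + bias
    # Unlike ReLU, leaky ReLU keeps a small gradient (alpha * x) for negative
    # inputs, so units cannot get permanently stuck at zero ("die").
    return tf.nn.leaky_relu(output, alpha=alpha)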

Using weights initializer with tf.nn.conv2d

When using tf.layers.conv2d, setting the initializer is easy; it can be done through its kernel_initializer parameter. But what if I use tf.nn.conv2d? I use the code below. Is this equivalent to setting the kernel_initializer parameter in tf.layers.conv2d? Although the program runs without errors, I don't know how to verify whether it does what it is expected to do.
with tf.name_scope('conv1_2') as scope:
    kernel = tf.get_variable(initializer=tf.contrib.layers.xavier_initializer(),
                             shape=[3, 3, 32, 32], name='weights')
    conv = tf.nn.conv2d(conv1_1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32),
                         trainable=True, name='biases')
    out = tf.nn.bias_add(conv, biases)
    self.conv1_2 = tf.nn.relu(out, name=scope)
    self.parameters += [kernel, biases]
The underlying operation is the same (see here).
As for the kernel and its initialization, I took a glance at the code and it looked the same: layers.conv2d calls tf.get_variable at the end of the day.
But I wanted to see it empirically, so here is a test code that declares a conv2d using each method (tf.layers.conv2d and tf.nn.conv2d), evaluates the initialized kernels and compares them.
I've arbitrarily set the things that shouldn't interfere in the comparison, such as the input tensor and the strides.
import tensorflow as tf
import numpy as np

# the way you described in your question
def _nn(input_tensor, initializer, filters, size):
    kernel = tf.get_variable(
        initializer=initializer,
        shape=[size, size, 32, filters],
        name='kernel')
    conv = tf.nn.conv2d(
        input=input_tensor,
        filter=kernel,
        strides=[1, 1, 1, 1],
        padding='SAME')
    return kernel

# the other way
def _layer(input_tensor, initializer, filters, size):
    tf.layers.conv2d(
        inputs=input_tensor,
        filters=filters,
        kernel_size=size,
        kernel_initializer=initializer)
    # 'conv2d/kernel:0' is the name of the generated kernel
    return tf.get_default_graph().get_tensor_by_name('conv2d/kernel:0')

def _get_kernel(method):
    # an isolated context for each conv2d
    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    with graph.as_default(), sess.as_default():
        # important so that the same randomness doesn't play a role
        tf.set_random_seed(42)
        # arbitrary input tensor with a compatible shape
        input_tensor = tf.constant(1.0, shape=[1, 64, 64, 32])
        initializer = tf.contrib.layers.xavier_initializer()
        kernel = method(
            input_tensor=input_tensor,
            initializer=initializer,
            filters=32,
            size=3)
        sess.run(tf.global_variables_initializer())
        return sess.run(kernel)

if __name__ == '__main__':
    kernel_nn = _get_kernel(_nn)
    kernel_layer = _get_kernel(_layer)
    print('kernels are ', end='')
    # compares shape and values
    if np.array_equal(kernel_layer, kernel_nn):
        print('exactly the same')
    else:
        print('not the same!')
And the output is... kernels are exactly the same.
The docs, btw: tf.nn.conv2d and tf.layers.conv2d.
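As an aside (my addition, not part of the original answer): tf.contrib.layers.xavier_initializer() with its default uniform=True is the Glorot uniform initializer, so on TF 1.4+ the contrib-free spelling should behave the same way and avoids the contrib dependency:

kernel = tf.get_variable(initializer=tf.glorot_uniform_initializer(),
                         shape=[3, 3, 32, 32], name='weights')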

How to change a classification model to a regression model?

I am using a pre-trained AlexNet, as shown below. I want to use that model for regression with 6 outputs: X coordinate (range 0 to 227), Y coordinate (range 0 to 227), height (range 20 to 50), width (range 20 to 50), sin(theta) and cos(theta) (the range of theta is -180 to 180 degrees).
These are the changes I made:
changed the loss function to MSE.
changed the output layer from 1000 units to 6.
changed the last layer from ReLU to a linear activation function.
Now I am not getting proper values for sine and cosine (they should be in the range -1 to 1); I am getting out-of-bound values. What should I do? How should I keep a bound on the values, and should I keep a bound on the other parameters as well? How should I incorporate those changes?
What other changes should I make to use this model for regression?
import tensorflow as tf
import numpy as np

class AlexNet(object):

    def __init__(self, x, keep_prob, num_classes, skip_layer,
                 weights_path='DEFAULT'):
        # Parse input arguments into class variables
        self.X = x
        self.NUM_CLASSES = num_classes
        self.KEEP_PROB = keep_prob
        self.SKIP_LAYER = skip_layer
        if weights_path == 'DEFAULT':
            self.WEIGHTS_PATH = 'bvlc_alexnet.npy'
        else:
            self.WEIGHTS_PATH = weights_path
        # Call the create function to build the computational graph of AlexNet
        self.create()

    def create(self):
        # 1st Layer: Conv (w ReLu) -> Pool -> Lrn
        conv1 = conv(self.X, 11, 11, 96, 4, 4, padding='VALID', name='conv1')
        pool1 = max_pool(conv1, 3, 3, 2, 2, padding='VALID', name='pool1')
        norm1 = lrn(pool1, 2, 2e-05, 0.75, name='norm1')

        # 2nd Layer: Conv (w ReLu) -> Pool -> Lrn with 2 groups
        conv2 = conv(norm1, 5, 5, 256, 1, 1, groups=2, name='conv2')
        pool2 = max_pool(conv2, 3, 3, 2, 2, padding='VALID', name='pool2')
        norm2 = lrn(pool2, 2, 2e-05, 0.75, name='norm2')

        # 3rd Layer: Conv (w ReLu)
        conv3 = conv(norm2, 3, 3, 384, 1, 1, name='conv3')

        # 4th Layer: Conv (w ReLu) split into two groups
        conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4')

        # 5th Layer: Conv (w ReLu) -> Pool split into two groups
        conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5')
        pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')

        # 6th Layer: Flatten -> FC (w ReLu) -> Dropout
        flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])
        fc6 = fc(flattened, 6 * 6 * 256, 4096, name='fc6', relu=True)
        dropout6 = dropout(fc6, self.KEEP_PROB)

        # 7th Layer: FC (w ReLu) -> Dropout
        fc7 = fc(dropout6, 4096, 4096, name='fc7', relu=False)
        # dropout7 = dropout(fc7, self.KEEP_PROB)

        # 8th Layer: FC and return unscaled activations
        # (for tf.nn.softmax_cross_entropy_with_logits)
        self.fc8 = fc(fc7, 4096, self.NUM_CLASSES, name='fc8', relu=False)

    def load_initial_weights(self, session):
        """
        As the weights from http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/ come
        as a dict of lists (e.g. weights['conv1'] is a list) and not as a dict of
        dicts (e.g. weights['conv1'] is a dict with keys 'weights' & 'biases') we
        need a special load function
        """
        # Load the weights into memory
        weights_dict = np.load(self.WEIGHTS_PATH, encoding='bytes').item()
        # Loop over all layer names stored in the weights dict
        for op_name in weights_dict:
            # Check if the layer is one of the layers that should be reinitialized
            if op_name not in self.SKIP_LAYER:
                with tf.variable_scope(op_name, reuse=True):
                    # Loop over the list of weights/biases and assign them to
                    # their corresponding tf variable
                    for data in weights_dict[op_name]:
                        # Biases
                        if len(data.shape) == 1:
                            var = tf.get_variable('biases', trainable=False)
                            session.run(var.assign(data))
                        # Weights
                        else:
                            var = tf.get_variable('weights', trainable=False)
                            session.run(var.assign(data))

"""
Predefine all necessary layers for the AlexNet
"""

def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,
         padding='SAME', groups=1):
    """
    Adapted from: https://github.com/ethereon/caffe-tensorflow
    """
    # Get the number of input channels
    input_channels = int(x.get_shape()[-1])

    # Create a lambda function for the convolution
    convolve = lambda i, k: tf.nn.conv2d(i, k,
                                         strides=[1, stride_y, stride_x, 1],
                                         padding=padding)

    with tf.variable_scope(name) as scope:
        # Create tf variables for the weights and biases of the conv layer
        weights = tf.get_variable('weights',
                                  shape=[filter_height, filter_width,
                                         input_channels // groups, num_filters])
        biases = tf.get_variable('biases', shape=[num_filters])

        if groups == 1:
            conv = convolve(x, weights)
        else:
            # In the case of multiple groups, split the input and weights
            # along the channel axis and convolve them separately
            input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x)
            weight_groups = tf.split(axis=3, num_or_size_splits=groups, value=weights)
            output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)]
            # Concat the convolved outputs together again
            conv = tf.concat(axis=3, values=output_groups)

        # Add biases
        bias = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape().as_list())
        # Apply the relu function
        relu = tf.nn.relu(bias, name=scope.name)
        return relu

def fc(x, num_in, num_out, name, relu=True):
    with tf.variable_scope(name) as scope:
        # Create tf variables for the weights and biases
        weights = tf.get_variable('weights', shape=[num_in, num_out], trainable=True)
        biases = tf.get_variable('biases', [num_out], trainable=True)
        # Matrix multiply weights and inputs and add the bias
        act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)
        if relu:
            # Apply the ReLu non-linearity
            return tf.nn.relu(act)
        return act

def max_pool(x, filter_height, filter_width, stride_y, stride_x, name, padding='SAME'):
    return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],
                          strides=[1, stride_y, stride_x, 1],
                          padding=padding, name=name)

def lrn(x, radius, alpha, beta, name, bias=1.0):
    return tf.nn.local_response_normalization(x, depth_radius=radius, alpha=alpha,
                                              beta=beta, bias=bias, name=name)

def dropout(x, keep_prob):
    return tf.nn.dropout(x, keep_prob)
Now the code for the loss function and the optimizer is:
# Op for calculating the loss
with tf.name_scope("cross_ent"):
    loss = tf.reduce_mean(tf.squared_difference(score, y))

# Train op
with tf.name_scope("train"):
    # Get gradients of all trainable variables
    gradients = tf.gradients(loss, var_list)
    gradients = list(zip(gradients, var_list))
    # Create the optimizer and apply gradient descent to the trainable variables
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.apply_gradients(grads_and_vars=gradients)
Is there anything I should change in this part?
Any other comments, or anything I should take care of when changing the model from classification to regression?
I am new to TensorFlow and deep learning.
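One way to keep the sine/cosine outputs in (-1, 1), sketched under my own assumptions (this is not an answer from the thread; splitting the head into four unbounded outputs plus two squashed ones is just one common choice): leave the coordinate and size outputs linear and squash only the last two with tanh, which is bounded to (-1, 1) by construction.

# self.fc8 has shape [batch, 6]: x, y, height, width, sin(theta), cos(theta)
xywh = self.fc8[:, :4]                # left linear/unbounded
sincos = tf.nn.tanh(self.fc8[:, 4:])  # squashed into (-1, 1)
self.output = tf.concat([xywh, sincos], axis=1)

Alternatively, predict theta itself and compute the two values inside the graph with tf.sin/tf.cos, which additionally guarantees sin(theta)^2 + cos(theta)^2 = 1.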
