Predicting a point sequence in an image - Python

My training set is a set of images (either 3-channel or 1-channel; of course I only use one type of channel), and the labels are a sequence of points in a specific order that I want to predict from the images.
I am using a model inspired by the image captioning example on the TensorFlow website. This is also the approach that this paper takes: https://arxiv.org/pdf/1901.03781.pdf
class CNN_Encoder(tf.keras.Model):
    # Since you have already extracted the features and dumped it using pickle
    # This encoder passes those features through a Fully connected layer
    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        self.fc = tf.keras.layers.Dense(embedding_dim)

    def call(self, x):
        x = self.fc(x)
        x = tf.nn.relu(x)
        return x

class RNN_Decoder(tf.keras.Model):
    def __init__(self, embedding_dim, units, output_dim):
        super(RNN_Decoder, self).__init__()
        self.units = units
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(output_dim)

    def call(self, x, features, hidden):
        x = tf.concat((features, x), axis=-1)
        output, state = self.gru(x)
        x = self.fc1(state)
        x = self.fc2(x)
        return x

    def reset_state(self, batch_size):
        return tf.zeros((batch_size, self.units))
#tf.function
def train_step(img_tensor, target):
    loss = 0
    hidden = decoder.reset_state(batch_size=target.shape[0])
    dec_input = tf.expand_dims([[0., 0.]] * target.shape[0], 1)
    with tf.GradientTape() as tape:
        features = encoder(img_tensor)
        for i in range(1, target.shape[1]):
            predictions = decoder(dec_input, features, hidden)
            loss += loss_function(target[:, i], predictions)
            # using teacher forcing
            dec_input = tf.expand_dims(target[:, i], 1)
    total_loss = (loss / int(target.shape[1]))
    trainable_variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
    return loss, total_loss
EPOCHS = 20
batch_size = 8
for epoch in tqdm(range(start_epoch, EPOCHS)):
    start = time.time()
    total_loss = 0
    for (batch, (img_tensor, target)) in enumerate(data_generator(preds_t, labels_t)):
        img_tensor = img_tensor.reshape((-1, 1, 128*128))
        batch_loss, t_loss = train_step(img_tensor, target)
        total_loss += t_loss
        if batch % 100 == 0:
            print('Epoch {} Batch {} Loss {:.4f}'.format(
                epoch + 1, batch, batch_loss.numpy() / int(target.shape[1])))
        if batch == 10000:
            break
    # storing the epoch end loss value to plot later
    #loss_plot.append(total_loss / num_steps)
    if epoch % 5 == 0:
        ckpt_manager.save()
    print('Epoch {} Loss {:.6f}'.format(epoch + 1,
                                        total_loss / num_steps))
    print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
For the feature vector, I extract the last layer of a U-Net, so each image gives a feature map of size 1x128x128. I reshape it to 1x1x(128*128) and pass it through a fully connected layer, after which the shape becomes 1x1x256.
The labels I want to predict are image coordinates, i.e. (x, y). The input to the GRU layer is the concatenation of the 1x1x256 features with the 1x1x2 (t-1) coordinates, which I then pass through a 2-layer fully connected head with output dimension 2 for the two coordinates. I have removed attention for now to keep the model simpler. I normalize my images. I pad the coordinate sequences with (0, 0) at the start, (-1, -1) at the end, and (-2, -2) as regular padding, to get a uniform sequence length of 350x2.
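For reference, here is roughly how I build the padded coordinate sequences (a simplified sketch of the scheme above; the helper name is just for illustration):
import numpy as np

MAX_LEN = 350  # fixed sequence length

def pad_coordinates(coords):
    # coords: array-like of (x, y) points, at most MAX_LEN - 2 of them
    seq = [(0.0, 0.0)]                              # start token
    seq += [tuple(c) for c in coords]               # actual (x, y) points
    seq += [(-1.0, -1.0)]                           # end token
    seq += [(-2.0, -2.0)] * (MAX_LEN - len(seq))    # regular padding
    return np.asarray(seq, dtype=np.float32)        # shape (350, 2)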
The network doesn't seem to learn much; I just get a few points scattered diagonally across the image. The biggest difference I see compared with the image captioning model is that words can be converted to embeddings, so you have 128 image features and 128 word features being concatenated and fed into the LSTM. In my case the sequence information per step is just one entry. Could that be the reason the network is not learning much?
If anyone has any insights into what I should change, that would be great.

Your question requires some experience and a deeper investigation; I can only suggest general advice for an under-fitting issue. Here are a few things to try.
Personally, I'd start by trying to overfit on a single batch.
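For example, a rough sketch of that sanity check, reusing the train_step and data_generator from your question (assuming data_generator is a plain Python generator yielding (img_tensor, target) pairs):
# Grab one fixed batch and train on it repeatedly; if the loss cannot be
# driven close to zero here, the problem is in the model/loss wiring
# rather than in the amount of data.
img_tensor, target = next(data_generator(preds_t, labels_t))
img_tensor = img_tensor.reshape((-1, 1, 128*128))
for step in range(500):
    batch_loss, t_loss = train_step(img_tensor, target)
    if step % 50 == 0:
        print('step {} loss {:.4f}'.format(step, float(t_loss)))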

Related

Word2Vec with negative sampling python implementation

I'm trying to implement word2vec with negative sampling in Python almost from scratch, and since I'm quite new to neural networks I have run into some issues. I would really appreciate any help.
So, I wrote a simple network with a forward pass. I didn't understand which element has to have a grad_fn; I kept getting an error like 'tensor has no grad_fn' until I added requires_grad_() on the returned value. Is that correct?
dataset = Word2VecNegativeSampling(data, num_negative_samples, 30000)
dataset.generate_dataset()
wordvec_dim = 10

class Word2VecNegativeSamples(nn.Module):
    def __init__(self, num_tokens):
        super(Word2VecNegativeSamples, self).__init__()
        self.input = nn.Linear(num_tokens, 10, bias=False)
        self.output = nn.Linear(10, num_tokens, bias=False)
        self.num_tokens = num_tokens

    def forward(self, input_index_batch, output_indices_batch):
        '''
        Implements forward pass with negative sampling
        Arguments:
        input_index_batch - Tensor of ints, shape: (batch_size, ), indices of input words in the batch
        output_indices_batch - Tensor of ints, shape: (batch_size, num_negative_samples+1),
                               indices of the target words for every sample
        Returns:
        predictions - Tensor of floats, shape: (batch_size, num_negative_samples+1)
        '''
        results = []
        batch_size = len(input_index_batch)
        for i in range(batch_size):
            input_one_hot = torch.zeros(self.num_tokens)
            input_one_hot[input_index_batch[i]] = 1
            forward_result = self.output(self.input(input_one_hot))
            results.append(torch.tensor([forward_result[out_index] for out_index in output_indices_batch[i]]))
        return torch.stack(results).requires_grad_()

nn_model = Word2VecNegativeSamples(data.num_tokens())
nn_model.type(torch.FloatTensor)
After all that, I'm trying to train the model, but neither the loss nor the accuracy changes. Is the code for the model prediction correct as well?
Here is the training code:
def train_neg_sample(model, dataset, train_loader, optimizer, scheduler, num_epochs):
    loss = nn.BCEWithLogitsLoss().type(torch.FloatTensor)
    loss_history = []
    train_history = []
    for epoch in range(num_epochs):
        model.train()  # Enter train mode
        dataset.generate_dataset()
        loss_accum = 0
        correct_samples = 0
        total_samples = 0
        for i_step, (inp, out, lab) in enumerate(train_loader):
            prediction = model(inp, out)
            loss_value = loss(prediction, lab)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()
            _, indices = torch.max(prediction, 1)
            correct_samples += torch.sum(indices == 0)
            total_samples += lab.shape[0]
            loss_accum += loss_value
        scheduler.step()
        ave_loss = loss_accum / i_step
        train_accuracy = float(correct_samples) / total_samples
        loss_history.append(float(ave_loss))
        train_history.append(train_accuracy)
        print("Epoch#: %i, Average loss: %f, Train accuracy: %f" % (epoch, ave_loss, train_accuracy))
    return loss_history, train_history
If your loss is not changing, it's highly probable that you registered the wrong set of parameters with the optimizer. Can you post the code snippet where you initialize your model and optimizer? It is supposed to look like this:
nn_model = Word2VecNegativeSamples(data.num_tokens())
optimizer = optim.SGD(nn_model.parameters(), lr=0.001, momentum=0.9)
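You can also quickly check which parameters actually receive gradients after one backward pass; a debugging sketch, assuming a single (inp, out, lab) batch from your train_loader and the loss defined in your training function:
prediction = nn_model(inp, out)
loss_value = loss(prediction, lab)
optimizer.zero_grad()
loss_value.backward()
for name, param in nn_model.named_parameters():
    # A parameter whose grad is None (or all zeros) is not receiving gradients.
    grad_sum = None if param.grad is None else param.grad.abs().sum().item()
    print(name, grad_sum)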

Embedding matrix and one hot vector (Pytorch)

I built a bidirectional LSTM for sentiment analysis on news headlines, but when training the model the loss value doesn't improve; it stays around 0.6 to 0.7.
Certainly I'm doing something wrong, and I wonder if it is related to the Embedding layer.
I'm iteratively passing each batch into the network with a batch size of 10 and a sentence length of 30 words; my vocabulary size is 5745, so the shape of this tensor after one-hot encoding is (10, 30, 5745).
My Embedding layer has num_embeddings = 5745 and embed_dim = 100, so when I call self.embedding(input) the output shape is (10, 30, 5745, 100).
I wanted an output shape of (10, 30, 100).
Therefore I used this line of code:
embeddings = torch.max(embeddings, dim=2)
But I'm not sure it does what I expect for each word/one-hot vector, which is: if I have a one-hot vector representing a word with shape (5745, 1) and an embedding matrix with shape (100, 5745), I get an embedding vector of shape (100, 1), and therefore I would get an output of (10, 30, 100) by doing the above?
Maybe I'm not thinking about this correctly and it's affecting my end result.
RNN:
class RNN(nn.Module):
    def __init__(self, vocab_size, embed_dim, hidden_dim, output_dim, n_layers, dropout):
        # calling the init function of the RNN parent
        super(RNN, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.encoder = nn.LSTM(embed_dim,
                               hidden_dim,
                               n_layers,
                               dropout=dropout,
                               bidirectional=True
                               )
        # Linear transformation
        self.decoder = nn.Linear(hidden_dim*2, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs):
        # (batch_size, timesteps, embed_dim)
        embeddings = self.dropout(self.embedding(inputs))
        embeddings = torch.max(embeddings, dim=2)
        embeddings = embeddings[0].type(torch.cuda.FloatTensor)
        # output of each timestep
        output, (hidden, cell) = self.encoder(embeddings)
        merge = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1))
        output = self.decoder(merge)
        return output
train:
def train(model, text, label, epochs, lr=0.001):
    model.train()
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    # The BCEWithLogitsLoss criterion carries out both the sigmoid and the binary cross entropy steps
    criterion = nn.BCEWithLogitsLoss()
    counter = 0
    for e in range(epochs):
        for x, y in batches(text, label):
            # In order to avoid gradient accumulation before backpropagation
            x = one_hot_encode(x, vocab)
            x = torch.from_numpy(x).to(device)
            output = model(x)
            #print(torch.cuda.memory_summary(device=None, abbreviated=False))
            y = torch.from_numpy(y)
            y = torch.unsqueeze(y, 1).to(device)
            #print(output.shape)
            #print(y.shape)
            loss = criterion(output, y.float())
            acc = binaryAccuracy(output, y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            counter += 1
        print("Epoch {}/{}".format(e+1, epochs),
              "Loss: {}".format(loss.item()),
              "accuracy: {}".format(acc))
one-hot:
def one_hot_encode(arr, n_labels):
    one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.int64)
    one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1
    one_hot = one_hot.reshape((*arr.shape, n_labels))
    return one_hot
batches:
def batches(text, label, num_seqs=10):
    counter = 0
    # create empty arrays with the specified number of columns
    x = np.array([], dtype=int).reshape(0, findMaxLen())
    y = np.array([], dtype=int).reshape(0, 1)
    for sent, l in zip(text, label):
        # create a np array of zeros with length 30
        tmp1 = np.zeros((findMaxLen()), dtype=int)
        #tmp1 = np.randint(0, high=vocab, size=findMaxLen(), dtype=int)
        # create a 1d array
        tmp2 = np.atleast_1d(np.array(l))
        for ind, wrd in enumerate(sent):
            if wrd in uniqueWrds and ind < 30:
                tmp1[ind] = word_to_index[wrd]
        # append the arrays to the empty arrays
        x = np.vstack([x, tmp1])
        y = np.vstack([y, tmp2])
        counter += 1
        if counter == num_seqs:
            yield x, np.squeeze(y, 1)
            counter = 0
            x = np.array([], dtype=int).reshape(0, findMaxLen())
            y = np.array([], dtype=int).reshape(0, 1)
If you want your output to be of dimension (10, 30, 100), I think you need to feed the network the indices of the words of the sentences as an array of dimension (10, 30). You can probably get the indices by performing a torch.argmax on the one-hot encoded input tensor.
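A minimal sketch of that idea, using the shapes and vocabulary size from your question (x_onehot stands in for the one-hot tensor you build in the training loop):
import torch
import torch.nn as nn

vocab_size, embed_dim = 5745, 100
embedding = nn.Embedding(vocab_size, embed_dim)

x_onehot = torch.zeros(10, 30, vocab_size)    # (batch, seq_len, vocab), one-hot
indices = torch.argmax(x_onehot, dim=2)       # (10, 30) integer word indices
embedded = embedding(indices)                 # (10, 30, 100)
print(embedded.shape)                         # torch.Size([10, 30, 100])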

LSTM-CNN to classify sequences of images

I got an assignment and got stuck on it while going down the rabbit hole of learning PyTorch, LSTMs, and CNNs.
Using the well-known MNIST dataset, I take combinations of 4 digits, and each combination falls into one of 7 labels.
eg:
1111 label 1 (follow a constant trend)
1234 label 2 increasing trend
4321 label 3 decreasing trend
...
7382 label 7 decreasing trend - increasing trend - decreasing trend
The shape of my tensor after loading becomes (3, 4, 28, 28), where 28 is the MNIST image width and height, 3 is the batch size, and 4 is the channels (4 images).
I'm somewhat stuck with how to pass this into a PyTorch-backed LSTM and CNN, as basically all Google searches lead to articles where simply one image is passed in.
I was thinking of reshaping it to one long array of pixel values, where I put all of the values of the first image row by row (28), followed by the same for the second, third, and fourth image. That would make 4 * 28 * 28 = 3136 values.
Is my way of thinking about how to tackle this correct, or should I rethink? I'm rather new to all of this and looking for some guidance on how to go forward. I've been reading loads of articles and YouTube videos, but they all seem to touch on the basics or on variants of the same subject.
I have written some code, but running it gives errors.
import numpy as np
import torch
import torch.nn as nn
from torch import optim, softmax
from sklearn.model_selection import train_test_split

# dataset = sequences of 4 MNIST images each
# datalabels = 7

# Data
x_train, x_test, y_train, y_test = train_test_split(dataset.data, dataset.data_label, test_size=0.15,
                                                     random_state=42)

# model
class Mylstm(nn.Module):
    def __init__(self, input_size, hidden_size, n_layers, n_classes):
        super(Mylstm, self).__init__()
        self.input_size = input_size
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size, hidden_size, n_layers, batch_first=True)
        # readout layer
        self.fc = nn.Linear(hidden_size, n_classes)

    def forward(self, x):
        # Initialize hidden state with zeros
        h0 = torch.zeros(self.n_layers, x.size(0), self.hidden_size).requires_grad_()
        # initialize the cell state:
        c0 = torch.zeros(self.n_layers, x.size(0), self.hidden_size).requires_grad_()
        out, (h_n, h_c) = self.lstm(x, (h0.detach(), c0.detach()))
        x = h_n[-1, :, 1]
        x = self.fc(x)
        x = softmax(x, dim=1)
        return x

# Hyperparameters
input_size = 28
hidden_size = 256
sequence_length = 28
n_layers = 2
n_classes = 7
learning_rate = 0.001

model = Mylstm(input_size, hidden_size, n_layers, n_classes)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# training
bs = 0
num_epochs = 5
batch_size = 3
if np.mod(x_train.shape[0], batch_size) == 0.0:
    iter = int(x_train.shape[0] / batch_size)
else:
    iter = int(x_train.shape[0] / batch_size) + 1
bs = 0
for i in range(iter):
    sequences = x_test[bs:bs + batch_size, :]
    labels = y_test[bs:bs + batch_size]
    test_images = dataset.load_images(sequences)
    bs += batch_size
for epoch in range(num_epochs):
    for i in range(iter):
        sequences = x_train[bs:bs + batch_size, :]
        labels = y_train[bs:bs + batch_size]
        input_images = dataset.load_images(sequences)
        bs += batch_size
        images = (torch.from_numpy(input_images)).view(batch_size, 4, -1)
        labels = torch.from_numpy(labels)
        optimizer.zero_grad()
        output = model(images)
        # calculate Loss
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
The error I'm currently getting is:
RuntimeError: input.size(-1) must be equal to input_size. Expected 28, got 784
Change your input size from 28 to 784 (784 = 28 * 28).
The input_size argument is the number of features in one element of the sequence, i.e. the number of features of one MNIST image, which is its number of pixels (width * height of the image).
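A minimal sketch of that change, with the shapes from your question (the linear readout to 7 classes follows your posted model):
import torch
import torch.nn as nn

batch_size, seq_len, h, w = 3, 4, 28, 28
x = torch.randn(batch_size, seq_len, h, w)    # a batch of 4-image sequences
x = x.view(batch_size, seq_len, h * w)        # (3, 4, 784): one image per timestep

lstm = nn.LSTM(input_size=h * w, hidden_size=256, num_layers=2, batch_first=True)
fc = nn.Linear(256, 7)                        # 7 trend classes

out, (h_n, c_n) = lstm(x)                     # h_n: (num_layers, batch, hidden)
logits = fc(h_n[-1])                          # (3, 7): raw class scores
print(logits.shape)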

Weights not updating on my neural net (Pytorch)

I'm completely new to neural nets, so I tried to roughly follow some tutorials to create a neural net that can just distinguish whether a given binary picture contains a white circle or is all black. So, I generated 1000 arrays of size 10000, each representing a 100x100 picture, with half of them containing a white circle somewhere. The generation of my dataset looks like this:
for i in range(1000):
    image = [0] * (IMAGE_SIZE * IMAGE_SIZE)
    if random() < 0.5:
        dataset.append([image, [[0]]])
    else:
        # inserts circle in image
        # ...
        dataset.append([image, [[1]]])
np.random.shuffle(dataset)
np.save("testdataset.npy", dataset)
The double list around the classifications is there because the net seemed to give that format as output, so I matched it.
Now, since I don't really have a precise idea of how PyTorch works, I don't really know which parts of the code are relevant for solving my problem and which aren't. Therefore, I give the code for the net and the training below and really hope that someone can explain to me where I went wrong. I'm sorry if it's too much code. The code runs without errors, but if I print the parameters before and after training, they haven't changed in any way, and the net always just returns a 0 for every image/array.
IMAGE_SIZE = 100
EPOCHS = 3
BATCH_SIZE = 50
VAL_PCT = 0.1

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(IMAGE_SIZE * IMAGE_SIZE, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return F.log_softmax(x, dim=1)

net = Net()
optimizer = optim.Adam(net.parameters(), lr=0.01)
loss_function = nn.MSELoss()

dataset = np.load("testdataset.npy", allow_pickle=True)
X = torch.Tensor([i[0] for i in dataset]).view(-1, 10000)
y = torch.Tensor([i[1] for i in dataset])

val_size = int(len(X) * VAL_PCT)
train_X = X[:-val_size]
train_y = y[:-val_size]
test_X = X[-val_size:]
test_y = y[-val_size:]

for epoch in range(EPOCHS):
    for i in range(0, len(train_X), BATCH_SIZE):
        batch_X = train_X[i:i + BATCH_SIZE].view(-1, 1, 10000)
        batch_y = train_y[i:i + BATCH_SIZE]
        net.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs, batch_y)
        loss.backward()
        optimizer.step()
Instead of net.zero_grad() I would recommend using optimizer.zero_grad(), as it's more common and the de facto standard. Your training loop should be:
for epoch in range(EPOCHS):
    for i in range(0, len(train_X), BATCH_SIZE):
        batch_X = train_X[i:i + BATCH_SIZE].view(-1, 1, 10000)
        batch_y = train_y[i:i + BATCH_SIZE]
        optimizer.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs, batch_y)
        loss.backward()
        optimizer.step()
I would recommend reading a bit about different loss functions. It seems you have a classification problem; for that you should use a logits-based loss (binary classification) or a cross-entropy loss (multi-class). I would make the following changes to the network and loss function:
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(IMAGE_SIZE * IMAGE_SIZE, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x

loss_function = nn.BCEWithLogitsLoss()
Check the documentation before using it: https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss
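For reference, a minimal usage sketch of that loss, continuing the code above (batch_X and batch_y are the tensors from your training loop; the 0.5 sigmoid threshold for hard predictions is my own addition):
logits = net(batch_X.view(-1, IMAGE_SIZE * IMAGE_SIZE))    # (batch, 1) raw scores
loss = loss_function(logits, batch_y.view(-1, 1).float())  # targets must be 0. or 1.
preds = (torch.sigmoid(logits) > 0.5).float()              # hard class predictions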
Good luck!
First, it is not ideal to use neural networks to address this kind of problem. Neural networks are meant for highly non-linear data; for this example, you could simply use the average intensity of the image to find out whether a white pixel is present or not.
However, a classic logistic regression problem outputs a value from 0 to 1, i.e. a probability.
The softmax function is used when you have multiple classes; it converts the class scores so that they sum to 1.
log_softmax computes log(exp(x_i) / exp(x).sum()). Here, your output layer consists of only one neuron, so the softmax over it is always 1 and log_softmax is always 0; outputs = net(batch_X) therefore does not depend on the input.
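You can verify that point directly (a standalone sketch, not taken from your post):
import torch
import torch.nn.functional as F

logits = torch.tensor([[2.7], [-1.3], [0.0]])   # any values, one output neuron each
print(F.softmax(logits, dim=1))                  # every row is 1.0
print(F.log_softmax(logits, dim=1))              # every row is 0.0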

Regression loss functions incorrect

I'm trying a basic averaging example, but the validation accuracy and the loss don't match, and the network fails to converge if I increase the training time. I'm training a network with 2 hidden layers, each 500 units wide, on three integers from the range [0, 9], with a learning rate of 1e-1, Adam, a batch size of 1, and dropout, for 3000 iterations, validating every 100 iterations. If the absolute difference between the label and the hypothesis is less than a threshold (here set to 1), I consider it correct. Could someone let me know whether this is an issue with the choice of loss function, something wrong with PyTorch, or something I'm doing?
val_diff = 1
acc_diff = torch.FloatTensor([val_diff]).expand(self.batch_size)
Loop 100 times during validation:
num_correct += torch.sum(torch.abs(val_h - val_y) < acc_diff)
Append after each validation phase:
validate.append(num_correct / total_val)
Here are some examples of (hypothesis, label) pairs:
[...(-0.7043088674545288, 6.0), (-0.15691305696964264, 2.6666667461395264),
(0.2827358841896057, 3.3333332538604736)]
I tried six of the loss functions in the API that are typically used for regression:
torch.nn.L1Loss(size_average=False)
torch.nn.L1Loss()
torch.nn.MSELoss(size_average=False)
torch.nn.MSELoss()
torch.nn.SmoothL1Loss(size_average=False)
torch.nn.SmoothL1Loss()
Thanks.
Network code:
class Feedforward(nn.Module):
    def __init__(self, topology):
        super(Feedforward, self).__init__()
        self.input_dim = topology['features']
        self.num_hidden = topology['hidden_layers']
        self.hidden_dim = topology['hidden_dim']
        self.output_dim = topology['output_dim']
        self.input_layer = nn.Linear(self.input_dim, self.hidden_dim)
        self.hidden_layer = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.output_layer = nn.Linear(self.hidden_dim, self.output_dim)
        self.dropout_layer = nn.Dropout(p=0.2)

    def forward(self, x):
        batch_size = x.size()[0]
        feat_size = x.size()[1]
        input_size = batch_size * feat_size
        self.input_layer = nn.Linear(input_size, self.hidden_dim).cuda()
        hidden = self.input_layer(x.view(1, input_size)).clamp(min=0)
        for _ in range(self.num_hidden):
            hidden = self.dropout_layer(F.relu(self.hidden_layer(hidden)))
        output_size = batch_size * self.output_dim
        self.output_layer = nn.Linear(self.hidden_dim, output_size).cuda()
        return self.output_layer(hidden).view(output_size)
Training code:
def train(self):
    if self.cuda:
        self.network.cuda()
    dh = DataHandler(self.data)
    # loss_fn = nn.L1Loss(size_average=False)
    # loss_fn = nn.L1Loss()
    # loss_fn = nn.SmoothL1Loss(size_average=False)
    # loss_fn = nn.SmoothL1Loss()
    # loss_fn = nn.MSELoss(size_average=False)
    loss_fn = torch.nn.MSELoss()
    losses = []
    validate = []
    hypos = []
    labels = []
    val_size = 100
    val_diff = 1
    total_val = float(val_size * self.batch_size)
    for i in range(self.iterations):
        x, y = dh.get_batch(self.batch_size)
        x = self.tensor_to_Variable(x)
        y = self.tensor_to_Variable(y)
        self.optimizer.zero_grad()
        loss = loss_fn(self.network(x), y)
        loss.backward()
        self.optimizer.step()
It looks like you've misunderstood how layers in PyTorch work; here are a few tips:
In your forward, when you do nn.Linear(...) you are defining new layers instead of using the ones you pre-defined in your network's __init__. Therefore, the network cannot learn anything, as the weights are constantly reinitialized.
You shouldn't need to call .cuda() inside net.forward(...), since you've already copied the network to the GPU in your train by calling self.network.cuda().
Ideally, the input to net.forward(...) should directly have the shape expected by the first layer, so you won't have to modify it. Here you should have x.size() <=> Linear --> (batch_size, features).
Your forward should look close to this:
def forward(self, x):
    x = F.relu(self.input_layer(x))
    x = F.dropout(F.relu(self.hidden_layer(x)), training=self.training)
    x = self.output_layer(x)
    return x
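With that forward, the model can be built once and fed batches whose shape matches the first layer directly; a rough sketch using the sizes you describe (3 inputs, 500 hidden units, 1 output, averaging targets), assuming the __init__ from your question:
topology = {'features': 3, 'hidden_layers': 2, 'hidden_dim': 500, 'output_dim': 1}
net = Feedforward(topology)
optimizer = torch.optim.Adam(net.parameters(), lr=1e-1)

x = torch.randint(0, 10, (1, 3)).float()   # (batch_size, features), no reshaping needed
y = x.mean(dim=1, keepdim=True)            # label: the average of the three inputs
loss = torch.nn.MSELoss()(net(x), y)
loss.backward()
optimizer.step()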
