My loss function doesn't get smaller during training - python

I am trying to predict the center of my palm.
The structure of my neural network consists of two convolutional layers, each followed by max pooling, and a linear layer with 2 outputs, one for the x coordinate and one for the y coordinate. The input is a 720x720 image.
class MyNeuralNetwork(torch.nn.Module):
    def __init__(self):
        super(MyNeuralNetwork, self).__init__()
        self.conv1 = torch.nn.Conv2d(4, 5, 5)
        self.conv2 = torch.nn.Conv2d(5, 5, 5)
        self.pool = torch.nn.MaxPool2d(3, 3)
        self.linear = torch.nn.Linear(5 * 78 * 78, 2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(x)
        x = self.conv2(x)
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x
I have the path names of the images saved in a CSV file; the x and y coordinates are saved in a different CSV file. Here is the code for my Dataset:
class MyHand(Dataset):
    """Creating the proper dataset to feed my neural network"""
    def __init__(self, name_path, root_dir, results_path, transform=None):
        self.names = pd.read_csv(name_path)
        self.rootdir = root_dir
        self.transform = transform
        self.results = pd.read_csv(results_path)

    def __len__(self):
        length = len(self.names.columns)
        return length

    def __getitem__(self, index):
        img_path = os.path.join(self.rootdir, self.names.columns[index])
        image = pl.imread(img_path)
        x_top_left_corner = torch.tensor(self.results.iloc[index, 0])
        y_top_left_corner = torch.tensor(self.results.iloc[index, 1])
        width = torch.tensor(self.results.iloc[index, 2])
        height = torch.tensor(self.results.iloc[index, 3])
        # calculating the x and y center of my palm
        x_center = x_top_left_corner + width / 2
        y_center = y_top_left_corner - height / 2
        if self.transform:
            image = self.transform(image)
        return image, x_center, y_center
and the code for training the network is:
dataset = MyHand(name_path='path to the names of the images csv',
                 results_path='path to the results csv',
                 transform=torchvision.transforms.ToTensor())
loader = DataLoader(dataset=dataset, batch_size=4)
model = MyNeuralNetwork()
criterion = torch.nn.MSELoss()
EPOCHS = 5
LEARNING_RATE = 0.001
optimizer = optim.SGD(model.parameters(), LEARNING_RATE)
for epoch in range(EPOCHS):
    print("epoch:", epoch)
    for data in dataset:
        pic, x, y = data
        model.zero_grad()
        output = model(pic[None, :, :, :])
        loss1 = criterion(output[0, 0], x)
        loss2 = criterion(output[0, 1], y)
        loss = loss1 + loss2
        loss.backward()
        print(loss)
but my loss has exactly the same values at each epoch and doesn't decrease at all. What can I do about that? I tried different learning rates, but the result is still the same.

Your loss values are extremely high. I would propose that you normalize your outputs by using the sigmoid activation function; the predicted coordinates are then in the range 0-1 and can later be translated back to the image by multiplying them by 720. To calculate the loss, you have to divide your target coordinates by 720 as well. You should then get a nice and stable loss in the range 0-1 (a minimal sketch follows the list below).
Also:
- either decay your learning rate or try a smaller one
- scale your image down (I don't know what the images look like, but 720x720 is quite big)
- use three convolutions with smaller kernels
- add a second linear layer
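A minimal sketch of that normalization, reusing the loader and MyNeuralNetwork from the question. The subclass name NormalizedPalmNet and the training-loop details here are my own additions, not the asker's code:

import torch

IMG_SIZE = 720  # the question states 720x720 inputs

class NormalizedPalmNet(MyNeuralNetwork):
    def forward(self, x):
        out = super().forward(x)
        return torch.sigmoid(out)          # predictions now live in [0, 1]

model = NormalizedPalmNet()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

for pic, x, y in loader:                                     # DataLoader from the question
    target = torch.stack([x, y], dim=1).float() / IMG_SIZE   # scale targets into [0, 1]
    optimizer.zero_grad()
    pred = model(pic)                                        # shape [batch, 2]
    loss = criterion(pred, target)
    loss.backward()
    optimizer.step()
    # multiply pred by IMG_SIZE to recover pixel coordinates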

Related

Error when checking input: expected input_12 to have 4 dimensions, but got array with shape (None, None, None, None, None)

I am trying to classify 24 RGB images belonging to 2 classes. Each image was originally 400 x 400 but has been resized to 32 x 32 in the code. I am using the metric-learning-for-image-similarity-search algorithm. However, I get the error "Error when checking input..." when I run the line history = model.fit(AnchorPositivePairs(num_batchs=2), epochs=20) at the end of the program. What could be causing this error?
Here is my code!
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from collections import defaultdict
from PIL import Image
from sklearn.metrics import ConfusionMatrixDisplay
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import utils
import glob
import os
import tqdm
IMG_DIR = "C:/Temp2/AGAIN_4/" # load the images from one directory
IM_WIDTH = 32
IM_HEIGHT = 32
#batch_size = 2
num_classes = 2
#epochs = 15
def read_images(directory, resize_to=(32, 32)):  # extract image labels
    """
    Reads images and labels from the given directory
    :param directory directory from which to read the files
    :param resize_to a tuple of width, height to resize the images
    :returns a tuple of list of images and labels
    """
    files = glob.glob(directory + "*.jpg")
    images = []
    labels = []
    for f in tqdm.tqdm_notebook(files):
        im = Image.open(f)
        im = im.resize(resize_to)
        im = np.array(im) / 255.0
        im = im.astype("float32")
        images.append(im)
        label = 1 if 'microwave' in f.lower() else 0
        labels.append(label)
    return np.array(images), np.array(labels)
x, y = read_images(directory=IMG_DIR, resize_to=(IM_WIDTH, IM_HEIGHT))
# make sure we have 25000 images if we are reading the full data set.
# Change the number accordingly if you have created a subset
assert len(x) == len(y) == 24 #25000
from sklearn.model_selection import train_test_split # extract train and test data
x_train, x_test, y_train, y_test =train_test_split(x, y, test_size=0.25)
# remove X and y since we don't need them anymore
# otherwise it will just use the memory
del x
del y
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Display some of the images
height_width = 32
def show_collage(examples):
    box_size = height_width + 2
    num_rows, num_cols = examples.shape[:2]
    collage = Image.new(
        mode="RGB",
        size=(num_cols * box_size, num_rows * box_size),
        color=(250, 250, 250),
    )
    for row_idx in range(num_rows):
        for col_idx in range(num_cols):
            array = (np.array(examples[row_idx, col_idx]) * 255).astype(np.uint8)
            collage.paste(
                Image.fromarray(array), (col_idx * box_size, row_idx * box_size)
            )
    # Double size for visualisation.
    collage = collage.resize((2 * num_cols * box_size, 2 * num_rows * box_size))
    return collage
# Show a collage of 3x3 random images.
sample_idxs = np.random.randint(0, 15, size=(3, 3))
examples = x_train[sample_idxs]
show_collage(examples) # Displays 9 images
class_idx_to_train_idxs = defaultdict(list)
for y_train_idx, y in enumerate(y_train):
    class_idx_to_train_idxs[y].append(y_train_idx)

class_idx_to_test_idxs = defaultdict(list)
for y_test_idx, y in enumerate(y_test):
    class_idx_to_test_idxs[y].append(y_test_idx)
num_classes = 2
class AnchorPositivePairs(keras.utils.Sequence):
    def __init__(self, num_batchs):
        self.num_batchs = num_batchs

    def __len__(self):
        return self.num_batchs

    def __getitem__(self, _idx):
        x = np.empty((2, num_classes, height_width, height_width, 3), dtype=np.float32)
        for class_idx in range(num_classes):
            examples_for_class = class_idx_to_train_idxs[class_idx]
            anchor_idx = random.choice(examples_for_class)
            positive_idx = random.choice(examples_for_class)
            while positive_idx == anchor_idx:
                positive_idx = random.choice(examples_for_class)
            x[0, class_idx] = x_train[anchor_idx]
            x[1, class_idx] = x_train[positive_idx]
        return x
examples = next(iter(AnchorPositivePairs(num_batchs=1)))
show_collage(examples)
class EmbeddingModel(keras.Model):
    def train_step(self, data):
        # Note: Workaround for open issue, to be removed.
        if isinstance(data, tuple):
            data = data[0]
        anchors, positives = data[0], data[1]
        with tf.GradientTape() as tape:
            # Run both anchors and positives through model.
            anchor_embeddings = self(anchors, training=True)
            positive_embeddings = self(positives, training=True)
            # Calculate cosine similarity between anchors and positives. As they have
            # been normalised this is just the pair wise dot products.
            similarities = tf.einsum(
                "ae,pe->ap", anchor_embeddings, positive_embeddings
            )
            # Since we intend to use these as logits we scale them by a temperature.
            # This value would normally be chosen as a hyper parameter.
            temperature = 0.2
            similarities /= temperature
            # We use these similarities as logits for a softmax. The labels for
            # this call are just the sequence [0, 1, 2, ..., num_classes] since we
            # want the main diagonal values, which correspond to the anchor/positive
            # pairs, to be high. This loss will move embeddings for the
            # anchor/positive pairs together and move all other pairs apart.
            sparse_labels = tf.range(num_classes)
            loss = self.compiled_loss(sparse_labels, similarities)
        # Calculate gradients and apply via optimizer.
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        # Update and return metrics (specifically the one for the loss value).
        self.compiled_metrics.update_state(sparse_labels, similarities)
        return {m.name: m.result() for m in self.metrics}
inputs = layers.Input(shape=(height_width, height_width, 3))
#inputs = layers.Input(shape=(32, 32, 3))
x = layers.Conv2D(filters=32, kernel_size=3, strides=2, activation="relu")(inputs)
x = layers.Conv2D(filters=64, kernel_size=3, strides=2, activation="relu")(x)
x = layers.Conv2D(filters=128, kernel_size=3, strides=2, activation="relu")(x)
x = layers.GlobalAveragePooling2D()(x)
embeddings = layers.Dense(units=8, activation=None)(x)
embeddings = tf.nn.l2_normalize(embeddings, axis=-1)
model = EmbeddingModel(inputs, embeddings)
model.summary()
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
history = model.fit(AnchorPositivePairs(num_batchs=2), epochs=20)
plt.plot(history.history["loss"])
plt.show()
I have used the CIFAR-10 dataset as input instead of the images in my local directory, as shown in the next code, but I still get the same error.
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from collections import defaultdict
from PIL import Image
from sklearn.metrics import ConfusionMatrixDisplay
from tensorflow import keras
from tensorflow.keras import layers
"""
## Dataset
For this example we will be using the
[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset.
"""
from tensorflow.keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
y_train = np.squeeze(y_train)
x_test = x_test.astype("float32") / 255.0
y_test = np.squeeze(y_test)
"""
To get a sense of the dataset we can visualise a grid of 25 random examples.
"""
height_width = 32
def show_collage(examples):
    box_size = height_width + 2
    num_rows, num_cols = examples.shape[:2]
    collage = Image.new(
        mode="RGB",
        size=(num_cols * box_size, num_rows * box_size),
        color=(250, 250, 250),
    )
    for row_idx in range(num_rows):
        for col_idx in range(num_cols):
            array = (np.array(examples[row_idx, col_idx]) * 255).astype(np.uint8)
            collage.paste(
                Image.fromarray(array), (col_idx * box_size, row_idx * box_size)
            )
    # Double size for visualisation.
    collage = collage.resize((2 * num_cols * box_size, 2 * num_rows * box_size))
    return collage
# Show a collage of 5x5 random images.
sample_idxs = np.random.randint(0, 50000, size=(5, 5))
examples = x_train[sample_idxs]
show_collage(examples)
"""
Metric learning provides training data not as explicit `(X, y)` pairs but instead uses
multiple instances that are related in the way we want to express similarity. In our
example we will use instances of the same class to represent similarity; a single
training instance will not be one image, but a pair of images of the same class. When
referring to the images in this pair we'll use the common metric learning names of the
`anchor` (a randomly chosen image) and the `positive` (another randomly chosen image of
the same class).
To facilitate this we need to build a form of lookup that maps from classes to the
instances of that class. When generating data for training we will sample from this
lookup.
"""
class_idx_to_train_idxs = defaultdict(list)
for y_train_idx, y in enumerate(y_train):
    class_idx_to_train_idxs[y].append(y_train_idx)

class_idx_to_test_idxs = defaultdict(list)
for y_test_idx, y in enumerate(y_test):
    class_idx_to_test_idxs[y].append(y_test_idx)
"""
For this example we are using the simplest approach to training; a batch will consist of
`(anchor, positive)` pairs spread across the classes. The goal of learning will be to
move the anchor and positive pairs closer together and further away from other instances
in the batch. In this case the batch size will be dictated by the number of classes; for
CIFAR-10 this is 10.
"""
num_classes = 10
class AnchorPositivePairs(keras.utils.Sequence):
    def __init__(self, num_batchs):
        self.num_batchs = num_batchs

    def __len__(self):
        return self.num_batchs

    def __getitem__(self, _idx):
        x = np.empty((2, num_classes, height_width, height_width, 3), dtype=np.float32)
        for class_idx in range(num_classes):
            examples_for_class = class_idx_to_train_idxs[class_idx]
            anchor_idx = random.choice(examples_for_class)
            positive_idx = random.choice(examples_for_class)
            while positive_idx == anchor_idx:
                positive_idx = random.choice(examples_for_class)
            x[0, class_idx] = x_train[anchor_idx]
            x[1, class_idx] = x_train[positive_idx]
        return x
"""
We can visualise a batch in another collage. The top row shows randomly chosen anchors
from the 10 classes, the bottom row shows the corresponding 10 positives.
"""
examples = next(iter(AnchorPositivePairs(num_batchs=1)))
show_collage(examples)
"""
## Embedding model
We define a custom model with a `train_step` that first embeds both anchors and positives
and then uses their pairwise dot products as logits for a softmax.
"""
class EmbeddingModel(keras.Model):
    def train_step(self, data):
        # Note: Workaround for open issue, to be removed.
        if isinstance(data, tuple):
            data = data[0]
        anchors, positives = data[0], data[1]
        with tf.GradientTape() as tape:
            # Run both anchors and positives through model.
            anchor_embeddings = self(anchors, training=True)
            positive_embeddings = self(positives, training=True)
            # Calculate cosine similarity between anchors and positives. As they have
            # been normalised this is just the pair wise dot products.
            similarities = tf.einsum(
                "ae,pe->ap", anchor_embeddings, positive_embeddings
            )
            # Since we intend to use these as logits we scale them by a temperature.
            # This value would normally be chosen as a hyper parameter.
            temperature = 0.2
            similarities /= temperature
            # We use these similarities as logits for a softmax. The labels for
            # this call are just the sequence [0, 1, 2, ..., num_classes] since we
            # want the main diagonal values, which correspond to the anchor/positive
            # pairs, to be high. This loss will move embeddings for the
            # anchor/positive pairs together and move all other pairs apart.
            sparse_labels = tf.range(num_classes)
            loss = self.compiled_loss(sparse_labels, similarities)
        # Calculate gradients and apply via optimizer.
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        # Update and return metrics (specifically the one for the loss value).
        self.compiled_metrics.update_state(sparse_labels, similarities)
        return {m.name: m.result() for m in self.metrics}
"""
Next we describe the architecture that maps from an image to an embedding. This model
simply consists of a sequence of 2d convolutions followed by global pooling with a final
linear projection to an embedding space. As is common in metric learning we normalise the
embeddings so that we can use simple dot products to measure similarity. For simplicity
this model is intentionally small.
"""
inputs = layers.Input(shape=(height_width, height_width, 3))
x = layers.Conv2D(filters=32, kernel_size=3, strides=2, activation="relu")(inputs)
x = layers.Conv2D(filters=64, kernel_size=3, strides=2, activation="relu")(x)
x = layers.Conv2D(filters=128, kernel_size=3, strides=2, activation="relu")(x)
x = layers.GlobalAveragePooling2D()(x)
embeddings = layers.Dense(units=8, activation=None)(x)
embeddings = tf.nn.l2_normalize(embeddings, axis=-1)
model = EmbeddingModel(inputs, embeddings)
"""
Finally we run the training. On a Google Colab GPU instance this takes about a minute.
"""
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=1e-3),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
history = model.fit(AnchorPositivePairs(num_batchs=1000), epochs=20)
plt.plot(history.history["loss"])
plt.show()
"""
## Testing
We can review the quality of this model by applying it to the test set and considering
near neighbours in the embedding space.
First we embed the test set and calculate all near neighbours. Recall that since the
embeddings are unit length we can calculate cosine similarity via dot products.
"""
near_neighbours_per_example = 10
embeddings = model.predict(x_test)
gram_matrix = np.einsum("ae,be->ab", embeddings, embeddings)
near_neighbours = np.argsort(gram_matrix.T)[:, -(near_neighbours_per_example + 1) :]
"""
As a visual check of these embeddings we can build a collage of the near neighbours for 5
random examples. The first column of the image below is a randomly selected image, the
following 10 columns show the nearest neighbours in order of similarity.
"""
num_collage_examples = 5
examples = np.empty(
    (
        num_collage_examples,
        near_neighbours_per_example + 1,
        height_width,
        height_width,
        3,
    ),
    dtype=np.float32,
)
for row_idx in range(num_collage_examples):
    examples[row_idx, 0] = x_test[row_idx]
    anchor_near_neighbours = reversed(near_neighbours[row_idx][:-1])
    for col_idx, nn_idx in enumerate(anchor_near_neighbours):
        examples[row_idx, col_idx + 1] = x_test[nn_idx]
show_collage(examples)
"""
We can also get a quantified view of the performance by considering the correctness of
near neighbours in terms of a confusion matrix.
Let us sample 10 examples from each of the 10 classes and consider their near neighbours
as a form of prediction; that is, does the example and its near neighbours share the same
class?
We observe that each animal class does generally well, and is confused the most with the
other animal classes. The vehicle classes follow the same pattern.
"""
confusion_matrix = np.zeros((num_classes, num_classes))
# For each class.
for class_idx in range(num_classes):
    # Consider 10 examples.
    example_idxs = class_idx_to_test_idxs[class_idx][:10]
    for y_test_idx in example_idxs:
        # And count the classes of its near neighbours.
        for nn_idx in near_neighbours[y_test_idx][:-1]:
            nn_class_idx = y_test[nn_idx]
            confusion_matrix[class_idx, nn_class_idx] += 1
# Display a confusion matrix.
labels = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]
disp = ConfusionMatrixDisplay(confusion_matrix=confusion_matrix, display_labels=labels)
disp.plot(include_values=True, cmap="viridis", ax=None, xticks_rotation="vertical")
plt.show()
ValueError: Error when checking input: expected input_16 to have 4 dimensions, but got array with shape (None, None, None, None, None)
I'm not sure, but as I understand it, your own data generator causes this error. You should also take your data size into account in the generator; try this:
def __len__(self):
    if self.batch_size > self.X.shape[0]:
        print("Batch size is greater than data size!!")
        return -1
    return int(np.floor(self.X.shape[0] / self.batch_size))
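For that snippet to fit, the Sequence would need to store the data and a batch size; a minimal sketch (the attributes X and batch_size are assumptions of mine, the asker's AnchorPositivePairs class does not have them):

class PairsSequence(keras.utils.Sequence):
    def __init__(self, X, batch_size):
        self.X = X                    # array of training examples
        self.batch_size = batch_size  # examples per generated batch

    def __len__(self):
        # number of full batches available in the data
        if self.batch_size > self.X.shape[0]:
            print("Batch size is greater than data size!!")
            return -1
        return int(np.floor(self.X.shape[0] / self.batch_size))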

Weights not updating on my neural net (Pytorch)

I'm completely new to neural nets, so I tried to roughly follow some tutorials and create a neural net that can simply distinguish whether a given binary picture contains a white circle or is all black. I generated 1000 arrays of size 10000 representing 100x100 pictures, with half of them containing a white circle somewhere. The generation of my dataset looks like this:
for i in range(1000):
    image = [0] * (IMAGE_SIZE * IMAGE_SIZE)
    if random() < 0.5:
        dataset.append([image, [[0]]])
    else:
        # inserts circle in image
        # ...
        dataset.append([image, [[1]]])
np.random.shuffle(dataset)
np.save("testdataset.npy", dataset)
The double list around the classifications is there because the net seemed to give that format as an output, so I matched it.
Since I don't really have a precise idea of how PyTorch works, I don't really know which parts of the code are relevant for solving my problem and which aren't. Therefore, I give the code for the net and the training below and really hope that someone can explain to me where I went wrong. I'm sorry if it's too much code. The code runs without errors, but if I print the parameters before and after training, they haven't changed in any way, and the net always just returns a 0 for every image/array.
IMAGE_SIZE = 100
EPOCHS = 3
BATCH_SIZE = 50
VAL_PCT = 0.1
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(IMAGE_SIZE * IMAGE_SIZE, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return F.log_softmax(x, dim=1)
net = Net()
optimizer = optim.Adam(net.parameters(), lr = 0.01)
loss_function = nn.MSELoss()
dataset = np.load("testdataset.npy", allow_pickle = True)
X = torch.Tensor([i[0] for i in dataset]).view(-1, 10000)
y = torch.Tensor([i[1] for i in dataset])
val_size = int(len(X) * VAL_PCT)
train_X = X[:-val_size]
train_y = y[:-val_size]
test_X = X[-val_size:]
test_y = y[-val_size:]
for epoch in range(EPOCHS):
    for i in range(0, len(train_X), BATCH_SIZE):
        batch_X = train_X[i:i + BATCH_SIZE].view(-1, 1, 10000)
        batch_y = train_y[i:i + BATCH_SIZE]
        net.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs, batch_y)
        loss.backward()
        optimizer.step()
Instead of net.zero_grad() I would recommend using optimizer.zero_grad(), as it's more common and the de facto standard. Your training loop should be:
for epoch in range(EPOCHS):
    for i in range(0, len(train_X), BATCH_SIZE):
        batch_X = train_X[i:i + BATCH_SIZE].view(-1, 1, 10000)
        batch_y = train_y[i:i + BATCH_SIZE]
        optimizer.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs, batch_y)
        loss.backward()
        optimizer.step()
I would also recommend reading a bit about different loss functions. You seem to have a classification problem, for which you should use a logits-based loss (binary classification) or a cross-entropy loss (multi-class). I would make the following changes to the network and loss function:
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(IMAGE_SIZE * IMAGE_SIZE, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x

loss_function = nn.BCEWithLogitsLoss()
Check the documentation before using it: https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss
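A minimal usage sketch of this loss, assuming batch_X and batch_y from your training loop and that the labels are 0./1. floats reshaped to match the single-logit output:

logits = net(batch_X.view(-1, IMAGE_SIZE * IMAGE_SIZE))    # raw logits, shape [batch, 1]
loss = loss_function(logits, batch_y.view(-1, 1).float())  # targets reshaped to [batch, 1]
probs = torch.sigmoid(logits)                              # probabilities, for inspection only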
Good luck!
First, it is not ideal to use neural networks to address this kind of problem; neural networks are meant for highly non-linear data. For this example, you could simply use the average intensity of the image to find out whether a white pixel is present or not.
That said, a classic logistic-regression setup outputs a value from 0 to 1, i.e. a probability.
The softmax function is used when you have multiple classes; it normalizes the outputs so that they sum to 1.
log_softmax implements log(exp(x_i) / exp(x).sum()). Here your output layer consists of only one neuron, so the softmax over that single value is always 1 (and its log is always 0); outputs = net(batch_X) is therefore constant, and the network cannot learn anything from it.
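A quick way to verify this (a small sketch):

import torch
import torch.nn.functional as F

x = torch.randn(4, 1)           # a batch of 4 samples, one output neuron each
print(F.softmax(x, dim=1))      # every entry is 1.0
print(F.log_softmax(x, dim=1))  # every entry is 0.0, regardless of the input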

Predicting point sequence in image

My training set is a set of images (either 3-channel or 1-channel; of course, I use only one type of channel), and the labels are a sequence of points in a specific order that I want to predict from the images.
I am using a model inspired by the image-captioning example on the TensorFlow website. This is also the approach taken by this paper: https://arxiv.org/pdf/1901.03781.pdf
class CNN_Encoder(tf.keras.Model):
    # Since you have already extracted the features and dumped it using pickle
    # This encoder passes those features through a Fully connected layer
    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        self.fc = tf.keras.layers.Dense(embedding_dim)

    def call(self, x):
        x = self.fc(x)
        x = tf.nn.relu(x)
        return x

class RNN_Decoder(tf.keras.Model):
    def __init__(self, embedding_dim, units, output_dim):
        super(RNN_Decoder, self).__init__()
        self.units = units
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(output_dim)

    def call(self, x, features, hidden):
        x = tf.concat((features, x), axis=-1)
        output, state = self.gru(x)
        x = self.fc1(state)
        x = self.fc2(x)
        return x

    def reset_state(self, batch_size):
        return tf.zeros((batch_size, self.units))
@tf.function
def train_step(img_tensor, target):
    loss = 0
    hidden = decoder.reset_state(batch_size=target.shape[0])
    dec_input = tf.expand_dims([[0., 0.]] * target.shape[0], 1)
    with tf.GradientTape() as tape:
        features = encoder(img_tensor)
        for i in range(1, target.shape[1]):
            predictions = decoder(dec_input, features, hidden)
            loss += loss_function(target[:, i], predictions)
            # using teacher forcing
            dec_input = tf.expand_dims(target[:, i], 1)
    total_loss = (loss / int(target.shape[1]))
    trainable_variables = encoder.trainable_variables + decoder.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(gradients, trainable_variables))
    return loss, total_loss
EPOCHS = 20
batch_size = 8

for epoch in tqdm(range(start_epoch, EPOCHS)):
    start = time.time()
    total_loss = 0
    for (batch, (img_tensor, target)) in enumerate(data_generator(preds_t, labels_t)):
        img_tensor = img_tensor.reshape((-1, 1, 128 * 128))
        batch_loss, t_loss = train_step(img_tensor, target)
        total_loss += t_loss
        if batch % 100 == 0:
            print('Epoch {} Batch {} Loss {:.4f}'.format(
                epoch + 1, batch, batch_loss.numpy() / int(target.shape[1])))
        if batch == 10000:
            break
    # storing the epoch end loss value to plot later
    # loss_plot.append(total_loss / num_steps)
    if epoch % 5 == 0:
        ckpt_manager.save()
    print('Epoch {} Loss {:.6f}'.format(epoch + 1, total_loss / num_steps))
    print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
For the feature vector, I extract the last layer of a U-Net, so each image has size 1x128x128. I reshape it to 1x1x128*128 and pass it through a fully connected layer, after which the shape becomes 1x1x256.
The labels I want to predict are image coordinates, i.e. (x, y). The input to the GRU layer is the concatenation of the 1x1x256 features with the 1x1x2 coordinates from time step t-1, which I then pass through a two-layer fully connected head with output dimension 2 for the two coordinates. I have removed attention for now to get a simpler model. I normalize my images. I pad the coordinate sequences with (0, 0) for the start, (-1, -1) for the end, and (-2, -2) for regular padding to get a uniform sequence length of 350x2.
The network doesn't seem to learn much; I just get a few points scattered diagonally across the image. The biggest difference I see compared with the image-captioning model is that there the words can be converted to embeddings, so you have 128 image features and 128 word features being concatenated and fed into the LSTM. In my case the sequence information is just one entry per step. Could that be the reason the network is not learning much?
If someone has any insights into what I should change, that would be great.
Your question requires certain experience and a deep investigation; I can only offer general advice for an under-fitting issue like this.
Personally, I'd start by trying to overfit on a single batch.
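A minimal sketch of that sanity check, reusing train_step and data_generator from the question (their exact signatures are assumed): if the model can learn at all, the loss on one fixed batch should drop towards zero.

# take one fixed batch from the generator used in the question
img_tensor, target = next(iter(data_generator(preds_t, labels_t)))
img_tensor = img_tensor.reshape((-1, 1, 128 * 128))

# train repeatedly on that same batch and watch the loss
for step in range(500):
    batch_loss, t_loss = train_step(img_tensor, target)
    if step % 50 == 0:
        print(step, float(t_loss))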

How do I load up an image and convert it to a proper tensor for PyTorch?

I'm trying to custom load some image files (JPG files) with some labels and feed them into a convolutional neural network (CNN) in PyTorch following the example here. However, there still seem to be no decent end-to-end tutorials. The problem that I am seeing is the following.
RuntimeError: thnn_conv2d_forward is not implemented for type torch.ByteTensor
My Dataset looks like the following.
class ImageData(Dataset):
    def __init__(self, width=256, height=256, transform=None):
        self.width = width
        self.height = height
        self.transform = transform
        y, x = get_images()  # y is a list of labels, x is a list of file paths
        self.y = y
        self.x = x

    def __getitem__(self, index):
        img = Image.open(self.x[index])  # use Pillow to open a file
        img = img.resize((self.width, self.height))  # resize the file to 256x256
        img = img.convert('RGB')  # convert image to RGB channels
        if self.transform is not None:
            img = self.transform(img)
        img = np.asarray(img).transpose(-1, 0, 1)  # change dimensions from width x height x channel (WHC) to channel x width x height (CWH)
        img = torch.from_numpy(np.asarray(img))  # create the image tensor
        label = torch.from_numpy(np.asarray(self.y[index]).reshape([1, 1]))  # create the label tensor
        return img, label

    def __len__(self):
        return len(self.x)
The CNN is taken from here and is modified to handle NCWH (batch x channel x width x height) as follows.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 256, 256)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
The learning loop is also taken from the same tutorial and looks like the following.
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(dataloader, 0):
        # get the inputs
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('Finished Training')
However, the RuntimeError mentioned above is thrown. Any ideas on what I am doing wrong?
Additionally, I know that without transposing, the image data is in WHC shape, but the NN model requires it as CWH. The problem is this: if we change from WHC to CWH, then we can no longer simply plot the images when iterating over the DataLoader.
data = ImageData()
dataloader = DataLoader(data, batch_size=10, shuffle=True, num_workers=1)
imgs, labels = next(iter(dataloader))
plt.imshow(imgs.numpy()[0,:,:,:])
plt.show()
Attempting to do so will throw the following error.
TypeError: Invalid dimensions for image data
To me, it is a nuisance that Pillow gives you WHC, which you can plot directly, while the PyTorch CNN wants CWH for processing. Any idea on how to avoid doing so many transforms consistently, while still being able both to plot and to feed the data into the CNN? Or is this mismatch of WHC vs CWH just something we have to live with?
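(As an aside, a CWH tensor can still be displayed by permuting the axes back just for plotting; a small sketch using the imgs batch from above:)

img = imgs[0]                             # CWH tensor from the DataLoader
plt.imshow(img.permute(1, 2, 0).numpy())  # back to WHC only for display
plt.show()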
Without transposing the image, when feeding it to the CNN, the following error is thrown.
RuntimeError: Given groups=1, weight[256, 3, 256, 256], so expected input[10, 256, 256, 3] to have 3 channels, but got 256 channels instead.
conv2d operates on float tensors. It's also common and good practice to normalize input images before passing them into the neural network.
I would add the line img = img / 255 immediately before you convert it to a Torch tensor in __getitem__. It will then be converted to a float tensor rather than a byte tensor and will therefore be compatible with conv2d.
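A sketch of how that change could look inside __getitem__ (note: dividing a uint8 NumPy array by 255 gives float64, so the extra .float() cast here is my own addition to end up with the usual FloatTensor):

img = np.asarray(img).transpose(-1, 0, 1)  # WHC -> CWH, still uint8
img = img / 255.0                          # scale to [0, 1]; dtype becomes float64
img = torch.from_numpy(img).float()        # FloatTensor, compatible with conv2d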
Your training data "inputs" is of type ByteTensor, but torch.conv2d() supports most operations only for FloatTensor and DoubleTensor. So you just need to add:

inputs = inputs.type(torch.FloatTensor)

or

inputs = inputs.type(torch.DoubleTensor)

before you put it into Net.
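For example, in the training loop from the question the cast could go right after unpacking the batch (a sketch):

for i, data in enumerate(dataloader, 0):
    inputs, labels = data
    inputs = inputs.type(torch.FloatTensor)  # ByteTensor -> FloatTensor before the conv layers
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()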

Regression loss functions incorrect

I'm trying a basic averaging example, but the validation accuracy and the loss don't match, and the network fails to converge if I increase the training time. I'm training a network with 2 hidden layers, each 500 units wide, on three integers from the range [0, 9], with a learning rate of 1e-1, Adam, a batch size of 1, and dropout, for 3000 iterations, and I validate every 100 iterations. If the absolute difference between the label and the hypothesis is less than a threshold (here I set the threshold to 1), I consider the prediction correct. Could someone let me know whether this is an issue with the choice of loss function, something wrong with PyTorch, or something I'm doing? Here is how I validate:
val_diff = 1
acc_diff = torch.FloatTensor([val_diff]).expand(self.batch_size)

Looping 100 times during validation:

num_correct += torch.sum(torch.abs(val_h - val_y) < acc_diff)

Appending after each validation phase:

validate.append(num_correct / total_val)
Here are some examples of (hypothesis, label) pairs:
[...(-0.7043088674545288, 6.0), (-0.15691305696964264, 2.6666667461395264),
(0.2827358841896057, 3.3333332538604736)]
I tried six of the loss functions in the API that are typically used for regression:
torch.nn.L1Loss(size_average=False)
torch.nn.L1Loss()
torch.nn.MSELoss(size_average=False)
torch.nn.MSELoss()
torch.nn.SmoothL1Loss(size_average=False)
torch.nn.SmoothL1Loss()
Thanks.
Network code:
class Feedforward(nn.Module):
    def __init__(self, topology):
        super(Feedforward, self).__init__()
        self.input_dim = topology['features']
        self.num_hidden = topology['hidden_layers']
        self.hidden_dim = topology['hidden_dim']
        self.output_dim = topology['output_dim']
        self.input_layer = nn.Linear(self.input_dim, self.hidden_dim)
        self.hidden_layer = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.output_layer = nn.Linear(self.hidden_dim, self.output_dim)
        self.dropout_layer = nn.Dropout(p=0.2)

    def forward(self, x):
        batch_size = x.size()[0]
        feat_size = x.size()[1]
        input_size = batch_size * feat_size
        self.input_layer = nn.Linear(input_size, self.hidden_dim).cuda()
        hidden = self.input_layer(x.view(1, input_size)).clamp(min=0)
        for _ in range(self.num_hidden):
            hidden = self.dropout_layer(F.relu(self.hidden_layer(hidden)))
        output_size = batch_size * self.output_dim
        self.output_layer = nn.Linear(self.hidden_dim, output_size).cuda()
        return self.output_layer(hidden).view(output_size)
Training code:
def train(self):
    if self.cuda:
        self.network.cuda()
    dh = DataHandler(self.data)
    # loss_fn = nn.L1Loss(size_average=False)
    # loss_fn = nn.L1Loss()
    # loss_fn = nn.SmoothL1Loss(size_average=False)
    # loss_fn = nn.SmoothL1Loss()
    # loss_fn = nn.MSELoss(size_average=False)
    loss_fn = torch.nn.MSELoss()
    losses = []
    validate = []
    hypos = []
    labels = []
    val_size = 100
    val_diff = 1
    total_val = float(val_size * self.batch_size)

    for i in range(self.iterations):
        x, y = dh.get_batch(self.batch_size)
        x = self.tensor_to_Variable(x)
        y = self.tensor_to_Variable(y)
        self.optimizer.zero_grad()
        loss = loss_fn(self.network(x), y)
        loss.backward()
        self.optimizer.step()
It looks like you've misunderstood how layers in PyTorch work; here are a few tips:
In your forward, when you call nn.Linear(...) you are defining new layers instead of using those you pre-defined in your network's __init__. Therefore, the model cannot learn anything, as the weights are constantly reinitialized.
You shouldn't need to call .cuda() inside net.forward(...), since you've already copied the network to the GPU in your train by calling self.network.cuda().
Ideally, the input to net.forward(...) should already have the shape expected by the first layer, so you won't have to modify it; here x.size() should be (batch_size, features) to match the Linear layer.
Your forward should look close to this:
def forward(self, x):
    x = F.relu(self.input_layer(x))
    x = F.dropout(F.relu(self.hidden_layer(x)), training=self.training)
    x = self.output_layer(x)
    return x
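A matching __init__ could then look like this (a sketch: each layer is created once with fixed sizes, so its weights persist across forward passes and can actually be trained):

def __init__(self, topology):
    super(Feedforward, self).__init__()
    self.num_hidden = topology['hidden_layers']
    self.input_layer = nn.Linear(topology['features'], topology['hidden_dim'])
    self.hidden_layer = nn.Linear(topology['hidden_dim'], topology['hidden_dim'])
    self.output_layer = nn.Linear(topology['hidden_dim'], topology['output_dim'])
    self.dropout_layer = nn.Dropout(p=0.2)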
