First, I implemented a simple VGG16 network for image classification:
model = keras.applications.vgg16.VGG16(include_top = False,
weights = None,
input_shape = (32,32,3),
pooling = 'max',
classes = 10)
Its input shape is 32 x 32. Now I am trying to implement a patch-based neural network. The main idea is to extract 4 image patches from the input image (as in the figure),
train on each extracted patch (resized to 32 x 32, since that is the input shape of our model), and finally combine the four output probabilities to find the final result (using normalizing & argmax), like this:
How can I do that?
Thanks in advance for your help.
Note:
I am guessing this can be done using a Lambda layer.
My simple VGG classification implementation is here in Colab.
I used the MNIST dataset to get every image as 4 patches with tf.image.extract_patches, which are subsequently passed as a batch:
import tensorflow as tf
from tensorflow import keras as K
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout
from tensorflow import nn as nn
from functools import partial
import matplotlib.pyplot as plt
(xtrain, ytrain), (xtest, ytest) = tf.keras.datasets.mnist.load_data()
train = tf.data.Dataset.from_tensor_slices((xtrain, ytrain))
test = tf.data.Dataset.from_tensor_slices((xtest, ytest))
patch_s = 18
stride = xtrain.shape[1] - patch_s
get_patches = lambda x, y: (tf.reshape(
tf.image.extract_patches(
images=tf.expand_dims(x[..., None], 0),
sizes=[1, patch_s, patch_s, 1],
strides=[1, stride, stride, 1],
rates=[1, 1, 1, 1],
padding='VALID'), (4, patch_s, patch_s, 1)), y)
train = train.map(get_patches)
test = test.map(get_patches)
fig = plt.figure()
plt.subplots_adjust(wspace=.1, hspace=.2)
images, labels = next(iter(train))
for index, image in enumerate(images):
ax = plt.subplot(2, 2, index + 1)
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(image)
plt.show()
Then, in the training loop, I'm getting the loss for every one of these 4 outputs:
def compute_loss(model, x, y, training):
out = model(inputs=x, training=training)
repeated_y = tf.repeat(tf.expand_dims(y, 0), repeats=4, axis=0)
loss = loss_object(y_true=repeated_y, y_pred=out)  # model already outputs softmax probabilities, so from_logits is not needed
loss = tf.reduce_mean(loss, axis=0)
return loss
Then I reduce the mean over axis 0 to merge the four per-patch losses into one. Here's the full running code:
import tensorflow as tf
from tensorflow import keras as K
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout
from tensorflow import nn as nn
from functools import partial
import matplotlib.pyplot as plt
(xtrain, ytrain), (xtest, ytest) = tf.keras.datasets.mnist.load_data()
train = tf.data.Dataset.from_tensor_slices((xtrain, ytrain))
test = tf.data.Dataset.from_tensor_slices((xtest, ytest))
patch_s = 18
stride = xtrain.shape[1] - patch_s
get_patches = lambda x, y: (tf.reshape(
tf.image.extract_patches(
images=tf.expand_dims(x[..., None], 0),
sizes=[1, patch_s, patch_s, 1],
strides=[1, stride, stride, 1],
rates=[1, 1, 1, 1],
padding='VALID'), (4, patch_s, patch_s, 1)), y)
train = train.map(get_patches)
test = test.map(get_patches)
fig = plt.figure()
plt.subplots_adjust(wspace=.1, hspace=.2)
images, labels = next(iter(train))
for index, image in enumerate(images):
ax = plt.subplot(2, 2, index + 1)
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(image)
plt.show()
def prepare(inputs, targets):
inputs = tf.divide(x=inputs, y=255)
targets = tf.one_hot(indices=targets, depth=10)
return inputs, targets
train = train.take(10_000).map(prepare)
test = test.take(1_000).map(prepare)
class MyCNN(K.Model):
def __init__(self):
super(MyCNN, self).__init__()
Conv = partial(Conv2D, kernel_size=(3, 3), activation=nn.relu)
MaxPool = partial(MaxPooling2D, pool_size=(2, 2))
self.conv1 = Conv(filters=16)
self.maxp1 = MaxPool()
self.conv2 = Conv(filters=32)
self.maxp2 = MaxPool()
self.conv3 = Conv(filters=64)
self.maxp3 = MaxPool()
self.flatt = Flatten()
self.dens1 = Dense(64, activation=nn.relu)
self.drop1 = Dropout(.5)
self.dens2 = Dense(10, activation=nn.softmax)
def call(self, inputs, training=None, **kwargs):
x = self.conv1(inputs)
x = self.maxp1(x)
x = self.conv2(x)
x = self.maxp2(x)
x = self.conv3(x)
x = self.maxp3(x)
x = self.flatt(x)
x = self.dens1(x)
x = self.drop1(x)
x = self.dens2(x)
return x
model = MyCNN()
loss_object = tf.losses.categorical_crossentropy
def compute_loss(model, x, y, training):
out = model(inputs=x, training=training)
repeated_y = tf.repeat(tf.expand_dims(y, 0), repeats=4, axis=0)
loss = loss_object(y_true=repeated_y, y_pred=out)  # softmax outputs, so no from_logits
loss = tf.reduce_mean(loss, axis=0)
return loss
def get_grad(model, x, y):
with tf.GradientTape() as tape:
loss = compute_loss(model, x, y, training=False)
return loss, tape.gradient(loss, model.trainable_variables)
optimizer = tf.optimizers.Adam()
verbose = "Epoch {:2d}" \
" Loss: {:.3f} Acc: {:.3%} TLoss: {:.3f} TAcc: {:.3%}"
for epoch in range(1, 10 + 1):
train_loss = tf.metrics.Mean()
train_acc = tf.metrics.CategoricalAccuracy()
test_loss = tf.metrics.Mean()
test_acc = tf.metrics.CategoricalAccuracy()
for x, y in train:
loss_value, grads = get_grad(model, x, y)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss.update_state(loss_value)
train_acc.update_state(y, model(x, training=True))
for x, y in test:
loss_value, _ = get_grad(model, x, y)
test_loss.update_state(loss_value)
test_acc.update_state(y, model(x, training=False))
print(verbose.format(epoch,
train_loss.result(),
train_acc.result(),
test_loss.result(),
test_acc.result()))
Spoiler alert: with such small patches it doesn't do well; make the patches larger than 18 of the 28 pixels for better performance.
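The loop above merges the losses during training; to produce the single final prediction the question asks for (normalizing the combined probabilities and taking the argmax), one can average the four per-patch softmax outputs at inference time. A minimal sketch, assuming the model and the test dataset defined above:

correct, total = 0, 0
for x, y in test:
    probs = model(x, training=False)          # (4, 10): one softmax row per patch
    merged = tf.reduce_mean(probs, axis=0)    # combine the four probability vectors
    merged = merged / tf.reduce_sum(merged)   # normalize (already sums to ~1, kept for clarity)
    pred = tf.argmax(merged)                  # final predicted class
    correct += int(pred == tf.argmax(y))
    total += 1
print("Patch-merged accuracy:", correct / total)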
I'm trying to build an NLP classifier on top of BERT, but I'm struggling with data imbalance. I'm looking for an implementation of a weighted CategoricalCrossentropy. I've already seen a solution using the class_weight parameter of the fit function, but it doesn't fit well with my data (I've one-hot encoded the labels, and it actually throws an error because the dict elements don't match).
Can someone please give me an implementation from scratch of a WeightedCategoricalCrossentropy function, allowing me to add weights manually to TensorFlow's native CategoricalCrossentropy?
The __call__ method of tf.losses.CategoricalCrossentropy accepts three arguments:
y_true
y_pred
sample_weight
The sample_weight argument acts as a coefficient for the loss. If a scalar is provided, the loss is simply scaled by the given value. If sample_weight is a tensor of size [batch_size], the total loss for each sample of the batch is rescaled by the corresponding element of the sample_weight vector. You can use it like this:
def compute_loss(model, x, y, training):
out = model(inputs=x, training=training)
sample_weight = tf.random.uniform((tf.shape(x)[0], 1),
minval=0,
maxval=1,
dtype=tf.float32)
loss = loss_object(y_true=y, y_pred=out,
sample_weight=sample_weight)
return loss
These are random values, but you can make the values depend on y so that they act as class weights rather than sample weights. Here's a full example of a running training loop with custom sample weights:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras as K
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout
from tensorflow import nn as nn
from functools import partial
(xtrain, ytrain), (xtest, ytest) = tf.keras.datasets.mnist.load_data()
train = tf.data.Dataset.from_tensor_slices((xtrain, ytrain))
test = tf.data.Dataset.from_tensor_slices((xtest, ytest))
def prepare(inputs, outputs):
inputs = tf.divide(x=inputs, y=255)
inputs = tf.expand_dims(inputs, -1)
targets = tf.one_hot(indices=outputs, depth=10)
return inputs, targets
train = train.take(5_000).batch(4).map(prepare)
test = test.take(1_000).batch(4).map(prepare)
class MyCNN(K.Model):
def __init__(self):
super(MyCNN, self).__init__()
Conv = partial(Conv2D, kernel_size=(3, 3), activation=nn.relu)
MaxPool = partial(MaxPooling2D, pool_size=(2, 2))
self.conv1 = Conv(filters=8)
self.maxp1 = MaxPool()
self.conv2 = Conv(filters=16)
self.maxp2 = MaxPool()
self.conv3 = Conv(filters=32)
self.maxp3 = MaxPool()
self.flatt = Flatten()
self.dens1 = Dense(64, activation=nn.relu)
self.drop1 = Dropout(.5)
self.dens2 = Dense(10, activation=nn.softmax)
def call(self, x, training=None, **kwargs):
x = self.conv1(x)
x = self.maxp1(x)
x = self.conv2(x)
x = self.maxp2(x)
x = self.conv3(x)
x = self.maxp3(x)
x = self.flatt(x)
x = self.dens1(x)
x = self.drop1(x)
x = self.dens2(x)
return x
model = MyCNN()
loss_object = tf.losses.CategoricalCrossentropy()
def compute_loss(model, x, y, training):
out = model(inputs=x, training=training)
sample_weight = tf.random.uniform((tf.shape(x)[0], 1),
minval=0,
maxval=1,
dtype=tf.float32)
loss = loss_object(y_true=y, y_pred=out, sample_weight=sample_weight)
return loss
def get_grad(model, x, y):
with tf.GradientTape() as tape:
loss = compute_loss(model, x, y, training=False)
return loss, tape.gradient(loss, model.trainable_variables)
optimizer = tf.optimizers.Adam()
verbose = "Epoch {:2d}" \
" Loss: {:.3f} TLoss: {:.3f} Acc: {:.3%} TAcc: {:.3%}"
for epoch in range(1, 10 + 1):
train_loss = tf.metrics.Mean()
train_acc = tf.metrics.CategoricalAccuracy()
test_loss = tf.metrics.Mean()
test_acc = tf.metrics.CategoricalAccuracy()
for x, y in train:
loss_value, grads = get_grad(model, x, y)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss.update_state(loss_value)
train_acc.update_state(y, model(x, training=True))
for x, y in test:
loss_value, _ = get_grad(model, x, y)
test_loss.update_state(loss_value)
test_acc.update_state(y, model(x, training=False))
print(verbose.format(epoch,
train_loss.result(),
train_acc.result(),
test_loss.result(),
test_acc.result()))
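Since the asker's targets are one-hot encoded, one simple way to turn per-class weights into the sample_weight expected by the loss is a weighted sum against y. A small sketch under that assumption (the class_weights values below are purely illustrative):

class_weights = tf.constant([1., 2., 1., 1., 1., 1., 1., 1., 1., 3.])  # illustrative per-class weights

def compute_loss(model, x, y, training):
    out = model(inputs=x, training=training)
    # y is one-hot with shape (batch, 10); this selects each sample's class weight
    sample_weight = tf.reduce_sum(y * class_weights, axis=-1)
    return loss_object(y_true=y, y_pred=out, sample_weight=sample_weight)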
Just to complement the answer: to go from class weights to per-sample weights, you can do something like this.
First, an example without weights:
y_true = [[0, 1, 0], [0, 0, 1]]
y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
# Using 'auto'/'sum_over_batch_size' reduction type.
cce = tf.keras.losses.CategoricalCrossentropy()
cce(y_true, y_pred).numpy()
Now with the weighted implementation:
y_true = [[0, 1, 0], [0, 0, 1]]
y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
select_one_sum_indexes = tf.where(tf.equal(y_true, 1))
Class_index = tf.gather(select_one_sum_indexes, 1, axis=1)
Class_index = tf.cast(Class_index, tf.int32)
Define your weights here:
weight = tf.constant([1.2, 1., 10.])
Dictionary mapping each class to its weight:
table = tf.lookup.StaticHashTable(
initializer=tf.lookup.KeyValueTensorInitializer(
keys=tf.constant([0, 1, 2]),
values=weight
),default_value=1.)
weight_sample_class = table.lookup(Class_index)
Loss with class weights:
cce = tf.keras.losses.CategoricalCrossentropy()
cce(y_true, y_pred,sample_weight=weight_sample_class).numpy()
I am new to Generative Adversarial Networks (GANs) and to neural networks in general.
Using Python and Keras, I want to apply GANs to time-series prediction. My final goal also includes detecting anomalies in the time series.
I'm using the popular Air Passengers time-series data.
Here is the code I am using for time-series prediction. However, the result I get using GANs is a bit hard for me to interpret, and I think it needs some improvement.
Thanks for your help.
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
#import matplotlib.pyplot as plt
from matplotlib import pyplot as plt
import pandas as pd
import sys
import numpy as np
class GAN():
def __init__(self):
self.data_rows = 1
self.data_cols = 1
self.data_shape = (self.data_rows, self.data_cols)
self.latent_dim = 48
optimizer = Adam(0.0002, 0.5)
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
self.generator = self.build_generator()
z = Input(shape=(self.latent_dim,))
data = self.generator(z)
self.discriminator.trainable = False
validity = self.discriminator(data)
self.combined = Model(z, validity)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def build_generator(self):
model = Sequential()
model.add(Dense(256, input_dim=self.latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(1024))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.data_shape), activation='linear'))
model.add(Reshape(self.data_shape))
model.summary()
noise = Input(shape=(self.latent_dim,))
data = model(noise)
return Model(noise, data)
def build_discriminator(self):
model = Sequential()
model.add(Flatten(input_shape=self.data_shape))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
data = Input(shape=self.data_shape)
validity = model(data)
return Model(data, validity)
def train(self, epochs, batch_size=128, sample_interval=50):
df = pd.read_csv("AirPassengers.csv")
ts = df[["#Passengers"]]
X_train = ts.as_matrix()
# Rescale -1 to 1
#X_train = X_train / 127.5 - 1.
X_train = np.expand_dims(X_train, axis=3)
print("X_train")
print(X_train.shape)
#print(X_train)
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
idx = np.random.randint(0, X_train.shape[0], batch_size)
data_s = X_train[idx]
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
gen_data = self.generator.predict(noise)
d_loss_real = self.discriminator.train_on_batch(data_s, valid)
d_loss_fake = self.discriminator.train_on_batch(gen_data, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Train the generator (to have the discriminator label samples as valid)
g_loss = self.combined.train_on_batch(noise, valid)
print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
if epoch % sample_interval == 0:
self.plot_gan_result(epoch, batch_size)
c = X_train.reshape(144, 1)
fig, axs = plt.subplots()
axs.plot(c, color = "blue", label = 'true')
def plot_gan_result(self, epoch, batch_size):
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
gen_data = self.generator.predict(noise)
b = gen_data.reshape(24, 1)
fig, axs = plt.subplots()
print("noise shape")
print(noise.shape)
print(noise[0])
axs.plot(b, color = "red", label = 'generated')
if __name__ == '__main__':
gan = GAN()
gan.train(epochs=30, batch_size=24, sample_interval=200)
Task: using the "fetch_lfw_people" dataset as an example, write and train an autoencoder.
Write the iteration code per epoch. Write code to visualize the learning process and compute the validation metrics after each epoch.
Train the autoencoder. Achieve a low loss on validation.
My code:
from sklearn.datasets import fetch_lfw_people
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt  # needed for the plots below
Data preparation:
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
X = lfw_people['images']
X_train, X_test = train_test_split(X, test_size=0.1)
X_train = torch.tensor(X_train, dtype=torch.float32, requires_grad=True)
X_test = torch.tensor(X_test, dtype=torch.float32, requires_grad=False)
dataset_train = TensorDataset(X_train, torch.zeros(len(X_train)))
dataset_test = TensorDataset(X_test, torch.zeros(len(X_test)))
batch_size = 32
train_loader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset_test, batch_size=batch_size, shuffle=False)
Create a network with encoding and decoding functions:
class Autoencoder(torch.nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
self.encoder = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=2),
torch.nn.ReLU(),
torch.nn.Conv2d(in_channels=32, out_channels=64, stride=2, kernel_size=3),
torch.nn.ReLU(),
torch.nn.Conv2d(in_channels=64, out_channels=64, stride=2, kernel_size=3),
torch.nn.ReLU(),
torch.nn.Conv2d(in_channels=64, out_channels=64, stride=2, kernel_size=3)
)
self.decoder = torch.nn.Sequential(
torch.nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=3, stride=2),
torch.nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=(3,4), stride=2),
torch.nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=4, stride=2),
torch.nn.ConvTranspose2d(in_channels=32, out_channels=1, kernel_size=(4,3), stride=2)
)
def encode(self, X):
encoded_X = self.encoder(X)
batch_size = X.shape[0]
return encoded_X.reshape(batch_size, -1)
def decode(self, X):
pre_decoder = X.reshape(-1, 64, 2, 1)
return self.decoder(pre_decoder)
I check how the model works before training, on one example:
model = Autoencoder()
sample = X_test[:1]
sample = sample[:, None]
result = model.decode(model.encode(sample)) # before train
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
ax1.imshow(sample[0][0].detach().numpy(), cmap=plt.cm.Greys_r)
ax2.imshow(result[0][0].detach().numpy(), cmap=plt.cm.Greys_r)
plt.show()
The result is unsatisfactory. I start training:
model = Autoencoder()
loss = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
history_train = []
history_test = []
for i in range(5):
for x, y in train_loader:
x = x[:, None]
model.train()
decoded_x = model.decode(model.encode(x))
mse_loss = loss(torch.tensor(decoded_x, dtype=torch.float), x)
optimizer.zero_grad()
mse_loss.backward()
optimizer.step()
history_train.append(mse_loss.detach().numpy())
model.eval()
with torch.no_grad():
for x, y in train_loader:
x = x[:, None]
result_x = model.decode(model.encode(x))
loss_test = loss(torch.tensor(result_x, dtype=torch.float), x)
history_test.append(loss_test.detach().numpy())
plt.subplot(1, 2, 1)
plt.plot(history_train)
plt.title("Optimization process for train data")
plt.subplot(1, 2, 2)
plt.plot(history_test)
plt.title("Loss for test data")
plt.show()
There is a huge loss on both the training and the test data.
After training, nothing has changed:
with torch.no_grad():
model.eval()
res1 = model.decode(model.encode(sample))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
ax1.imshow(sample[0][0].detach().numpy(), cmap=plt.cm.Greys_r)
ax2.imshow(res1[0][0].detach().numpy(), cmap=plt.cm.Greys_r)
plt.show()
Why is the loss so big? Rescaling the input to the interval [-1, 1] does not help; I did it like this: (value / 255) * 2 - 1.
Why don't the model parameters change after training?
Why doesn't the decoded sample change?
Result: before train, after train, loss
https://i.stack.imgur.com/OhdrJ.jpg
1) Replace the line
mse_loss = loss(torch.tensor(decoded_x, dtype=torch.float), x)
with the line
mse_loss = loss(decoded_x, x)
2) Replace the lines
model.eval()
with torch.no_grad():
for x, y in train_loader:
with the lines
model.eval()
with torch.no_grad():
for x, y in test_loader:
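Putting both fixes together, the relevant part of the question's loop would look roughly like this (a sketch, reusing the model, loaders, loss and optimizer defined in the question):

for i in range(5):
    model.train()
    for x, y in train_loader:
        x = x[:, None]
        decoded_x = model.decode(model.encode(x))
        mse_loss = loss(decoded_x, x)   # fix 1: no torch.tensor() wrapper, so the graph is kept
        optimizer.zero_grad()
        mse_loss.backward()
        optimizer.step()
        history_train.append(mse_loss.detach().numpy())

    model.eval()
    with torch.no_grad():
        for x, y in test_loader:        # fix 2: evaluate on the test loader
            x = x[:, None]
            result_x = model.decode(model.encode(x))
            history_test.append(loss(result_x, x).numpy())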
I wrote AlexNet in TensorFlow to run on the MNIST dataset. I get a ValueError saying: Negative dimension size caused by subtracting 2 from 1 for 'pool5' (op: 'MaxPool') with input shapes: [?,1,1,1024]. How do I solve it? Here is my code:
from __future__ import print_function
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import os
import random
import matplotlib.pyplot as plt
import numpy as np
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 1000
display = True
display_step_console = 5
learn_from_scratch = False
train = False
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.80 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create AlexNet model
def conv1st(name, l_input, w, b):
cov = tf.nn.conv2d(l_input, w, strides=[1, 4, 4, 1], padding='VALID')
return tf.nn.relu(tf.nn.bias_add(cov,b), name=name)
def conv2d(name, l_input, w, b):
cov = tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME')
return tf.nn.relu(tf.nn.bias_add(cov,b), name=name)
def max_pool(name, l_input, k, s):
return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, s, s, 1],
padding='VALID', name=name)
def norm(name, l_input, lsize=4):
return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name=name)
def alex_net(_X, weights, biases, _dropout):
# Reshape input picture
_X = tf.reshape(_X, shape=[-1, 28, 28, 1])
# Convolution Layer
conv1 = conv1st('conv1', _X, _weights['wc1'], _biases['bc1'])
# Max Pooling (down-sampling)
pool1 = max_pool('pool1', conv1, k=2, s=2)
# Apply Normalization
norm1 = norm('norm1', pool1, lsize=4)
# Apply Dropout
norm1 = tf.nn.dropout(norm1, _dropout)
# Convolution Layer
conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
# Max Pooling (down-sampling)
pool2 = max_pool('pool2', conv2, k=2, s=2)
# Apply Normalization
norm2 = norm('norm2', pool2, lsize=4)
# Apply Dropout
norm2 = tf.nn.dropout(norm2, _dropout)
# Convolution Layer
conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
conv4 = conv2d('conv4', conv3, _weights['wc4'], _biases['bc4'])
conv5 = conv2d('conv5', conv4, _weights['wc5'], _biases['bc5'])
# Max Pooling (down-sampling)
pool5 = max_pool('pool5', conv5, k=2, s=2)
# Apply Normalization
norm5 = norm('norm5', pool5, lsize=4)
# Apply Dropout
norm5 = tf.nn.dropout(norm5, _dropout)
# Fully connected layer
dense1 = tf.reshape(norm5, [-1, _weights['wd1'].get_shape().as_list()[0]])  # Reshape norm5 output to fit dense layer input
dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'],
name='fc1') # Relu activation
dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'],
name='fc2') # Relu activation
# Output, class prediction
out = tf.matmul(dense2, _weights['out']) + _biases['out']
return out
# Store layers weight & bias
_weights = {
'wc1': tf.Variable(tf.random_normal([11, 11, 1, 96])),
'wc2': tf.Variable(tf.random_normal([5, 5, 96, 256])),
'wc3': tf.Variable(tf.random_normal([3, 3, 256, 512])),
'wc4': tf.Variable(tf.random_normal([3, 3, 512, 1024])),
'wc5': tf.Variable(tf.random_normal([3, 3, 1024, 1024])),
#'wd1': tf.Variable(tf.random_normal([8*8*256, 1024])),
'wd1': tf.Variable(tf.random_normal([6*6*256, 3072])),
'wd2': tf.Variable(tf.random_normal([3072, 4096])),
'out': tf.Variable(tf.random_normal([4096, n_classes]))
}
_biases = {
'bc1': tf.Variable(tf.random_normal([96])),
'bc2': tf.Variable(tf.random_normal([256])),
'bc3': tf.Variable(tf.random_normal([512])),
'bc4': tf.Variable(tf.random_normal([1024])),
'bc5': tf.Variable(tf.random_normal([1024])),
'bd1': tf.Variable(tf.random_normal([3072])),
'bd2': tf.Variable(tf.random_normal([4096])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
############### NOT SO IMPORTANT ANYMORE###################################
# Construct model
pred = alex_net(x, _weights, _biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,
labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
#Create summary scalars and operation
n1 = tf.summary.scalar("cost", cost)
n2 = tf.summary.scalar("accuracy", accuracy)
train_summary_op = tf.summary.merge([n1,n2])
#Do writer
log_dir = "./alexnet-classification-model-checkpoints/summary"
train_writer = tf.summary.FileWriter(log_dir+'/train',
graph=tf.get_default_graph())
# Initializing the variables
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=1)
initial_step = 0
# Launch the graph
with tf.Session() as sess:
if learn_from_scratch == False:
if os.path.isfile('./alexnet-classification-model-checkpoints/step.txt'):
with open("alexnet-classification-model-checkpoints/step.txt", "r") as file:
step = file.read()
print(step)
initial_step = int(step)
if os.path.isfile('./alexnet-classification-model-checkpoints/checkpoint') and os.path.isfile('./alexnet-classification-model-checkpoints/my-model.ckpt.meta'):
saver = tf.train.import_meta_graph('alexnet-classification-model-checkpoints/my-model.ckpt.meta')
saver.restore(sess, 'alexnet-classification-model-checkpoints/my-model.ckpt')
print("Loaded model successfully!")
else:
print("A saved model wasn't found, starting from scratch")
sess.run(init)
else:
sess.run(init)
if train:
print("Starting training!")
step = 1
# Keep training until reach max iterations
while step * batch_size <= training_iters:
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step_console == 0:
if display:
batch_x_eval, batch_y_eval = mnist.train.next_batch(500,
shuffle=True)
# Calculate batch loss and accuracy
loss, acc, summary = sess.run([cost, accuracy,
train_summary_op], feed_dict={x: batch_x_eval,
y: batch_y_eval,
keep_prob: 1.0})
train_writer.add_summary(summary, global_step=((step +
initial_step)*batch_size))
print("Iter " + str((step + initial_step)*batch_size) +
", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " +
\
"{:.5f}".format(acc))
else:
print("Still training...
{}%".format(round((step*batch_size / training_iters)*100), 2))
step += 1
print("Optimization Finished!")
savePath = saver.save(sess, 'alexnet-classification-model-checkpoints/my-model.ckpt')
with open("alexnet-classification-model-checkpoints/step.txt", "w") as file:
file.write(str(initial_step+step))
print("Saved!")
# Calculate accuracy for 256 mnist test images
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: mnist.test.images,
y: mnist.test.labels,
keep_prob: 1.}))
num = random.randint(0, mnist.test.images.shape[0])
img = mnist.test.images[num]
cls = sess.run(tf.argmax(conv_net(img, weights, biases, dropout), 1))
cls2 = mnist.test.labels[num]
plt.imshow(img.reshape(28, 28), cmap=plt.cm.binary)
print ('NN predicted', cls, np.argmax(cls2))
plt.show()
I have a problem that deals with predicting two outputs given a vector of predictors.
Assume that a predictor vector looks like x1, y1, att1, att2, ..., attn, where x1, y1 are coordinates and the att's are the other attributes attached to the occurrence of the x1, y1 coordinates. Based on this predictor set I want to predict x2, y2. This is a time-series problem, which I am trying to solve using multiple regression.
My question is: how do I set up Keras so that it gives me 2 outputs in the final layer?
from keras.models import Model
from keras.layers import *
#inp is a "tensor", that can be passed when calling other layers to produce an output
inp = Input((10,)) #supposing you have ten numeric values as input
#here, SomeLayer() is defining a layer,
#and calling it with (inp) produces the output tensor x
x = SomeLayer(blablabla)(inp)
x = SomeOtherLayer(blablabla)(x) #here, I just replace x, because this intermediate output is not interesting to keep
#here, I want to keep the two different outputs for defining the model
#notice that both left and right are called with the same input x, creating a fork
out1 = LeftSideLastLayer(balbalba)(x)
out2 = RightSideLastLayer(banblabala)(x)
#here, you define which path you will follow in the graph you've drawn with layers
#notice the two outputs passed in a list, telling the model I want it to have two outputs.
model = Model(inp, [out1,out2])
model.compile(optimizer = ...., loss = ....) #loss can be one for both sides or a list with different loss functions for out1 and out2
model.fit(inputData,[outputYLeft, outputYRight], epochs=..., batch_size=...)
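For the question's concrete case (predicting the two coordinates x2 and y2 from a vector of numeric predictors), a minimal instantiation of the sketch above could look like this; the layer sizes and the input width of 10 are just placeholders:

from keras.models import Model
from keras.layers import Input, Dense

inp = Input((10,))                  # e.g. x1, y1, att1, ..., att8
h = Dense(64, activation='relu')(inp)
h = Dense(64, activation='relu')(h)
out_x2 = Dense(1, name='x2')(h)     # first regression output
out_y2 = Dense(1, name='y2')(h)     # second regression output

model = Model(inp, [out_x2, out_y2])
model.compile(optimizer='adam', loss='mse')
# model.fit(X, [y_x2, y_y2], epochs=10, batch_size=32)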
You can make a model with multiple outputs either with the Functional API or by subclassing tf.keras.Model.
Here's an example of dual outputs (regression and classification) on the Iris Dataset, using the Functional API:
from sklearn.datasets import load_iris
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input, Model
import tensorflow as tf
data, target = load_iris(return_X_y=True)
X = data[:, (0, 1, 2)]
Y = data[:, 3]
Z = target
inputs = Input(shape=(3,), name='input')
x = Dense(16, activation='relu', name='16')(inputs)
x = Dense(32, activation='relu', name='32')(x)
output1 = Dense(1, name='cont_out')(x)
output2 = Dense(3, activation='softmax', name='cat_out')(x)
model = Model(inputs=inputs, outputs=[output1, output2])
model.compile(loss={'cont_out': 'mean_absolute_error',
'cat_out': 'sparse_categorical_crossentropy'},
optimizer='adam',
metrics={'cat_out': tf.metrics.SparseCategoricalAccuracy(name='acc')})
history = model.fit(X, {'cont_out': Y, 'cat_out': Z}, epochs=10, batch_size=8)
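At inference time, a multi-output functional model like this returns one array per output, in the order they were passed to Model. For example:

cont_pred, cat_pred = model.predict(X[:5])
print(cont_pred.shape)  # (5, 1): the regression output
print(cat_pred.shape)   # (5, 3): the class probabilities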
Here's a simplified version:
from sklearn.datasets import load_iris
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input, Model
data, target = load_iris(return_X_y=True)
X = data[:, (0, 1, 2)]
Y = data[:, 3]
Z = target
inputs = Input(shape=(3,))
x = Dense(16, activation='relu')(inputs)
x = Dense(32, activation='relu')(x)
output1 = Dense(1)(x)
output2 = Dense(3, activation='softmax')(x)
model = Model(inputs=inputs, outputs=[output1, output2])
model.compile(loss=['mae', 'sparse_categorical_crossentropy'], optimizer='adam')
history = model.fit(X, [Y, Z], epochs=10, batch_size=8)
Here's the same example, subclassing tf.keras.Model and with a custom training loop:
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
from sklearn.datasets import load_iris
tf.keras.backend.set_floatx('float64')
iris, target = load_iris(return_X_y=True)
X = iris[:, :3]
y = iris[:, 3]
z = target
ds = tf.data.Dataset.from_tensor_slices((X, y, z)).shuffle(150).batch(8)
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.d0 = Dense(16, activation='relu')
self.d1 = Dense(32, activation='relu')
self.d2 = Dense(1)
self.d3 = Dense(3, activation='softmax')
def call(self, x, training=None, **kwargs):
x = self.d0(x)
x = self.d1(x)
a = self.d2(x)
b = self.d3(x)
return a, b
model = MyModel()
loss_obj_reg = tf.keras.losses.MeanAbsoluteError()
loss_obj_cat = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
loss_reg = tf.keras.metrics.Mean(name='regression loss')
loss_cat = tf.keras.metrics.Mean(name='categorical loss')
error_reg = tf.keras.metrics.MeanAbsoluteError()
error_cat = tf.keras.metrics.SparseCategoricalAccuracy()
@tf.function
def train_step(inputs, y_reg, y_cat):
with tf.GradientTape() as tape:
pred_reg, pred_cat = model(inputs)
reg_loss = loss_obj_reg(y_reg, pred_reg)
cat_loss = loss_obj_cat(y_cat, pred_cat)
gradients = tape.gradient([reg_loss, cat_loss], model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
loss_reg(reg_loss)
loss_cat(cat_loss)
error_reg(y_reg, pred_reg)
error_cat(y_cat, pred_cat)
for epoch in range(50):
for xx, yy, zz in ds:
train_step(xx, yy, zz)
template = 'Epoch {:>2}, SCCE: {:>5.2f},' \
' MAE: {:>4.2f}, SAcc: {:>5.1%}'
print(template.format(epoch+1,
loss_cat.result(),
error_reg.result(),
error_cat.result()))
loss_reg.reset_states()
loss_cat.reset_states()
error_reg.reset_states()
error_cat.reset_states()