How can I match the encoder/decoder dimensions of a deep autoencoder? - python

I'm coding a very simple deep autoencoder with the MNIST data set; the encoder has three layers leading down to the latent space.
However, there is a problem with the encoder and decoder dimensions.
The exact error message is: ValueError: Error when checking input: expected input_2 to have shape (128,) but got array with shape (32,) at line 60
(line 60: decoded_imgs = decoder.predict(encoded_imgs))
and I don't know how to solve it. I will attach my full code below.
Please help. Thanks.
from keras.layers import Input, Dense
from keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
import sys
np.set_printoptions(threshold=sys.maxsize)
# encoding_dimensions
encoding_dim = 128
encoding_dim2 = 64
encoding_dim3 = 32
# input placeholder
input_img = Input(shape=(784,))
encoded = Dense(128, activation='relu')(input_img)
encoded = Dense(64, activation='relu')(encoded)
encoded = Dense(32, activation='relu')(encoded)
decoded = Dense(64, activation='relu')(encoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(784, activation='sigmoid')(decoded)
print(encoded.shape)
print(decoded.shape)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
encoder = Model(input_img, encoded)
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))
from keras.datasets import mnist
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_test.shape)
print(x_train.shape)
autoencoder.fit(x_train, x_train,
                epochs=1,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
n = 10
plt.figure(num=2, figsize=(20, 3))
for i in range(n):
    # input data
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_yaxis().set_visible(False)
    ax.get_xaxis().set_visible(False)
    # reconstructed data
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

The problem is with the input definition of your decoder model. You set the input of the decoder model to be: encoded_input = Input(shape=(encoding_dim,)), where encoding_dim = 128. However, the real input to the decoder is the encoder's output, which has size 32 (the number of neurons in the last Dense layer of the encoder). You need to fix the declared input shape of the decoder model, for example:
encoded_input = Input(shape=(32,))
or, more generally (taking the encoder output's shape without the batch dimension):
encoded_input = Input(shape=encoded.shape[1:])
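Note that in this model the decoder half spans three Dense layers, so fixing the input shape alone still leaves the decoder model wrapping only the last Dense(784) layer, which itself expects a 128-dimensional input. A minimal sketch (my extension of the fix, reusing the names from the question) that rebuilds the full decoder by chaining the three trained decoder layers:
encoded_input = Input(shape=(encoding_dim3,))  # 32, the encoder's output size
x = encoded_input
# autoencoder.layers[-3:] are Dense(64) -> Dense(128) -> Dense(784)
for layer in autoencoder.layers[-3:]:
    x = layer(x)
decoder = Model(encoded_input, x)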

Related

logits and labels must have the same first dimension, got logits shape [1568,10] and labels shape [32]

I'm a deep learning enthusiast, and I want to fine-tune a keras.applications model to train a new model, but something is wrong. The code is:
import cv2
import keras.applications.resnet_v2
from keras.applications.resnet_v2 import ResNet50V2
from keras.applications.resnet_v2 import preprocess_input, decode_predictions
import numpy as np
import keras
import tensorflow as tf
from keras.layers import Dense, Input
from keras.models import Model
import os
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()  # train size: (60000, 28, 28)
print(x_train.shape)#(60000, 28, 28)
print(y_train.shape)#(60000,)
x_train = [cv2.cvtColor(cv2.resize(img,(28,28)),cv2.COLOR_GRAY2BGR) for img in x_train]
x_train = np.asarray(x_train)
x_test = [cv2.cvtColor(cv2.resize(img,(28,28)),cv2.COLOR_GRAY2BGR) for img in x_test]
x_test = np.asarray(x_test)
x_train, x_test = x_train / 255.0, x_test / 255.0
base_model = ResNet50V2( weights='imagenet', include_top = False)
x = base_model.get_layer("conv2_block1_preact_bn").output
x = Dense(1024, activation='relu')(x)
predictions = Dense(200, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=1)
When I run it, it fails with the error:
"res = tf.nn.sparse_softmax_cross_entropy_with_logits(
Node: 'sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits'
logits and labels must have the same first dimension, got logits shape [1568,10] and labels shape [32]
[[{{node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits}}]] [Op:__inference_train_function_4157]"
But when I write this line instead, it runs fine:
x = base_model.output
I don't know why; I need your help, thanks.
Your code has multiple issues and needs quite a few changes.
You're missing a Flatten layer in the middle. Because of that, when computing the loss (with a 1D vector of labels), TensorFlow squashes all of the dimensions except the last one together, giving a [7 x 7 x 32 = 1568, 200]-sized set of logits. Here 7 x 7 is the output width and height of your last conv layer for a 28 x 28 input, and 32 is the default batch size used by model.fit(). That is not compatible with the labels, which are simply a 32-item 1D vector in each iteration.
from keras.layers import Input, Flatten, Dense  # Flatten was missing from your imports
from keras.models import Model

base_model = ResNet50V2(weights='imagenet', include_top=False)
# The input the way you've defined it has undefined width and height
# dimensions. Defined dimensions are an important requirement for the
# Flatten layer downstream.
input = Input(shape=(28, 28, 3))
x = input
# Getting only the layers we care about
for layer in base_model.layers[1:]:  # skip the backbone's own InputLayer
    x = layer(x)
    if layer.name == "conv2_block1_preact_bn":
        break
# Before feeding data into the dense layer, you need to flatten
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(200, activation='softmax')(x)
model = Model(inputs=input, outputs=predictions)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
One concern I have is that MNIST has 10 classes, so your last layer should only need 10 units, not 200.
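As a side note, a sketch of an alternative (my assumption, not part of the original answer): instead of re-applying the backbone's layers one by one, truncate it with a sub-model. Keras applications require inputs of at least 32 x 32 when building with a fixed input_shape, so this variant resizes MNIST to 32 x 32:
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.models import Model

(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
# Resize to 32x32 and convert grayscale to 3 channels for the backbone
x_train = np.asarray([cv2.cvtColor(cv2.resize(img, (32, 32)), cv2.COLOR_GRAY2BGR)
                      for img in x_train]) / 255.0

base_model = ResNet50V2(weights='imagenet', include_top=False,
                        input_shape=(32, 32, 3))
# Cut the graph at the layer we care about; no layer-by-layer loop needed
features = base_model.get_layer("conv2_block1_preact_bn").output
x = Flatten()(features)
x = Dense(1024, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)  # MNIST has 10 classes
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])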

Unexpected dimensions with TensorFlow dataset

I am trying to do transfer learning with InceptionV3 on the MNIST dataset.
The plan is to read in the MNIST dataset, resize the images, and then use these to train, like so:
import numpy as np
import os
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow.compat.v2 as tf
import tensorflow.compat.v1 as tfv1
from tensorflow.python.keras.applications import InceptionV3
tfv1.enable_v2_behavior()
print(tf.version.VERSION)
img_size = 299
def preprocess_tf_image(image, label):
    image = tf.image.grayscale_to_rgb(image)
    image = tf.image.resize(image, [img_size, img_size])
    return image, label
#Acquire MNIST data
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
#Convert data to [0,1] range
x_train, x_test = x_train / 255.0, x_test / 255.0
#Add extra dimension to images so that they can be converted to RGB
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape (x_test.shape[0], 28, 28, 1)
x_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
x_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
#Convert images to RGB space and resize
x_train = x_train.map(preprocess_tf_image)
x_test = x_test.map(preprocess_tf_image)
img_shape = (img_size, img_size, 3)
#Get trained model, but leave off the head
base_model = InceptionV3(input_shape = img_shape, weights='imagenet', include_top=False)
base_model.trainable = False
#Make a model with a new head
model = tf.keras.Sequential([
    base_model,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])
#Compile model
model.compile(
    optimizer='adam', #tf.keras.optimizers.RMSprop(lr=BASE_LEARNING_RATE),
    loss='binary_crossentropy',
    metrics=['accuracy']
)
model.fit(x_train, epochs=5)
model.evaluate(x_test)
But, when I run this, things stop at model.fit() with the error:
ValueError: Error when checking input: expected inception_v3_input to have 4 dimensions, but got array with shape (299, 299, 3)
What's going on?
After you apply map to a dataset, the result carries no batch-size information; you have to invoke the batch function to add it:
x_train = x_train.batch(batch_size = BATCH_SIZE) # adds the batch dimension to the train dataset
x_test = x_test.batch(batch_size = BATCH_SIZE) # likewise for test
After that I could fully train and evaluate the model using Google's Colab.
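For reference, a minimal pipeline sketch (BATCH_SIZE is an assumed value, not from the original answer) chaining the map and batch steps:
BATCH_SIZE = 32  # assumed; tune to your memory budget

# map() transforms single examples; batch() adds the leading batch
# dimension that Keras expects at fit()/evaluate() time
x_train = x_train.map(preprocess_tf_image).batch(BATCH_SIZE)
x_test = x_test.map(preprocess_tf_image).batch(BATCH_SIZE)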

reuse middle layer as input for another model in Keras

I am building a 5-layer autoencoder with Keras. I made the model that maps from input to output, which was fine. I built another model that maps from input to the latent coded vector, which also worked. However, when I tried to make a decoding model that maps from the latent coded vector to the output, it did not work.
I know that I should first make an input layer for the decoder model with the right shape, but I can't figure out how to feed my coded layer's data into the decoder model and have it map from the coded vector to the final layer.
from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
# Prepare data and normalize
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape(len(x_train), -1)
x_test = x_test.reshape(len(x_test), -1)
input_size = 784
hidden_size = 128
coded_size = 64
x = Input(shape=(input_size,))
hidden_1 = Dense(hidden_size, activation='relu')(x)
coded = Dense(coded_size, activation='relu')(hidden_1)
hidden_2 = Dense(hidden_size, activation='relu')(coded)
r = Dense(input_size, activation='sigmoid')(hidden_2)
autoencoder = Model(inputs=x, outputs=r)
encoder = Model(inputs=x, outputs=coded)
decoder_input = Input(shape=(coded_size,)) # should do this, but don't know how to connect it below
decoder = Model(inputs=coded, output=r)
You can do it like this:
decoder_input = Input(shape=(coded_size,))
next_input = decoder_input
# get the decoder layers and apply them consecutively
for layer in autoencoder.layers[-2:]:
    next_input = layer(next_input)
decoder = Model(inputs=decoder_input, outputs=next_input)
As a side note, there is no h in your model. I think it must be replaced by coded.
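To sanity-check that the three models line up, a short usage sketch (illustrative; the shapes follow from the question's data):
encoded_imgs = encoder.predict(x_test)        # shape (10000, 64): latent codes
decoded_imgs = decoder.predict(encoded_imgs)  # shape (10000, 784): reconstructions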

What should be the input to Convolution neural network (CNN) using keras and tensorflow?

I'm trying to create a CNN model using Keras with TensorFlow as the backend.
Below is the code for the same.
I cannot understand what input it is expecting...
import cv2,os
import glob
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split  # sklearn.cross_validation has been removed in newer scikit-learn
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Input, Convolution2D, MaxPooling2D, Dense, Dropout, Flatten
PATH = os.getcwd()
data_path = PATH + '/data1/cat/*.PNG'
files = glob.glob(data_path)
X_data = []
for myFile in files:
    image = cv2.imread(myFile)
    image_resize = cv2.resize(image, (128, 128))
    X_data.append(image_resize)
image_data = np.array(X_data)
image_data = image_data.astype('float32')
image_data /= 255
print('X_data shape:', image_data.shape)
#Class and labels
class_num = 2
total_Images = image_data.shape[0]
labels = np.ones((total_Images),dtype='int64')
labels[0:30] = 0
labels[31:] = 1
Y = to_categorical(labels,class_num)
#print(Y);
# Shuffle the dataset
x, y = shuffle(image_data, Y, random_state=2)
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
input_shape = image_data[0].shape
#print(input_shape)
model = Sequential()
conv1 = Convolution2D(32,(3,3),padding='same',activation='relu')(input_shape)
conv2 = Convolution2D(32,(3,3),padding='same',activation='relu')(conv1)
pool_1 = MaxPooling2D(pool_size=(2,2))(conv2)
drop1 = Dropout(0.5)(pool_1)
conv3 = Convolution2D(64,(3,3),padding='same',activation='relu')(drop1)
conv4 = Convolution2D(64,(3,3),padding='same',activation='relu')(conv3)
pool_2 = MaxPooling2D(pool_size=(2,2))(conv4)
drop2 = Dropout(0.5)(pool_2)
flat = Flatten()(drop2)
hidden = Dense(64,activation='relu')(flat)
drop3 = Dropout(0.5)(hidden)
out = Dense(class_num,activation='softmax')(drop3)
model.compile(loss = 'categorical_crossentropy', optimizer= 'adam', metrics=['accuracy'])
model.fit(X_train,y_train,batch_size=16,nb_epoch=20, verbose=1, validation_data=(X_test,y_test))
model.evaluate(X_test,y_test,verbose=1)
Error: ValueError: Layer conv2d_1 was called with an input that isn't a
symbolic tensor. Received type: <class 'tuple'>. Full input: [(128, 128,3)].
All inputs to the layer should be tensors.
You are attempting to use the functional API and the Sequential model all at once. You first need to eliminate this line:
model = Sequential()
Then, following the functional API documentation, add an Input layer whose shape matches your images (rows, columns, channels with the default channels_last data format), filling in the size values from your X_train matrix:
inputs = Input(shape=(128, 128, 3))
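A fuller sketch of the fix (my extension of the answer, reusing the layer stack and shapes from the question) that wires the whole graph with the functional API and closes it with a Model:
from keras.models import Model

inputs = Input(shape=(128, 128, 3))  # the resized image shape
conv1 = Convolution2D(32, (3, 3), padding='same', activation='relu')(inputs)
conv2 = Convolution2D(32, (3, 3), padding='same', activation='relu')(conv1)
pool_1 = MaxPooling2D(pool_size=(2, 2))(conv2)
drop1 = Dropout(0.5)(pool_1)
conv3 = Convolution2D(64, (3, 3), padding='same', activation='relu')(drop1)
conv4 = Convolution2D(64, (3, 3), padding='same', activation='relu')(conv3)
pool_2 = MaxPooling2D(pool_size=(2, 2))(conv4)
drop2 = Dropout(0.5)(pool_2)
flat = Flatten()(drop2)
hidden = Dense(64, activation='relu')(flat)
drop3 = Dropout(0.5)(hidden)
out = Dense(class_num, activation='softmax')(drop3)
model = Model(inputs=inputs, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=16, epochs=20, verbose=1,
          validation_data=(X_test, y_test))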

Too strong regularization for an autoencoder (Keras autoencoder tutorial code)

I'm using this tutorial about autoencoders: https://blog.keras.io/building-autoencoders-in-keras.html
All the code works; however, the performance is very bad (the results are blurred) when I set the regularization parameter to 10e-5, which is the value used in the tutorial code. In fact, I need to decrease the regularization to 10e-8 to get a correct output.
My question is as follows: why is the result so different from the tutorial's? The data is the same and the parameters are the same; I didn't expect a large difference.
I suspect that the default behavior of the Keras functions has changed since May 14th, 2016 (is automatic batch normalization performed in all cases?).
Outputs
With 10e-5 regularization (blurred); val_loss of 0.2967 after 50 epochs and 0.2774 after 100 epochs.
With 10e-8 regularization: val_loss of 0.1080 after 50 epochs and 0.1009 after 100 epochs.
With no regularization: val_loss of 0.1018 after 50 epochs and 0.0944 after 100 epochs.
Complete code (for reference)
# Source: https://blog.keras.io/building-autoencoders-in-keras.html
import numpy as np
np.random.seed(2713)
from keras.layers import Input, Dense
from keras.models import Model
from keras import regularizers
encoding_dim = 32
input_img = Input(shape=(784,))
# add a Dense layer with an L1 activity regularizer
encoded = Dense(encoding_dim, activation='relu',
                activity_regularizer=regularizers.l1(10e-5))(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
from keras.datasets import mnist
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
autoencoder.fit(x_train, x_train,
                epochs=100,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
I have the same issue; it is tracked on GitHub here: https://github.com/keras-team/keras/issues/5414
It seems you were correct to just change the constant.
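For reference, the change amounts to a single constant in the encoder layer (values taken from the question itself):
from keras import regularizers
# 10e-5 (= 1e-4) gives blurred reconstructions here; 10e-8 (= 1e-7), or no
# regularizer at all, yields much lower validation loss per the question
encoded = Dense(encoding_dim, activation='relu',
                activity_regularizer=regularizers.l1(10e-8))(input_img)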
