Error in Image Segmentation using Unet and Keras - python

I am using a U-Net model for satellite image segmentation with 512x512x3 inputs. But on executing the model I get the following error:
ValueError: Cannot feed value of shape (3, 512, 512) for Tensor 'conv2d_19_target:0', which has shape '(?, ?, ?, ?)'
The code for the U-Net model is:
from __future__ import print_function
import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from data import load_train_data, load_test_data
K.set_image_data_format('channels_last') # TF dimension ordering in this code
img_rows = 512
img_cols = 512
image_channels=3
smooth = 1.
OUTPUT_MASK_CHANNELS = 1
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)
def get_unet():
    inputs = Input((img_rows, img_cols, 3))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
    conv_final = Conv2D(OUTPUT_MASK_CHANNELS, (1, 1), activation='sigmoid')(conv9)
    #conv_final = Activation('sigmoid')(conv_final)
    model = Model(inputs, conv_final, name="ZF_UNET_224")
    #conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
    #model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def preprocess(imgs):
    imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.uint8)
    for i in range(imgs.shape[0]):
        imgs_p[i] = resize(imgs[i], (img_cols, img_rows), preserve_range=True)
    imgs_p = imgs_p[..., np.newaxis]
    return imgs_p
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()
    #imgs_train = preprocess(imgs_train)
    #imgs_mask_train = preprocess(imgs_mask_train)
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    imgs_train -= mean
    imgs_train /= std
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(imgs_train, imgs_mask_train, batch_size=3, epochs=20, verbose=2, shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])
    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)
    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std
    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights.h5')
    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)
if __name__ == '__main__':
    train_and_predict()
The error traceback is as follows:
File "/home/deeplearning/Downloads/Models/ultrasound-nerve-segmentation-master/train.py", line 158, in <module> train_and_predict()
File "/home/deeplearning/Downloads/Models/ultrasound-nerve-segmentation-master/train.py", line 124, in train_and_predict callbacks=[model_checkpoint])
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/keras/engine/training.py", line 1037, in fit
validation_steps=validation_steps)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/keras/engine/training_arrays.py", line 199, in fit_loop
outs = f(ins_batch)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2672, in __call__
return self._legacy_call(inputs)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2654, in _legacy_call
**self.session_kwargs)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 767, in run
run_metadata_ptr)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 944, in _run
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (3, 512, 512) for Tensor 'conv2d_19_target:0', which has shape '(?, ?, ?, ?)'
Please help me find what is going wrong here.

You set K.set_image_data_format('channels_last'), but your input image (3 x 512 x 512) has channels first. Either change to K.set_image_data_format('channels_first') (which may not work for the U-Net), or permute the dimensions of your input image with np.transpose so that its shape is (512, 512, 3).
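A minimal sketch of the second option, assuming imgs_train holds channels-first images of shape (N, 3, 512, 512); note also that the failing tensor is the target ('conv2d_19_target'), so the masks may need an explicit channel axis as well:
import numpy as np

# Move the channel axis to the end: (N, 3, 512, 512) -> (N, 512, 512, 3)
imgs_train = np.transpose(imgs_train, (0, 2, 3, 1))
# If the masks are (N, 512, 512), give them a trailing channel axis too,
# since the model's 4-D target tensor expects (N, 512, 512, 1):
imgs_mask_train = imgs_mask_train[..., np.newaxis]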

Related

Having an array passing problem for a multi-scale CNN network

I am dealing with audio data (.wav files). I have created two CNN branches to process the data; at the end the two models concatenate their outputs and produce the classification. But I am having a problem training it: it says the model expects to see 2 arrays but it got one array. Anyone who can help me with this problem, I would appreciate it; I have been struggling for weeks.
This is my config class:
class Config:
    def __init__(self, mode='conv', nfilt=26, nfeat=40, nfft=4096, rate=16000):
        self.mode = mode
        self.nfilt = nfilt
        self.nfeat = nfeat
        self.nfft = nfft
        self.rate = rate
        self.step = int(rate / 10)
        self.model_path = os.path.join('models', mode + '.model')
        self.p_path = os.path.join('pickles', mode + '.p')
I built one function to build the random MFCC features:
def build_rand_feat():
    X = []
    y = []
    _min, _max = float('inf'), -float('inf')
    for _ in tqdm(range(n_sample)):
        rand_class = np.random.choice(class_dist.index, p=prob_dist)
        file = np.random.choice(df[df.label == rand_class].index)
        data, rate = sf.read('audios/' + file)
        label = df.at[file, 'label']
        rand_index = np.random.randint(0, data.shape[0] - config.step)
        sample = data[rand_index:rand_index + config.step]
        X_sample = mfcc(sample, rate, winlen=0.05, winstep=0.02, numcep=config.nfeat, nfilt=config.nfilt, nfft=config.nfft)
        _min = min(np.amin(X_sample), _min)
        _max = max(np.amax(X_sample), _max)
        X.append(X_sample)
        y.append(classes.index(label))
    config.min = _min
    X = np.array(X)
    y = np.array(y)
    X = (X - _min) / (_max - _min)
    if config.mode == 'conv':
        X = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)
        print(X.shape)
    elif config.mode == 'time':
        X = X.reshape(X.shape[0], X.shape[1], X.shape[2])
    y = to_categorical(y, num_classes=10)
    config.data = (X, y)
    return X, y
And this is my multi-scale network:
def get_conv_model():
    main_model = Sequential()
    main_model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=input_shape))
    main_model.add(MaxPool2D((2, 2)))
    main_model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    main_model.add(MaxPool2D((2, 2), padding='same'))
    main_model.add(Flatten())
    second_model = Sequential()
    second_model.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=input_shape))
    second_model.add(MaxPool2D((2, 2), padding='same'))
    second_model.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    second_model.add(MaxPool2D((2, 2)))
    second_model.add(Flatten())
    # first model upper
    main_model = Sequential()
    main_model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=input_shape))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Flatten())
    # second model lower
    lower_model1 = Sequential()
    lower_model1.add(MaxPool2D(strides=(1, 1), padding='same', input_shape=input_shape))
    lower_model1.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=input_shape))
    lower_model1.add(BatchNormalization())
    lower_model1.add(Dropout(0.3))
    lower_model1.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    lower_model1.add(BatchNormalization())
    lower_model1.add(Dropout(0.3))
    lower_model1.add(Conv2D(512, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    lower_model1.add(Flatten())
    # merged models
    merged_model = Concatenate()([main_model.output, lower_model1.output])
    x = Dense(256, activation='relu')(merged_model)
    x = Dropout(0.3)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.3)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.3)(x)
    output = Dense(10, activation='softmax')(x)
    final_model = Model(inputs=[main_model.input, lower_model1.input], outputs=[output])
    final_model.summary()
    final_model.compile(loss="categorical_crossentropy", optimizer=Adam(0.001), metrics=['acc'])
    print(K.eval(final_model.optimizer.lr))
    #class_weight = compute_class_weight('balanced', np.unique(y_flat), y_flat)
    final_model.fit(X, y, epochs=10, batch_size=64, shuffle=True, validation_split=0.3)
    return main_model, lower_model1, merged_model
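The error message itself points at a likely fix: final_model is built with two input tensors (main_model.input and lower_model1.input), so fit must receive a list of two arrays. A minimal sketch, assuming both branches are meant to consume the same MFCC features X:
X, y = build_rand_feat()
# The merged model has two input tensors, so feed the feature array to both branches:
final_model.fit([X, X], y, epochs=10, batch_size=64, shuffle=True, validation_split=0.3)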

Why does the error "input_2_1:0 is both fed and fetched" happen?

I am trying to show the output of intermediate layers in my network and I used the below code:
from keras import models
layer_outputs = [layer.output for layer in w_extraction.layers[:102]]
activation_model = models.Model(inputs=w_extraction.input, outputs=layer_outputs)
activations = activation_model.predict([x_test[8000:8001],wt_expand])
but it produces this error, and I do not know why. Could you please help me with this issue?
Traceback (most recent call last):
  File "", line 1, in <module>
    activations = activation_model.predict([x_test[8000:8001], wt_expand])
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\engine\training.py", line 1169, in predict
    steps=steps)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\engine\training_arrays.py", line 294, in predict_loop
    batch_outs = f(ins_batch)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py", line 2715, in __call__
    return self._call(inputs)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py", line 2671, in _call
    session)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py", line 2623, in _make_callable
    callable_fn = session._make_callable_from_options(callable_opts)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 1471, in _make_callable_from_options
    return BaseSession._Callable(self, callable_options)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 1425, in __init__
    session._session, options_ptr, status)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 528, in __exit__
    c_api.TF_GetCode(self.status.status))
InvalidArgumentError: input_2_1:0 is both fed and fetched.
My complete code is here:
from keras.layers import Input, Concatenate, GaussianNoise,Dropout,BatchNormalization
from keras.layers import Conv2D, AtrousConv2D
from keras.models import Model
from keras.datasets import mnist
from keras.callbacks import TensorBoard
from keras import backend as K
from keras import layers
import matplotlib.pyplot as plt
import tensorflow as tf
import keras as Kr
from keras.optimizers import SGD,RMSprop,Adam
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
import numpy as np
import pylab as pl
import matplotlib.cm as cm
import keract
from matplotlib import pyplot
from keras import optimizers
from keras import regularizers
from tensorflow.python.keras.layers import Lambda
#-----------------building w train---------------------------------------------
w_expand=np.zeros((49999,28,28),dtype='float32')
wv_expand=np.zeros((9999,28,28),dtype='float32')
wt_random=np.random.randint(2, size=(49999,4,4))
wt_random=wt_random.astype(np.float32)
wv_random=np.random.randint(2, size=(9999,4,4))
wv_random=wv_random.astype(np.float32)
w_expand[:,:4,:4]=wt_random
wv_expand[:,:4,:4]=wv_random
x,y,z=w_expand.shape
w_expand=w_expand.reshape((x,y,z,1))
x,y,z=wv_expand.shape
wv_expand=wv_expand.reshape((x,y,z,1))
#-----------------building w test---------------------------------------------
w_test = np.random.randint(2,size=(1,4,4))
w_test=w_test.astype(np.float32)
wt_expand=np.zeros((1,28,28),dtype='float32')
wt_expand[:,0:4,0:4]=w_test
wt_expand=wt_expand.reshape((1,28,28,1))
#-----------------------encoder------------------------------------------------
#------------------------------------------------------------------------------
wtm=Input((28,28,1))
image = Input((28, 28, 1))
conv1 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl1e',dilation_rate=(2,2))(image)
conv2 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl2e',dilation_rate=(2,2))(conv1)
conv3 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl3e',dilation_rate=(2,2))(conv2)
BN=BatchNormalization()(conv3)
encoded = Conv2D(1, (5, 5), activation='relu', padding='same',name='encoded_I',dilation_rate=(2,2))(BN)
add_const = Kr.layers.Lambda(lambda x: x[0] + x[1])
encoded_merged = add_const([encoded,wtm])
#-----------------------decoder------------------------------------------------
#------------------------------------------------------------------------------
deconv1 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl1d',dilation_rate=(2,2))(encoded_merged)
deconv2 = Conv2D(64, (5, 5), activation='relu', padding='same', name='convl2d',dilation_rate=(2,2))(deconv1)
deconv3 = Conv2D(64, (5, 5), activation='relu',padding='same', name='convl3d',dilation_rate=(2,2))(deconv2)
deconv4 = Conv2D(64, (5, 5), activation='relu',padding='same', name='convl4d',dilation_rate=(2,2))(deconv3)
BNd=BatchNormalization()(deconv3)
#DrO2=Dropout(0.25,name='DrO2')(BNd)
decoded = Conv2D(1, (5, 5), activation='sigmoid', padding='same', name='decoder_output',dilation_rate=(2,2))(BNd)
#model=Model(inputs=image,outputs=decoded)
model=Model(inputs=[image,wtm],outputs=decoded)
decoded_noise = GaussianNoise(0.5)(decoded)
#----------------------w extraction------------------------------------
convw1 = Conv2D(64, (3,3), activation='relu', padding='same', name='conl1w',dilation_rate=(2,2))(decoded_noise)
convw2 = Conv2D(64, (3, 3), activation='relu', padding='same', name='convl2w',dilation_rate=(2,2))(convw1)
convw3 = Conv2D(64, (3, 3), activation='relu', padding='same', name='conl3w',dilation_rate=(2,2))(convw2)
convw4 = Conv2D(64, (3, 3), activation='relu', padding='same', name='conl4w',dilation_rate=(2,2))(convw3)
convw5 = Conv2D(64, (3, 3), activation='relu', padding='same', name='conl5w',dilation_rate=(2,2))(convw4)
convw6 = Conv2D(64, (3, 3), activation='relu', padding='same', name='conl6w',dilation_rate=(2,2))(convw5)
pred_w = Conv2D(1, (1, 1), activation='sigmoid', padding='same', name='reconstructed_W',dilation_rate=(2,2))(convw6)
w_extraction=Model(inputs=[image,wtm],outputs=[decoded,pred_w])
#----------------------training the model--------------------------------------
#------------------------------------------------------------------------------
#----------------------Data preparation----------------------------------------
(x_train, _), (x_test, _) = mnist.load_data()
x_validation=x_train[1:10000,:,:]
x_train=x_train[10001:60000,:,:]
#
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_validation = x_validation.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_validation = np.reshape(x_validation, (len(x_validation), 28, 28, 1))
#---------------------compile and train the model------------------------------
w_extraction.compile(optimizer='adam', loss={'decoder_output':'mse','reconstructed_W':'binary_crossentropy'}, loss_weights={'decoder_output': 0.45, 'reconstructed_W': 1.0},metrics=['mae'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=20)
#rlrp = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=20, min_delta=1E-4, verbose=1)
mc = ModelCheckpoint('best_model_5x5F_dil_Los751.h5', monitor='val_loss', mode='min', verbose=1, save_best_only=True)
history = w_extraction.fit([x_train, w_expand], [x_train, w_expand],
                           epochs=200,
                           batch_size=16,
                           validation_data=([x_validation, wv_expand], [x_validation, wv_expand]),
                           callbacks=[TensorBoard(log_dir='E:concatnatenetwork', histogram_freq=0, write_graph=False), es, mc])
You can't both feed and fetch a placeholder (i.e. the underlying Tensor of an Input layer). See this answer for more information. Therefore, you must exclude the input Tensors of w_extraction model from the outputs of activation_model. One way to do this is to filter them based on layer name:
layer_outputs = [layer.output for layer in w_extraction.layers[:102] if not layer.name.startswith('input')]
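With the input tensors filtered out, the activation model can be rebuilt and queried exactly as before:
from keras import models

layer_outputs = [layer.output for layer in w_extraction.layers[:102]
                 if not layer.name.startswith('input')]
activation_model = models.Model(inputs=w_extraction.input, outputs=layer_outputs)
activations = activation_model.predict([x_test[8000:8001], wt_expand])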

Keras Output tensors to a Model must be the output of a Keras `Layer` (thus holding past layer metadata)

I am trying to implement unpooling masks in Keras. I have a VGG encoder that outputs a specific feature map like relu5_1 and a list of unpooling masks.
def VGG19(input_tensor=None, input_shape=None, target_layer=1):
    """
    VGG19, up to the target layer (1 for relu1_1, 2 for relu2_1, etc.)
    """
    if input_tensor is None:
        inputs = Input(shape=input_shape)
    else:
        inputs = Input(tensor=input_tensor, shape=input_shape)
    layer, unpooling_masks = vgg_layers(inputs, target_layer)
    model = Model(inputs, [layer, unpooling_masks], name='vgg19')
    load_weights(model)
    return model, unpooling_masks
def vgg_layers(inputs, target_layer):
    unpooling_masks = []
    # Block 1
    x_b1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x_b1)
    before_pooling = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    unpooling_masks.append(make_unpooling_mask(x, before_pooling))
    # Block 2
    x_b2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x_b2)
    before_pooling = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    unpooling_masks.append(make_unpooling_mask(x, before_pooling))
    # Block 3
    x_b3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x_b3)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
    before_pooling = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    unpooling_masks.append(make_unpooling_mask(x, before_pooling))
    # Block 4
    x_b4 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x_b4)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
    before_pooling = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    unpooling_masks.append(make_unpooling_mask(x, before_pooling))
    # Block 5
    x_b5 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    if target_layer == 5:
        return x_b5, unpooling_masks
    elif target_layer == 4:
        return x_b4, unpooling_masks
    elif target_layer == 3:
        return x_b3, unpooling_masks
    elif target_layer == 2:
        return x_b2, unpooling_masks
    elif target_layer == 1:
        return x_b1, unpooling_masks
This is the unpooling function
def make_unpooling_mask(x, before_pooling):
    t = UpSampling2D()(x)
    mask = Lambda(lambda x: K.cast(K.greater(x[0], x[1]), dtype='float32'))([t, before_pooling])
    return mask
I am getting this error
Exception has occurred: ValueError
Output tensors to a Model must be the output of a Keras `Layer` (thus holding past layer metadata). Found: [<tf.Tensor 'lambda_1/Cast:0' shape=(?, 256, 256, 64) dtype=float32>, <tf.Tensor 'lambda_2/Cast:0' shape=(?, 128, 128, 128) dtype=float32>, <tf.Tensor 'lambda_3/Cast:0' shape=(?, 64, 64, 256) dtype=float32>, <tf.Tensor 'lambda_4/Cast:0' shape=(?, 32, 32, 512) dtype=float32>]
This happens at the line that builds the model: model = Model(inputs, [layer, unpooling_masks], name='vgg19')
What can be done?
When invoking the Model API, the value for the outputs argument should be a tensor (or list of tensors); in this case it is a list containing a list of tensors, hence the problem. Just unpack the unpooling_masks list (*unpooling_masks) when calling Model:
model = Model(inputs, [layer, *unpooling_masks], name='vgg19')
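An equivalent form, if you prefer plain list concatenation over star-unpacking (or are on a Python version older than 3.5, where unpacking inside a list literal is a syntax error):
model = Model(inputs, [layer] + unpooling_masks, name='vgg19')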

IndexError: index 2992 is out of bounds for axis 0 with size 2992

This is my code in Keras:
batch_size = 16
num_classes = 4
epochs = 30
frames = 5 # The number of frames for each sequence
input_shape = [100 , 100, 3]
def build_rgb_model2():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(32, (3, 3), padding='same'), input_shape=(frames, 120, 180, 3)))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(512)))
    model.add(TimeDistributed(Dense(32, name="first_dense_rgb")))
    model.add(LSTM(20, return_sequences=True, name="lstm_layer_rgb"))
    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_rgb"))
    model.add(GlobalAveragePooling1D(name="global_avg_rgb"))
    return model
def build_rgb_model():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu'), input_shape=(frames, 120, 180, 3)))
    #model.add(TimeDistributed(Activation('relu')))
    # model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(frames, 120, 180, 3))))
    model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(1024)))
    model.add(TimeDistributed(Dense(32, name="first_dense_rgb")))
    model.add(LSTM(20, return_sequences=True, name="lstm_layer_rgb"))
    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_rgb"))
    model.add(GlobalAveragePooling1D(name="global_avg_rgb"))
    return model
def build_flow_model():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu'), input_shape=(frames, 120, 180, 2)))
    # model.add(TimeDistributed(Activation('relu')))
    #model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(frames, 120, 180, 2))))
    model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(1024)))
    model.add(TimeDistributed(Dense(32, name="first_dense_flow")))
    model.add(LSTM(20, return_sequences=True, name="lstm_layer_flow"))
    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_flow"))
    model.add(GlobalAveragePooling1D(name="global_avg_flow"))
    return model
def build_model():
    rgb_model = build_rgb_model()
    flow_model = build_flow_model()
    out = average([rgb_model.output, flow_model.output])
    model = Model([rgb_model.input, flow_model.input], out)
    #model.add(add([rgb_model, flow_model]))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    plot_model(model, to_file='model/cnn_lstm.png')
    return model
def batch_iter(split_file):
    split_data = np.genfromtxt(split_file, dtype=None, delimiter=",")
    total_seq_num = len(split_data)
    ADRi = "/UCF3"
    split_data2 = np.genfromtxt("C.txt", dtype=None, delimiter=",")
    num_batches_per_epoch = int(((int(split_data2[4]) - 1) / frames - 1) / batch_size) - 300
    total_frames = int(split_data2[4]) - 1
    listing = sorted(os.listdir(ADRi))
    indices2 = []

    def data_generator():
        p = 0
        while 1:
            indices = np.random.permutation(np.arange(total_seq_num))
            t = 0
            for j in range(total_seq_num):
                for k in range(int(split_data[j][1] / frames)):
                    indices2.append(j * 1000 + k * frames)
                    t = t + 1
            indices3 = np.random.permutation(np.arange(indices2.__len__()))
            for batch_num in range(num_batches_per_epoch):  # for each batch
                start_index = batch_num * batch_size
                end_index = ((batch_num + 1) * batch_size) - 1
                RGB = []
                FLOW = []
                Y = []
                for i in range(start_index, end_index):  # for each sequence
                    ii = int(indices3[i] / 1000)  # seq number
                    image_dir = split_data[indices[ii]][0].decode("UTF-8")
                    seq_len = int(split_data[indices[ii]][1])
                    y = int(split_data[indices[ii]][2])
                    # To reduce the computational time, data augmentation is performed for each frame
                    jj = min(int(indices3[i] / 1000), seq_len - frames - 1)
                    augs_rgb = []
                    augs_flow = []
                    for j in range(jj, jj + frames):  # for each frame
                        # Get frames at regular interval. start from frame index 1
                        frame = j
                        hf = h5py.File(image_dir + ".h5", 'r')
                        # rgb image
                        im = hf.get(str(frame))
                        #rgb_i = load_img("%s/img_%05d.jpg" % (image_dir, frame), target_size=(224, 224))
                        rgb = im[:, :, :, (3, 5, 7)].transpose(0, 3, 1, 2)
                        #img_gen = ImageDataGenerator(horizontal_flip=True)
                        #= img_gen.apply_transform(rgb)
                        #img = Image.fromarray(rgb)
                        #rgb_flip_i = img.transpose(Image.FLIP_LEFT_RIGHT)  # augmentation
                        rgb_flip = np.flip(rgb, 2)
                        #t = np.append(rgb, rgb_flip, axis=0)
                        t = np.concatenate([rgb], axis=0)
                        augs_rgb.append(t)
                        # flow image
                        flow_x = im[:, :, :, 1]
                        flow_y = im[:, :, :, 2]
                        flow_x_flip = -np.flip(flow_x, 2)  # augmentation
                        flow_y_flip = np.flip(flow_y, 2)  # augmentation
                        flow = np.concatenate([flow_x, flow_y], axis=0)
                        flow_flip = np.concatenate([flow_x_flip, flow_y_flip], axis=0)
                        #tt = np.concatenate([flow[None, :, :, :], flow_flip[None, :, :, :]], axis=0)
                        tt = np.concatenate([flow[None, :, :, :]], axis=0)
                        augs_flow.append(tt)
                    augs_rgb = np.array(augs_rgb).transpose((1, 0, 3, 4, 2))
                    augs_flow = np.array(augs_flow).transpose((1, 0, 3, 4, 2))
                    RGB.extend(augs_rgb)
                    FLOW.extend(augs_flow)
                    Y.extend([y])
                RGB1 = np.array(RGB)
                FLOW1 = np.array(FLOW)
                Y1 = np_utils.to_categorical(Y, num_classes)
                p = p + 1
                yield ([RGB1, FLOW1], Y1)

    return num_batches_per_epoch, data_generator()
if __name__ == "__main__":
# Build model
model = build_model()
model.summary()
print("Built model")
# Make batches
train_steps, train_batches = batch_iter(train_split_file)
valid_steps, valid_batches = batch_iter(test_split_file)
# Train model
history = model.fit_generator(train_batches, steps_per_epoch=train_steps,
epochs=10, verbose=1, validation_data=valid_batches,
validation_steps=valid_steps)
plot_history(history)
print("Trained model")
When I set train_steps to 100 it works, but when I use the real number of samples (462), at the end of the last run I got this error:
461/462 [============================>.] - ETA: 2s - loss: 6.6250 - acc: 0.4111
Traceback (most recent call last):
  File "Two.py", line 332, in <module>
    validation_steps=valid_steps)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training.py", line 1418, in fit_generator
    initial_epoch=initial_epoch)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training_generator.py", line 234, in fit_generator
    workers=0)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training.py", line 1472, in evaluate_generator
    verbose=verbose)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training_generator.py", line 330, in evaluate_generator
    generator_output = next(output_generator)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/utils/data_utils.py", line 709, in get
    six.reraise(*sys.exc_info())
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/utils/data_utils.py", line 685, in get
    inputs = self.queue.get(block=True).get()
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
    raise self._value
IndexError: index 2992 is out of bounds for axis 0 with size 2992
I do not even have a value at index 2992!
Arrays in Python start at 0, so if there are 2992 elements, the valid indices run from 0 to 2991; asking for index 2992 is one past the end.
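Beyond the off-by-one rule, one plausible source of the stray index in the posted generator (a guess from the code, not a confirmed diagnosis): indices2 is created once in batch_iter but appended to on every pass of the while loop, so it grows each epoch while indices keeps only total_seq_num entries; also, the encoding j * 1000 + k * frames spills into the next sequence id whenever a clip has 1000 or more frames. Resetting the list each epoch keeps the derived sequence numbers in range. A minimal sketch of the relevant lines:
def data_generator():
    p = 0
    while 1:
        indices = np.random.permutation(np.arange(total_seq_num))
        indices2 = []  # reset every epoch; a list defined outside the loop keeps growing
        for j in range(total_seq_num):
            for k in range(int(split_data[j][1] / frames)):
                indices2.append(j * 1000 + k * frames)
        indices3 = np.random.permutation(np.arange(len(indices2)))
        # ... rest of the batch loop unchanged ...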

Why does my code throw KeyError: 'epochs' when I implement Fully Convolutional Networks with Keras?

I am trying to implement an FCN with TensorFlow, and I used Keras. After the first epoch of training, I got this error. I think it should be related to ModelCheckpoint() and model.fit(), because when I delete the callbacks in model.fit(), it can finish all epochs.
Any help would be much appreciated. Thanks.
This is part of my code:
from __future__ import print_function
import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.callbacks import *
from keras.utils.vis_utils import plot_model
import shutil
import matplotlib.pyplot as plt
from pylab import *
from keras.regularizers import l2
from keras.layers import *
from keras.engine import Layer
from keras.applications.vgg16 import *
from keras.models import *
from keras.applications.imagenet_utils import _obtain_input_shape
import tensorflow as tf
import time
K.set_image_data_format('channels_last') # TF dimension ordering in this code
starttime = time.clock()
img_rows = 512
img_cols = 512
smooth = 1.
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def FCN32():
    # https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels.h5
    inputs = Input((img_rows, img_cols, 1))
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
    conv1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(conv1)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(conv1)
    # Block 2
    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(pool1)
    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(conv2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(conv2)
    # Block 3
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(pool2)
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(conv3)
    conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(conv3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(conv3)
    # Block 4
    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(pool3)
    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(conv4)
    conv4 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(conv4)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(conv4)
    # Block 5
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(conv5)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(conv5)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(conv5)
    up1 = (Conv2D(32, (7, 7), activation='relu', padding='same'))(pool5)
    drop1 = Dropout(0.5)(up1)
    up2 = (Conv2D(32, (1, 1), activation='relu', padding='same'))(drop1)
    drop2 = Dropout(0.5)(up2)
    up3 = (Conv2D(1, (1, 1), kernel_initializer='he_normal'))(drop1)
    up4 = Conv2DTranspose(1, kernel_size=(64, 64), strides=(30, 30), use_bias=False)(up3)
    crop1 = Cropping2D(cropping=((1, 1), (1, 1)))(up4)
    model = Model(inputs=[inputs], outputs=[crop1])
    model.compile(optimizer=SGD(lr=0.005), loss='binary_crossentropy', metrics=[dice_coef])
    return model
def train_and_predict():
    print('-' * 30)
    print('Loading train data...')
    print('-' * 30)
    imgs_train = np.load('train_data.npy')
    imgs_label_train = np.load('train_label.npy')
    imgs_train = imgs_train.reshape(1000, 512, 512, 1)
    imgs_label_train = imgs_label_train.reshape(1000, 512, 512, 1)
    # imgs_train = preprocess(imgs_train)
    ##imgs_mask_train = preprocess(imgs_mask_train)
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    imgs_train -= mean
    imgs_train /= std
    imgs_label_train = imgs_label_train.astype('float32')
    imgs_label_train /= 255.  # scale masks to [0, 1]
    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    log_filepath = '/logs'
    model = FCN32()
    model_checkpoint = ModelCheckpoint('fcn32_weights.{epochs:02d-{dice_coef:.2f}}.h5', monitor='val_loss',
                                       save_best_only=True)
    tb_cb = TensorBoard(log_dir=log_filepath, write_images=False, histogram_freq=1, write_graph=True)
    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(imgs_train, imgs_label_train, batch_size=10, epochs=10, verbose=1, shuffle=True,
              validation_split=0.02,
              callbacks=[model_checkpoint, tb_cb])
    print('-' * 30)
    print('Loading test data...')
    print('-' * 30)
    imgs_test = np.load('test_data.npy')
    imgs_test = imgs_test.reshape(100, 512, 512, 1)
    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std
    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('fcn32_weights.h5')
    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, batch_size=10, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
if __name__ == '__main__':
    train_and_predict()
endtime = time.clock()
print("The train_and_predict running time is %g s" % (endtime - starttime))
Since you get the error at the end of the first epoch, it is likely due to your ModelCheckpoint callback: you have placed the first } in the wrong place, and the format key is epoch, not epochs.
Try
model_checkpoint = ModelCheckpoint('fcn32_weights.{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', save_best_only=True)
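Two follow-up points, inferred from the posted code rather than stated in the answer above: the filename placeholders must match keys Keras actually supplies at epoch end (epoch, plus entries of the logs dict such as val_loss, dice_coef, or val_dice_coef), and a templated filename writes a different file per epoch, so the later model.load_weights('fcn32_weights.h5') call has to be pointed at one of the generated names. A variant that keeps the metric in the filename:
model_checkpoint = ModelCheckpoint('fcn32_weights.{epoch:02d}-{val_dice_coef:.2f}.h5',
                                   monitor='val_loss', save_best_only=True)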
