ValueError: Failed to find data adapter that can handle input - python

import numpy as np
import keras
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Input, Add, BatchNormalization, Lambda, Concatenate, Reshape
from keras.models import Model
import tensorflow as tf
import os
import pandas as pd
import pydicom as dicom
import matplotlib.pyplot as plt
import math
import pydicom
import cv2
def resize_dcm(dcmdosyası, boyut):
    dcmgörsel = pydicom.read_file(dcmdosyası)
    img = dcmgörsel.pixel_array
    img = cv2.resize(img, boyut, interpolation=cv2.INTER_AREA)
    dcmgörsel.PixelData = img.tobytes()
    dcmgörsel.Rows, dcmgörsel.Columns = img.shape
    return dcmgörsel
folder = 'C:/Users/USER/Desktop/data/'
ana1 = [f.path for f in os.scandir(folder) if f.is_dir()]
for ana2 in ana1:
    görseller = [f.path for f in os.scandir(ana2) if f.is_file()]
    for görsellercücük in görseller:
        resize2 = resize_dcm(görsellercücük, (800, 800))
        yeni = os.path.join(ana2, os.path.basename(görsellercücük))
        pydicom.write_file(yeni, resize2)
inputs = Input(shape=(800,800,1))
input2 = Input(shape=(1,))
input3 = Input(shape=(1,))
x = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = Dropout(0.5)(x)
x = Conv2D(128, (3, 3), activation='relu', strides=2, padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', strides=2, padding='same')(x)
x = BatchNormalization()(x)
res1 = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
res2 = Conv2D(128, (3, 3), activation='relu', padding='same')(res1)
x = Add()([x, res2])
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = Dropout(0.5)(x)
x = Conv2D(256, (3, 3), activation='relu', strides=2, padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', strides=2, padding='same')(x)
x = BatchNormalization()(x)
res1 = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
res2 = Conv2D(256, (3, 3), activation='relu', padding='same')(res1)
x = Add()([x, res2])
res1 = Conv2D(256, (3, 3), activation='relu', padding='same')(x)
res2 = Conv2D(256, (3, 3), activation='relu', padding='same')(res1)
x = Add()([x, res2])
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = Conv2D(512, (3, 3), activation='relu', strides=2, padding='same')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', strides=2, padding='same')(x)
x = BatchNormalization()(x)
res1 = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
res2 = Conv2D(512, (3, 3), activation='relu', padding='same')(res1)
x = Add()([x, res2])
res1 = Conv2D(512, (3, 3), activation='relu', padding='same')(x)
res2 = Conv2D(512, (3, 3), activation='relu', padding='same')(res1)
x = Add()([x, res2])
x = Flatten()(x)
x = Concatenate()([x,input2,input3])
x = Dense(4096, activation='relu')(x)
x = Dense(4096, activation='relu')(x)
output = Dense(4, activation='softmax')(x)   # BIRADS score
output2 = Dense(4, activation='softmax')(x)  # composition
output3 = Dense(2, activation='softmax')(x)  # region 1
output4 = Dense(2, activation='softmax')(x)  # region 2
output5 = Dense(2, activation='softmax')(x)  # region 3
model = Model([inputs, input2,input3], [output,output2, output3])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
data = pd.read_excel('C:/Users/USER/Desktop/asdsadasdas.xlsx')
data.rename(columns={'HASTANO': 'ID', 'BIRADS KATEGORİSİ':'birads', 'MEME KOMPOZİSYONU': 'comp', 'KADRAN BİLGİSİ (SAĞ)': 'areaR', 'KADRAN BİLGİSİ (SOL)': 'areaL'}, inplace=True)
data.replace(['BI-RADS0', 'BI-RADS1-2', 'BI-RADS4-5'], [0,1,2], inplace=True)
data.drop(data.columns[5], inplace=True, axis=1)
data['comp'].value_counts()
data.fillna(0, inplace=True)
def transform_to_hu(medical_image, image):
    intercept = medical_image.RescaleIntercept
    slope = medical_image.RescaleSlope
    hu_image = image * slope + intercept
    return hu_image
def train():
    mainDir = 'C:/Users/USER/Desktop/data/'
    for d in data.iterrows():
        patientDir = mainDir + str(d[1][0])
        for f in os.listdir(patientDir):
            yon = 1 if f[0] == 'R' else 0
            view = 1 if f[1] == 'M' else 0
            dcm = dicom.dcmread(patientDir + '/' + f)
            image = transform_to_hu(dcm, dcm.pixel_array)
            o1 = 0
            o2 = 0
            o3 = 0
            areaStr = d[1]['area' + ('R' if yon == 1 else 'L')]
            if areaStr != 0:
                areaStr = areaStr.replace('[', '').replace(']', '')
                lst = areaStr.split(',')
                newLst = list(map(lambda x: x.replace('"', ''), lst))
                print(newLst)
                for a in newLst:
                    if a == 'MERKEZ':
                        o2 = 1
                        continue
                    t = a.split(' ')
                    if view == 1:  # MLO view
                        if t[0] == 'ÜST':
                            o1 = 1
                        else:
                            o3 = 1
                    else:
                        if t[1] == 'DIŞ':
                            o1 = 1
                        else:
                            o3 = 1
            print(o1, o2, o3, sep=', ')
            model.fit([image, yon, view], [d[1]['birads'], d[1]['comp'], o1, o2, o3])
            break
train()
Error:
Traceback (most recent call last):
  File "C:\Users\USER\Desktop\aaac.py", line 157, in <module>
    train()
  File "C:\Users\USER\Desktop\aaac.py", line 155, in train
    model.fit([image, yon, view], [d[1]['birads'], d[1]['comp'], o1, o2, o3])
  File "C:\Users\USER\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "C:\Users\USER\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\keras\engine\data_adapter.py", line 1081, in select_data_adapter
    raise ValueError(
ValueError: Failed to find data adapter that can handle input: (<class 'list'> containing values of types {"<class 'numpy.ndarray'>", "<class 'int'>"}), (<class 'list'> containing values of types {"<class 'str'>", "<class 'int'>"})
Process finished with exit code 1
Hello, I wrote a deep learning model. I first shaped the code according to the ResNet architecture, and then tried to produce an output by combining the Excel data with the images. I ran into the error above and I don't know why.
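For reference, model.fit cannot consume plain Python lists that mix ndarrays, ints, and strings: each input and each target must be a NumPy array (or tensor) with a leading batch dimension, and the number of targets must match the number of model outputs (the Model above is built with three outputs but fit receives five targets). A minimal sketch of the kind of conversion that avoids the data-adapter error, assuming the DICOMs were already resized to 800x800 and the labels are integer-coded (the x_*/y_* names are just illustrative):

import numpy as np
from keras.utils import to_categorical  # one-hot encoding helper

x_img = image.reshape(1, 800, 800, 1).astype('float32')  # add batch + channel axes
x_yon = np.array([[yon]], dtype='float32')    # shape (1, 1)
x_view = np.array([[view]], dtype='float32')  # shape (1, 1)

# one-hot encode the targets to match the softmax output sizes
y_birads = to_categorical([d[1]['birads']], num_classes=4)
y_comp = to_categorical([d[1]['comp']], num_classes=4)
y_region1 = to_categorical([o1], num_classes=2)  # one target per output head

model.fit([x_img, x_yon, x_view], [y_birads, y_comp, y_region1])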

Related

Having an array-passing problem for a multi-scale CNN network

I am dealing with audio data (.wav files). I created two CNN networks, concatenated their outputs, and the merged model produces the classification. But when training it, Keras says the model expects to see 2 arrays but it got one array. I would appreciate any help; I have been struggling with this for weeks.
This is my config file:
class Config:
    def __init__(self, mode='conv', nfilt=26, nfeat=40, nfft=4096, rate=16000):
        self.mode = mode
        self.nfilt = nfilt
        self.nfeat = nfeat
        self.nfft = nfft
        self.rate = rate
        self.step = int(rate / 10)
        self.model_path = os.path.join('models', mode + '.model')
        self.p_path = os.path.join('pickles', mode + '.p')
I built one function to compute my MFCC features:
def build_rand_feat():
    X = []
    y = []
    _min, _max = float('inf'), -float('inf')
    for _ in tqdm(range(n_sample)):
        rand_class = np.random.choice(class_dist.index, p=prob_dist)
        file = np.random.choice(df[df.label == rand_class].index)
        data, rate = sf.read('audios/' + file)
        label = df.at[file, 'label']
        rand_index = np.random.randint(0, data.shape[0] - config.step)
        sample = data[rand_index:rand_index + config.step]
        X_sample = mfcc(sample, rate, winlen=0.05, winstep=0.02, numcep=config.nfeat, nfilt=config.nfilt, nfft=config.nfft)
        _min = min(np.amin(X_sample), _min)
        _max = max(np.amax(X_sample), _max)
        X.append(X_sample)
        y.append(classes.index(label))
    config.min = _min
    X = np.array(X)
    y = np.array(y)
    X = (X - _min) / (_max - _min)
    if config.mode == 'conv':
        X = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)
        print(X.shape)
    elif config.mode == 'time':
        X = X.reshape(X.shape[0], X.shape[1], X.shape[2])
    y = to_categorical(y, num_classes=10)
    config.data = (X, y)
    return X, y
And this is my multi-scale network:
def get_conv_model():
    main_model = Sequential()
    main_model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=input_shape))
    main_model.add(MaxPool2D((2, 2)))
    main_model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    main_model.add(MaxPool2D((2, 2), padding='same'))
    main_model.add(Flatten())
    second_model = Sequential()
    second_model.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=input_shape))
    second_model.add(MaxPool2D((2, 2), padding='same'))
    second_model.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    second_model.add(MaxPool2D((2, 2)))
    second_model.add(Flatten())
    # first model upper
    main_model = Sequential()
    main_model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=input_shape))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Flatten())
    # second model lower
    lower_model1 = Sequential()
    lower_model1.add(MaxPool2D(strides=(1, 1), padding='same', input_shape=input_shape))
    lower_model1.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    lower_model1.add(BatchNormalization())
    lower_model1.add(Dropout(0.3))
    lower_model1.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    lower_model1.add(BatchNormalization())
    lower_model1.add(Dropout(0.3))
    lower_model1.add(Conv2D(512, (3, 3), activation='relu', strides=(1, 1), padding='same'))
    lower_model1.add(Flatten())
    # merged models
    merged_model = Concatenate()([main_model.output, lower_model1.output])
    x = Dense(256, activation='relu')(merged_model)
    x = Dropout(0.3)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.3)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.3)(x)
    output = Dense(10, activation='softmax')(x)
    final_model = Model(inputs=[main_model.input, lower_model1.input], outputs=[output])
    final_model.summary()
    final_model.compile(loss="categorical_crossentropy", optimizer=Adam(0.001), metrics=['acc'])
    print(K.eval(final_model.optimizer.lr))
    #class_weight = compute_class_weight('balanced', np.unique(y_flat), y_flat)
    final_model.fit(X, y, epochs=10, batch_size=64, shuffle=True, validation_split=0.3)
    return main_model, lower_model1, merged_model
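Note that final_model is built with two inputs (main_model.input and lower_model1.input), so fit must receive a list of two arrays, one per branch; a single array produces exactly the "expects to see 2 arrays" complaint. A minimal sketch, assuming both branches should consume the same MFCC feature array X:

# the merged model has two input branches, so pass one array per input
final_model.fit([X, X], y, epochs=10, batch_size=64, shuffle=True, validation_split=0.3)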

Training loss is decreasing but validation loss is not

I'm trying to do semantic segmentation of skin lesions. I used SegNet as my model, but during training the training loss kept decreasing while the validation loss did not, i.e. the model is overfitting. I tuned the learning rate many times and reduced the number of dense layers, but no solution worked. I also used dropout, but the overfitting persists.
Here is the graph
Here is the code of my model:
def segnet(input_size=(512, 512, 1)):
    # Encoding layer
    img_input = Input(input_size)
    x = Conv2D(64, (3, 3), padding='same', name='conv1', strides=(1, 1))(img_input)
    x = BatchNormalization(name='bn1')(x)
    x = Activation('relu')(x)
    x = Conv2D(64, (3, 3), padding='same', name='conv2')(x)
    x = BatchNormalization(name='bn2')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D()(x)
    x = Dropout(0.7)(x)
    x = Conv2D(128, (3, 3), padding='same', name='conv3')(x)
    x = BatchNormalization(name='bn3')(x)
    x = Activation('relu')(x)
    x = Conv2D(128, (3, 3), padding='same', name='conv4')(x)
    x = BatchNormalization(name='bn4')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D()(x)
    x = Dropout(0.7)(x)
    x = Conv2D(256, (3, 3), padding='same', name='conv5')(x)
    x = BatchNormalization(name='bn5')(x)
    x = Activation('relu')(x)
    x = Conv2D(256, (3, 3), padding='same', name='conv6')(x)
    x = BatchNormalization(name='bn6')(x)
    x = Activation('relu')(x)
    x = Conv2D(256, (3, 3), padding='same', name='conv7')(x)
    x = BatchNormalization(name='bn7')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D()(x)
    x = Dropout(0.7)(x)
    x = Conv2D(512, (3, 3), padding='same', name='conv8')(x)
    x = BatchNormalization(name='bn8')(x)
    x = Activation('relu')(x)
    x = Conv2D(512, (3, 3), padding='same', name='conv9')(x)
    x = BatchNormalization(name='bn9')(x)
    x = Activation('relu')(x)
    x = Conv2D(512, (3, 3), padding='same', name='conv10')(x)
    x = BatchNormalization(name='bn10')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D()(x)
    x = Dropout(0.7)(x)
    x = Conv2D(512, (3, 3), padding='same', name='conv11')(x)
    x = BatchNormalization(name='bn11')(x)
    x = Activation('relu')(x)
    x = Conv2D(512, (3, 3), padding='same', name='conv12')(x)
    x = BatchNormalization(name='bn12')(x)
    x = Activation('relu')(x)
    x = Conv2D(512, (3, 3), padding='same', name='conv13')(x)
    x = BatchNormalization(name='bn13')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D()(x)
    x = Dropout(0.7)(x)
    x = Dense(256, activation='relu', name='fc1')(x)
    x = Dense(256, activation='relu', name='fc2')(x)
    # Decoding Layer
    x = UpSampling2D()(x)
    x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv1')(x)
    x = BatchNormalization(name='bn14')(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv2')(x)
    x = BatchNormalization(name='bn15')(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv3')(x)
    x = BatchNormalization(name='bn16')(x)
    x = Activation('relu')(x)
    x = Dropout(0.7)(x)
    x = UpSampling2D()(x)
    x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv4')(x)
    x = BatchNormalization(name='bn17')(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv5')(x)
    x = BatchNormalization(name='bn18')(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(256, (3, 3), padding='same', name='deconv6')(x)
    x = BatchNormalization(name='bn19')(x)
    x = Activation('relu')(x)
    x = Dropout(0.7)(x)
    x = UpSampling2D()(x)
    x = Conv2DTranspose(256, (3, 3), padding='same', name='deconv7')(x)
    x = BatchNormalization(name='bn20')(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(256, (3, 3), padding='same', name='deconv8')(x)
    x = BatchNormalization(name='bn21')(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(128, (3, 3), padding='same', name='deconv9')(x)
    x = BatchNormalization(name='bn22')(x)
    x = Activation('relu')(x)
    x = Dropout(0.7)(x)
    x = UpSampling2D()(x)
    x = Conv2DTranspose(128, (3, 3), padding='same', name='deconv10')(x)
    x = BatchNormalization(name='bn23')(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(64, (3, 3), padding='same', name='deconv11')(x)
    x = BatchNormalization(name='bn24')(x)
    x = Activation('relu')(x)
    x = Dropout(0.7)(x)
    x = UpSampling2D()(x)
    x = Conv2DTranspose(64, (3, 3), padding='same', name='deconv12')(x)
    x = BatchNormalization(name='bn25')(x)
    x = Activation('relu')(x)
    x = Conv2DTranspose(1, (3, 3), padding='same', name='deconv13')(x)
    x = BatchNormalization(name='bn26')(x)
    x = Activation('sigmoid')(x)
    pred = Reshape((input_size[0], input_size[1]))(x)
    return Model(inputs=img_input, outputs=pred)
I have used the same dataset with another model, UNet, and there was no overfitting; it only happens with the SegNet model.
For more information :
model = segnet(input_size=(224, 224, INPUT_CHANNELS))
model.compile(optimizer=Adam(lr=1e-5), loss=[dice_coef_loss],
              metrics=[iou, dice_coe, precision, recall, accuracy])
model_checkpoint = ModelCheckpoint(str(j+1) + '_skin_leison.hdf5',
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True)
callbacks_list = [model_checkpoint]
history = model.fit(X_train_cv,
                    y_train_cv,
                    epochs=70,
                    callbacks=callbacks_list,
                    batch_size=8,
                    validation_data=(X_valid_cv, y_valid_cv))
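For what it's worth, beyond dropout, two standard levers against this kind of overfitting are stopping on validation loss and decaying the learning rate on plateau. A minimal sketch with stock Keras callbacks (the patience values are illustrative, and restore_best_weights needs a reasonably recent Keras):

from keras.callbacks import EarlyStopping, ReduceLROnPlateau

# stop when validation loss stalls, after first halving the learning rate
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=1e-7)

history = model.fit(X_train_cv, y_train_cv,
                    epochs=70, batch_size=8,
                    callbacks=[model_checkpoint, early_stop, reduce_lr],
                    validation_data=(X_valid_cv, y_valid_cv))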

Keras Output tensors to a Model must be the output of a Keras `Layer` (thus holding past layer metadata)

I am trying to implement unpooling masks in Keras. I have a VGG encoder that outputs a specific feature map like relu5_1 and a list of unpooling masks.
def VGG19(input_tensor=None, input_shape=None, target_layer=1):
    """
    VGG19, up to the target layer (1 for relu1_1, 2 for relu2_1, etc.)
    """
    if input_tensor is None:
        inputs = Input(shape=input_shape)
    else:
        inputs = Input(tensor=input_tensor, shape=input_shape)
    layer, unpooling_masks = vgg_layers(inputs, target_layer)
    model = Model(inputs, [layer, unpooling_masks], name='vgg19')
    load_weights(model)
    return model, unpooling_masks

def vgg_layers(inputs, target_layer):
    unpooling_masks = []
    # Block 1
    x_b1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x_b1)
    before_pooling = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    unpooling_masks.append(make_unpooling_mask(x, before_pooling))
    # Block 2
    x_b2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x_b2)
    before_pooling = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    unpooling_masks.append(make_unpooling_mask(x, before_pooling))
    # Block 3
    x_b3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x_b3)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
    before_pooling = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    unpooling_masks.append(make_unpooling_mask(x, before_pooling))
    # Block 4
    x_b4 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x_b4)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
    before_pooling = x
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    unpooling_masks.append(make_unpooling_mask(x, before_pooling))
    # Block 5
    x_b5 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    if target_layer == 5:
        return x_b5, unpooling_masks
    elif target_layer == 4:
        return x_b4, unpooling_masks
    elif target_layer == 3:
        return x_b3, unpooling_masks
    elif target_layer == 2:
        return x_b2, unpooling_masks
    elif target_layer == 1:
        return x_b1, unpooling_masks
This is the unpooling function
def make_unpooling_mask(x, before_pooling):
    t = UpSampling2D()(x)
    mask = Lambda(lambda x: K.cast(K.greater(x[0], x[1]), dtype='float32'))([t, before_pooling])
    return mask
I am getting this error
Exception has occurred: ValueError Output tensors to a Model must be
the output of a Keras Layer (thus holding past layer metadata).
Found: [<tf.Tensor 'lambda_1/Cast:0' shape=(?, 256, 256, 64)
dtype=float32>, <tf.Tensor 'lambda_2/Cast:0' shape=(?, 128, 128, 128)
dtype=float32>, <tf.Tensor 'lambda_3/Cast:0' shape=(?, 64, 64, 256)
dtype=float32>, <tf.Tensor 'lambda_4/Cast:0' shape=(?, 32, 32, 512)
dtype=float32>]
This happens at the line that builds the model: model = Model(inputs, [layer, unpooling_masks], name='vgg19')
What can be done?
When invoking the Model API, the value of the outputs argument should be a tensor (or list of tensors); in this case it is a list containing a list of tensors, hence the problem. Just unpack the unpooling_masks list (*unpooling_masks) when calling Model:
model = Model(inputs, [layer, *unpooling_masks], name='vgg19')

Create Bagnet on Keras

I am currently trying to implement a BagNet in Keras, following this article: https://www.lyrn.ai/2019/02/14/bagnet-imagenet-with-a-simple-bof-model/
I am building one model to evaluate a single patch and another to average the results over all my patches, and I try to combine them with an input of shape (number of patches, x_patch, y_patch, channel_patch). But it isn't working well. My main issue is evaluating all of the patches and averaging the results.
Here is my code. I'd appreciate any suggestions on how to deal with this.
def build_patch_model(lr, l2, patch_number):
    ##############
    # BRANCH MODEL
    ##############
    regul = regularizers.l2(l2)
    optim = Adam(lr=lr)
    kwargs = {'padding': 'same', 'kernel_regularizer': regul}
    inp = Input(shape=patch_size)  # patch qxqx3
    #x = MaxPooling2D((2, 2), strides=(2, 2))(inp)
    #x = BatchNormalization()(x)
    x = Conv2D(32, (1, 1), activation='relu', **kwargs)(inp)
    for _ in range(6): x = subblock(x, 32, **kwargs)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (1, 1), activation='relu', **kwargs)(x)
    for _ in range(6): x = subblock(x, 64, **kwargs)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (1, 1), activation='relu', **kwargs)(x)
    for _ in range(6): x = subblock(x, 128, **kwargs)
    #x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    #x = BatchNormalization()(x)
    x = Conv2D(256, (1, 1), activation='relu', **kwargs)(x)
    for _ in range(6): x = subblock(x, 256, **kwargs)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Conv2D(512, (1, 1), activation='relu', **kwargs)(x)
    for _ in range(6): x = subblock(x, 512, **kwargs)
    #x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    #x = BatchNormalization()(x)
    x = Conv2D(1080, (1, 1), activation='relu', **kwargs)(x)
    for _ in range(6): x = subblock(x, 1080, **kwargs)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Conv2D(2048, (1, 1), activation='relu', **kwargs)(x)
    for _ in range(12): x = subblock(x, 2048, **kwargs)
    x = GlobalMaxPooling2D()(x)  # 2048
    y = Dense(5004, use_bias=False, activation="softmax", name='patch_heatmap')(x)
    branch_model = Model(inp, y)
    branch_model.compile(optim, loss='categorical_crossentropy', metrics=['categorical_crossentropy', 'acc'])
    return branch_model
def build_averaging_model():
    ##############
    # HEAD MODEL #
    ##############
    #x = Average()(input_head)
    inp = Input(shape=(5004,))
    x = Dense(5004, use_bias=False, activation="softmax", name='patch_sum')(inp)
    head_model = Model(inp, x, name='head')
    #x = head_model([xa, xb])
    return head_model
def build_model(head_model, branch_model):
    inp = Input(shape=(patch_number, patch_size[0], patch_size[1], patch_size[2]))
    input_head = []
    for i in range(patch_number):
        input_head.append(branch_model(inp[i, :, :, :]))
    x = Average()(input_head)
    x = head_model(x)
    model = Model(inp, x)
    return model
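One likely culprit in build_model above: inp[i, :, :, :] indexes the batch axis rather than the patch axis. A sketch of an alternative that runs the branch model over every patch and averages the class scores, using TimeDistributed (patch_number and patch_size as assumed in the question; this is one possible layout, not the article's exact method):

from keras.layers import Input, Lambda, TimeDistributed
from keras.models import Model
import keras.backend as K

def build_model(head_model, branch_model):
    # each sample is a stack of patches: (patch_number, h, w, c)
    inp = Input(shape=(patch_number,) + patch_size)
    # apply the patch model to every patch -> (batch, patch_number, 5004)
    per_patch = TimeDistributed(branch_model)(inp)
    # average the per-patch class scores over the patch axis -> (batch, 5004)
    avg = Lambda(lambda t: K.mean(t, axis=1))(per_patch)
    out = head_model(avg)
    return Model(inp, out)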

Error in Image Segmentation using Unet and Keras

I am using a U-Net model for satellite image segmentation with 512x512x3 inputs, but when executing the model I get the following error:
ValueError: Cannot feed value of shape (3, 512, 512) for Tensor 'conv2d_19_target:0', which has shape '(?, ?, ?, ?)'. The code for the U-Net model is:
from __future__ import print_function
import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from data import load_train_data, load_test_data
K.set_image_data_format('channels_last') # TF dimension ordering in this code
img_rows = 512
img_cols = 512
image_channels=3
smooth = 1.
OUTPUT_MASK_CHANNELS = 1
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)
def get_unet():
    inputs = Input((img_rows, img_cols, 3))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)
    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)
    conv_final = Conv2D(OUTPUT_MASK_CHANNELS, (1, 1), activation='sigmoid')(conv9)
    #conv_final = Activation('sigmoid')(conv_final)
    model = Model(inputs, conv_final, name="ZF_UNET_224")
    #conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)
    #model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def preprocess(imgs):
    imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.uint8)
    for i in range(imgs.shape[0]):
        imgs_p[i] = resize(imgs[i], (img_cols, img_rows), preserve_range=True)
    imgs_p = imgs_p[..., np.newaxis]
    return imgs_p
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()
    #imgs_train = preprocess(imgs_train)
    #imgs_mask_train = preprocess(imgs_mask_train)
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    imgs_train -= mean
    imgs_train /= std
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(imgs_train, imgs_mask_train, batch_size=3, epochs=20, verbose=2, shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])
    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)
    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std
    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights.h5')
    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)

if __name__ == '__main__':
    train_and_predict()
The error traceback is as follows:
File "/home/deeplearning/Downloads/Models/ultrasound-nerve-segmentation-master/train.py", line 158, in <module> train_and_predict()
File "/home/deeplearning/Downloads/Models/ultrasound-nerve-segmentation-master/train.py", line 124, in train_and_predict callbacks=[model_checkpoint])
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/keras/engine/training.py", line 1037, in fit
validation_steps=validation_steps)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/keras/engine/training_arrays.py", line 199, in fit_loop
outs = f(ins_batch)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2672, in __call__
return self._legacy_call(inputs)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2654, in _legacy_call
**self.session_kwargs)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 767, in run
run_metadata_ptr)
File "/home/deeplearning/anaconda3/envs/myenv/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 944, in _run
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (3, 512, 512) for Tensor 'conv2d_19_target:0', which has shape '(?, ?, ?, ?)'
Please help me find what is going wrong here.
You set K.set_image_data_format('channels_last'), but your input (3 x 512 x 512) is channels-first. Either change to K.set_image_data_format('channels_first') (which may not work for the U-Net), or permute the dimensions of your input with np.transpose so it has the shape (512, 512, 3).
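A minimal sketch of the second option, assuming the training arrays are stacked channels-first as (N, 3, 512, 512); and if instead the masks are (N, 512, 512) with no channel axis, adding one gives the 4-D shape the target tensor expects:

import numpy as np

# channels-first (N, 3, 512, 512) -> channels-last (N, 512, 512, 3)
imgs_train = np.transpose(imgs_train, (0, 2, 3, 1))

# masks shaped (N, 512, 512): add a trailing channel axis
imgs_mask_train = imgs_mask_train[..., np.newaxis]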
