I'm trying to create a DenseNet, but when I try to compile the model I get this error message. Here's my model:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras.utils import plot_model

dropoutRate = 0.2

def globalAvgPooling(x):
    height = np.shape(x)[2]
    width = np.shape(x)[1]
    poolSize = [width, height]
    return tf.keras.layers.AveragePooling2D(pool_size=poolSize, strides=1)(x)

def concatenation(layers):
    return tf.keras.layers.concatenate(layers, axis=3)

class DenseNet():
    def __init__(self, filters, numBlocks, numClasses, training):
        self.filters = filters
        self.numBlocks = numBlocks
        self.training = training
        self.numClasses = numClasses
        self.model = self.denseNet()

    def bottleneckLayer(self, inputX):
        x = tf.keras.layers.BatchNormalization()(inputs=inputX, training=self.training)
        x = tf.keras.activations.relu(x)
        x = tf.keras.layers.Conv2D(use_bias=False, filters=self.filters, kernel_size=1, strides=1, padding='same')(x)
        x = tf.layers.dropout(inputs=x, rate=dropoutRate, training=self.training)
        x = tf.keras.layers.BatchNormalization()(inputs=inputX, training=self.training)
        x = tf.keras.activations.relu(x)
        x = tf.keras.layers.Conv2D(use_bias=False, filters=self.filters, kernel_size=3, strides=1, padding='same')(x)
        x = tf.layers.dropout(inputs=x, rate=dropoutRate, training=self.training)
        return x

    def denseBlock(self, inputX, numLayers):
        concatLayers = list()
        concatLayers.append(inputX)
        x = self.bottleneckLayer(inputX=inputX)
        concatLayers.append(x)
        for i in range(self.numBlocks - 1):
            x = concatenation(concatLayers)
            x = self.bottleneckLayer(inputX=x)
            concatLayers.append(x)
        x = concatenation(concatLayers)
        return x

    def transitionLayer(self, inputX):
        x = tf.keras.layers.BatchNormalization()(inputs=inputX, training=self.training)
        x = tf.keras.activations.relu(x)
        x = tf.keras.layers.Conv2D(use_bias=False, filters=self.filters, kernel_size=1, strides=1, padding='same')(x)
        x = tf.layers.dropout(inputs=x, rate=dropoutRate, training=self.training)
        x = tf.keras.layers.AveragePooling2D(pool_size=[2, 2], strides=2, padding='valid')(x)
        return x

    def denseNet(self):
        inputs = keras.Input(shape=(32, 32, 3))
        x = tf.keras.layers.Conv2D(use_bias=False, filters=self.filters, kernel_size=7, strides=1, padding='same')(inputs)
        x = self.denseBlock(inputX=x, numLayers=6)    # Dense block 1 with 6 layers
        x = self.transitionLayer(inputX=x)
        x = self.denseBlock(inputX=x, numLayers=12)   # Dense block 2 with 12 layers
        x = self.transitionLayer(inputX=x)
        x = self.denseBlock(inputX=x, numLayers=48)   # Dense block 3 with 48 layers
        x = self.transitionLayer(inputX=x)
        x = self.denseBlock(inputX=x, numLayers=32)   # Dense block 4 with 32 layers (final block)
        x = globalAvgPooling(x=x)
        x = tf.keras.layers.Softmax()(x)
        outputs = tf.keras.layers.Dense(units=self.numClasses)(x)
        model = keras.Model(inputs=inputs, outputs=outputs)
        return x
tf.compat.v1.disable_eager_execution()
growthK = 24
numBlock = 2
cameraModel = DenseNet(filters=growthK, numBlocks=numBlock, numClasses=4, training=True).model
Here is the error message I get:
ValueError: Graph disconnected: cannot obtain value for tensor
Tensor("dropout_109/dropout/mul_1:0", shape=(?, 32, 32, 24), dtype=float32)
at layer "concatenate_48". The following previous layers were accessed without issue:
['input_6', 'conv2d_120', 'batch_normalization_115', 'tf_op_layer_Relu_115', 'conv2d_122', 'dropout_108']
What am I doing wrong?
The error comes from this line:
x = tf.keras.layers.BatchNormalization()(inputs=inputX, training=self.training)
In bottleneckLayer, the second BatchNormalization is applied to inputX again instead of to x, so the output of the first Conv2D/dropout branch is discarded and that part of the graph becomes unreachable. It should look like:
x = tf.keras.layers.BatchNormalization()(inputs=x, training=self.training)
That is why the graph is disconnected. (Note also that denseNet() ends with return x rather than return model, so .model will be a tensor instead of a keras.Model.)
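For reference, a corrected bottleneckLayer could look like the following sketch, which also swaps the deprecated tf.layers.dropout for tf.keras.layers.Dropout (passing the training flag at call time):

def bottleneckLayer(self, inputX):
    # BN -> ReLU -> 1x1 conv -> dropout
    x = tf.keras.layers.BatchNormalization()(inputX, training=self.training)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Conv2D(filters=self.filters, kernel_size=1, strides=1,
                               padding='same', use_bias=False)(x)
    x = tf.keras.layers.Dropout(rate=dropoutRate)(x, training=self.training)
    # BN -> ReLU -> 3x3 conv -> dropout, fed with x (not inputX)
    x = tf.keras.layers.BatchNormalization()(x, training=self.training)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Conv2D(filters=self.filters, kernel_size=3, strides=1,
                               padding='same', use_bias=False)(x)
    x = tf.keras.layers.Dropout(rate=dropoutRate)(x, training=self.training)
    return x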
I am launching the following neural network, and I suspect there is a mistake with some variables (especially in the MeanShift function) in the initialization of the weights and biases.
################
##### RCAN #####
################
def RCAN(n_RCAB=3, n_RG=5, rgb_mean=[0.4488], rgb_std=1, **kwargs):
    def _func(input_shape):
        import sys
        sys.setrecursionlimit(25000)
        # Start of the model
        inputs = Input(input_shape)
        scaled_inputs = tf.keras.layers.experimental.preprocessing.Rescaling(255)(inputs)
        scaled_inputs = MeanShift(rgb_range=255, rgb_mean=rgb_mean, rgb_std=rgb_std)(inputs)
        # Head module
        x = Conv2D(64, 3, padding='same')(scaled_inputs)
        _inp = x
        # Body module
        for i in range(n_RG):
            x = ResidualGroup(x, n_RCAB)
        x = Conv2D(64, 3, padding='same')(x)
        add_global = tf.keras.layers.Add()([x, _inp])
        # Tail module
        x = UpSampling(add_global)
        x = Conv2D(1, 3, padding='same')(x)
        outputs = MeanShift(rgb_range=255, rgb_mean=rgb_mean, rgb_std=rgb_std, sign=1)(x)
        outputs = tf.keras.layers.experimental.preprocessing.Rescaling(1.0 / 255)(outputs)
        # Defining the model
        model = Model(inputs=inputs, outputs=outputs)
        return model
    return _func
######################
##### ABOUT RCAN #####
######################
def RCAB(x):
    _res = x
    x = Conv2D(64, 3, padding='same')(x)
    x = tf.keras.layers.LeakyReLU()(x)
    x = Conv2D(64, 3, padding='same')(x)
    x = ChannelAttention(x)
    x = tf.keras.layers.Add()([x, _res])
    return x

def ChannelAttention(x):
    _res = x
    avg_pool = tf.math.reduce_mean(x, axis=[1, 2, 3], keepdims=True)
    feat_mix = Conv2D(4, 1, padding='same', activation='relu')(avg_pool)
    feat_mix = Conv2D(64, 1, padding='same', activation='sigmoid')(feat_mix)
    multi = tf.keras.layers.Multiply()([feat_mix, _res])
    return multi

def ResidualGroup(x, n_RCAB):
    skip_connection = x
    for i in range(n_RCAB):
        x = RCAB(x)
    x = Conv2D(64, 3, padding='same')(x)
    x = tf.keras.layers.Add()([x, skip_connection])
    return x

def MeanShift(rgb_range, rgb_mean, rgb_std, sign=-1):
    def _func(x):
        # Initialize weights
        weight_initer = tf.constant(value=1., dtype=tf.float32, shape=[1])
        W = tf.Variable(weight_initer, name="Weight", dtype=tf.float32)
        # Initialize bias
        bias_initer = tf.constant(value=[m * rgb_range * sign / rgb_std for m in rgb_mean], dtype=tf.float32)
        b = tf.Variable(bias_initer, name="Bias", dtype=tf.float32)
        x = Conv2D(1, 1, padding="same", kernel_initializer=keras.initializers.Constant(W), bias_initializer=keras.initializers.Constant(b), activation="relu")(x)
        return x
    return _func

def UpSampling(x, act=False):
    features = Conv2D(256, 3, padding='same')(x)
    high_res = tf.nn.depth_to_space(features, 2)
    if act:
        high_res = tf.keras.layers.ReLU(max_value=1)(high_res)
    return high_res
The error occurs when saving the weights (with checkpoints every 5 epochs), and upgrading TensorFlow (to 2.4 or 2.9) doesn't fix it:
ValueError: Unable to serialize VariableSpec(shape=(1,), dtype=tf.float32, trainable=True, alias_id=None) to JSON, because the TypeSpec class <class 'tensorflow.python.ops.resource_variable_ops.VariableSpec'> has not been registered
Moreover, my inputs and targets are percentile-normalized, and I would like to know which layers or operations to add so that the predictions match the targets as closely as possible (on the scaled_inputs and outputs variables of the RCAN function, for example).
I would appreciate any help. Thank you in advance.
EDIT:
OK, I found the problem: I think the kernel and bias were not trainable due to keras.initializers.Constant(W) (and the same for b) in the Conv2D layer, or due to the TensorFlow initializer (I was a bit lost because several of them exist). So I replaced it with the following, and it works:
def MeanShift(rgb_range, rgb_mean, rgb_std, sign=-1):
    def _func(x):
        # Initialize weights and bias
        W = tf.keras.initializers.Constant(1.)
        b = tf.keras.initializers.Constant(rgb_mean[0] * rgb_range * sign / rgb_std)
        x = Conv2D(1, 1, padding="same", kernel_initializer=W, bias_initializer=b)(x)
        return x
    return _func
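As a quick sanity check (a minimal sketch, assuming the fixed MeanShift above and that Input, Conv2D, and Model are imported as in the rest of the code), you can build a tiny model around it and confirm that the 1x1 convolution's kernel and bias show up as trainable weights and that the weights serialize cleanly:

inp = Input((32, 32, 1))
out = MeanShift(rgb_range=255, rgb_mean=[0.4488], rgb_std=1)(inp)
test_model = Model(inputs=inp, outputs=out)
print([w.name for w in test_model.trainable_weights])  # kernel and bias of the 1x1 conv
test_model.save_weights("meanshift_check.h5")          # no VariableSpec serialization error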
I want to combine a ResNet model and an LSTM model, but I get an error: Input 0 of layer "lstm_7" is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: (None, 64). How can I solve this?
Here is the code for combining the models:
def identity_block(input_tensor, units):
    x = layers.Dense(units)(input_tensor)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units)(x)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units)(x)
    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x

def dens_block(input_tensor, units):
    x = layers.Dense(units)(input_tensor)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units)(x)
    x = layers.Activation('relu')(x)
    x = layers.Dense(units)(x)
    shortcut = layers.Dense(units)(input_tensor)
    x = layers.add([x, shortcut])
    x = layers.Activation('relu')(x)
    return x
def ResNet50Regression():
    """Instantiates the ResNet50 architecture.
    # Arguments
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as input for the model.
    # Returns
        A Keras model instance.
    """
    Res_input = layers.Input(shape=(89,))
    width = 64
    x = dens_block(Res_input, width)
    x = identity_block(x, width)
    x = identity_block(x, width)
    x = dens_block(x, width)
    x = identity_block(x, width)
    x = identity_block(x, width)
    x = dens_block(x, width)
    x = identity_block(x, width)
    x = identity_block(x, width)
    # x = layers.BatchNormalization()(x)
    # x = layers.Dense(3)(x)
    model = models.Model(inputs=Res_input, outputs=x)
    return model
model1 = ResNet50Regression()

model = Sequential()
model.add(model1)
model.add(LSTM(64, activation='relu', return_sequences=False))
model.add(Dense(512, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mse', optimizer=Adam(learning_rate=0.002), metrics=['mse'])
model.summary()
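One common fix, sketched below: the ResNet part ends in a 2-D Dense output of shape (None, 64), while LSTM expects a 3-D (batch, timesteps, features) input, so a Reshape layer can add a timestep axis before the LSTM (treating the 64 features as a single timestep is an assumption about how you want to frame the sequence):

from tensorflow.keras.layers import Reshape

model = Sequential()
model.add(model1)                        # output shape: (None, 64)
model.add(Reshape((1, 64)))              # add a timestep axis -> (None, 1, 64)
model.add(LSTM(64, activation='relu', return_sequences=False))
model.add(Dense(512, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mse', optimizer=Adam(learning_rate=0.002), metrics=['mse'])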
I am training a VQVAE on a dataset of 64x64x3 images. I have downloaded it locally and loaded it with Keras in a Jupyter notebook. The problem is that when I run fit() to train the model I get this error: ValueError: Layer "vq_vae" expects 1 input(s), but it received 2 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, 64, 64, 3) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None,) dtype=int32>]. I have taken most of the code from here and adapted it myself, but for some reason I can't make it work for other datasets. You can ignore most of the code here and check it on that page; help is much appreciated.
The code I have so far:
img_height = 64
img_width = 64

dataset = tf.keras.utils.image_dataset_from_directory(
    directory="PATH", image_size=(64, 64), batch_size=64, shuffle=True)

normalization_layer = tf.keras.layers.Rescaling(1. / 255)
normalized_ds = dataset.map(lambda x, y: (normalization_layer(x), y))

AUTOTUNE = tf.data.AUTOTUNE
train_ds = normalized_ds.cache().prefetch(buffer_size=AUTOTUNE)
VQVAE code:
class VectorQuantizer(layers.Layer):
    def __init__(self, num_embeddings, embedding_dim, beta=0.25, **kwargs):
        super().__init__(**kwargs)
        self.embedding_dim = embedding_dim
        self.num_embeddings = num_embeddings
        self.beta = beta
        w_init = tf.random_uniform_initializer()
        self.embeddings = tf.Variable(
            initial_value=w_init(shape=(self.embedding_dim, self.num_embeddings), dtype="float32"),
            trainable=True,
            name="embeddings_vqvae",
        )

    def call(self, x):
        input_shape = tf.shape(x)
        flattened = tf.reshape(x, [-1, self.embedding_dim])
        # Quantization.
        encoding_indices = self.get_code_indices(flattened)
        encodings = tf.one_hot(encoding_indices, self.num_embeddings)
        quantized = tf.matmul(encodings, self.embeddings, transpose_b=True)
        quantized = tf.reshape(quantized, input_shape)
        commitment_loss = self.beta * tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2)
        codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2)
        self.add_loss(commitment_loss + codebook_loss)
        # Straight-through estimator.
        quantized = x + tf.stop_gradient(quantized - x)
        return quantized

    def get_code_indices(self, flattened_inputs):
        # Calculate L2-normalized distance between the inputs and the codes.
        similarity = tf.matmul(flattened_inputs, self.embeddings)
        distances = (
            tf.reduce_sum(flattened_inputs ** 2, axis=1, keepdims=True)
            + tf.reduce_sum(self.embeddings ** 2, axis=0)
            - 2 * similarity
        )
        # Derive the indices for minimum distances.
        encoding_indices = tf.argmin(distances, axis=1)
        return encoding_indices
def get_vqvae(latent_dim=16, num_embeddings=64):
    vq_layer = VectorQuantizer(num_embeddings, latent_dim, name="vector_quantizer")
    encoder = get_encoder(latent_dim)
    decoder = get_decoder(latent_dim)
    inputs = keras.Input(shape=(64, 64, 3))
    encoder_outputs = encoder(inputs)
    quantized_latents = vq_layer(encoder_outputs)
    reconstructions = decoder(quantized_latents)
    return keras.Model(inputs, reconstructions, name="vq_vae")
class VQVAETrainer(keras.models.Model):
    def __init__(self, train_variance, latent_dim=32, num_embeddings=128, **kwargs):
        super(VQVAETrainer, self).__init__(**kwargs)
        self.train_variance = train_variance
        self.latent_dim = latent_dim
        self.num_embeddings = num_embeddings
        self.vqvae = get_vqvae(self.latent_dim, self.num_embeddings)
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
        self.reconstruction_loss_tracker = keras.metrics.Mean(name="reconstruction_loss")
        self.vq_loss_tracker = keras.metrics.Mean(name="vq_loss")

    @property
    def metrics(self):
        return [
            self.total_loss_tracker,
            self.reconstruction_loss_tracker,
            self.vq_loss_tracker,
        ]

    def train_step(self, x):
        with tf.GradientTape() as tape:
            # Outputs from the VQ-VAE.
            reconstructions = self.vqvae(x)
            # Calculate the losses.
            reconstruction_loss = (
                tf.reduce_mean((x - reconstructions) ** 2) / self.train_variance
            )
            total_loss = reconstruction_loss + sum(self.vqvae.losses)
        # Backpropagation.
        grads = tape.gradient(total_loss, self.vqvae.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.vqvae.trainable_variables))
        # Loss tracking.
        self.total_loss_tracker.update_state(total_loss)
        self.reconstruction_loss_tracker.update_state(reconstruction_loss)
        self.vq_loss_tracker.update_state(sum(self.vqvae.losses))
        # Log results.
        return {
            "loss": self.total_loss_tracker.result(),
            "reconstruction_loss": self.reconstruction_loss_tracker.result(),
            "vqvae_loss": self.vq_loss_tracker.result(),
        }
get_vqvae().summary()
Encoder and decoder (I have made changes here, but I don't think this is the problem):
def get_encoder(latent_dim=16):
    encoder_inputs = keras.Input(shape=(64, 64, 3))
    x = layers.Conv2D(64, 3, padding="same")(encoder_inputs)
    x = layers.Dropout(0.25)(x)
    x = layers.Activation("relu")(x)
    x = layers.UpSampling2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Conv2D(64, 3, strides=2, padding="same")(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Activation("relu")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Conv2D(128, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(128, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(256, 3, strides=2, padding="same")(x)
    x = layers.Activation("relu")(x)
    x = layers.UpSampling2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Dense(4 * 4 * 128)(x)
    encoder_outputs = layers.Conv2D(latent_dim, 1, padding="same")(x)
    return keras.Model(encoder_inputs, encoder_outputs, name="encoder")

get_encoder().summary()
def get_decoder(latent_dim=16):
    latent_inputs = keras.Input(shape=get_encoder().output.shape[1:])
    x = layers.Conv2DTranspose(32, 3, padding="same")(latent_inputs)
    x = layers.Dropout(0.25)(x)
    x = layers.Activation("relu")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Conv2DTranspose(32, 3, padding="same")(latent_inputs)
    x = layers.Dropout(0.25)(x)
    x = layers.Activation("relu")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Conv2DTranspose(64, 3, padding="same")(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Activation("relu")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Conv2DTranspose(128, 3, strides=2, padding="same")(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Activation("relu")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Conv2DTranspose(256, 3, padding="same")(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Activation("relu")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.Conv2DTranspose(512, 3, strides=2, padding="same")(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Activation("relu")(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    decoder_outputs = layers.Conv2DTranspose(1, 3, padding="same")(x)
    return keras.Model(latent_inputs, decoder_outputs, name="decoder")

get_decoder().summary()
I get the error here:
vqvae_trainer = VQVAETrainer(data_variance, latent_dim=16, num_embeddings=128)
vqvae_trainer.compile(optimizer=tf.keras.optimizers.Adam())
vqvae_trainer.fit(train_ds, epochs=30, batch_size=128)
This kind of model does not work with labels. Try running:
normalized_ds = dataset.map(lambda x, y: normalization_layer(x))
to discard the labels, since you are actually only interested in x.
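Putting it together, the input pipeline would look something like this sketch (assuming data_variance is computed from the training images as in the original Keras example; note that when fit() receives a tf.data.Dataset, the batch size comes from the dataset, so the batch_size argument is unnecessary):

dataset = tf.keras.utils.image_dataset_from_directory(
    directory="PATH", image_size=(64, 64), batch_size=64, shuffle=True)
normalization_layer = tf.keras.layers.Rescaling(1. / 255)
normalized_ds = dataset.map(lambda x, y: normalization_layer(x))  # keep images only
train_ds = normalized_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)

vqvae_trainer = VQVAETrainer(data_variance, latent_dim=16, num_embeddings=128)
vqvae_trainer.compile(optimizer=tf.keras.optimizers.Adam())
vqvae_trainer.fit(train_ds, epochs=30)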
I am trying to implement ResNet-50 on the PolyU palmprint dataset. What I am trying to do here is build a model that classifies the palm prints of individuals into separate identities; I found that ResNet works best for this kind of problem. I can't seem to get past this error; I have tried many solutions online, but for some reason I can't resolve it:
TypeError: Failed to convert object of type <class 'tensorflow.python.framework.sparse_tensor.SparseTensor'> to Tensor. Contents: SparseTensor(indices=Tensor("DeserializeSparse:0", shape=(None, 2), dtype=int64), values=Tensor("DeserializeSparse:1", shape=(None,), dtype=float32), dense_shape=Tensor("stack:0", shape=(2,), dtype=int64)). Consider casting elements to a supported type.
Identity block:
import tensorflow as tf
from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization, Activation,
                                     ZeroPadding2D, MaxPooling2D, AveragePooling2D,
                                     Flatten, Dense)
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import glorot_uniform

def identity_block(X, f, filters):
    # retrieve filters
    F1, F2, F3 = filters
    X_shortcut = X
    # first layer
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid')(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)
    # second layer
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same')(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)
    # third layer
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid')(X)
    X = BatchNormalization(axis=3)(X)
    # final step: add the shortcut value, then pass through ReLU activation
    # X = Add()([X, X_shortcut])
    X = tf.keras.layers.Add()([X, X_shortcut])
    # X = keras.layers.concatenate()([X, X_shortcut])
    X = Activation('relu')(X)
    return X
Convolutional block:
def convolutional_block(X, f, filters, s=2):
    # retrieve filters
    F1, F2, F3 = filters
    # save the input value
    X_shortcut = X
    # first layer
    X = Conv2D(F1, (1, 1), strides=(s, s))(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)
    # second layer
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding="same")(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)
    # third layer
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding="valid")(X)
    X = BatchNormalization(axis=3)(X)
    # shortcut path
    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid')(X_shortcut)
    X_shortcut = BatchNormalization(axis=3)(X_shortcut)
    # final step: add the shortcut value, then pass through ReLU activation
    # X = Add()([X, X_shortcut])
    X = tf.keras.layers.Add()([X, X_shortcut])
    # X = keras.layers.concatenate()([X, X_shortcut])
    X = Activation('relu')(X)
    return X
ResNet-50 architecture:
def Resnet50(input_shape=(224, 224, 3), classes=309):
    # implementing the ResNet-50 architecture here
    # define the input with shape input_shape
    X_input = Input(input_shape)
    # zero padding
    X = ZeroPadding2D((3, 3))(X_input)
    # stage 1
    X = Conv2D(64, (7, 7), strides=(2, 2))(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
    # stage 2
    X = convolutional_block(X, f=3, filters=[64, 64, 256], s=1)
    # these lines are the conv layers from the convolutional block function:
    # X = Conv2D(F1, (1, 1), strides=(s, s))(X)
    # X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding="same")(X)
    # X = Conv2D(F3, (1, 1), strides=(s, s), name=conv_name_base + '2a')(X)
    X = identity_block(X, 3, [64, 64, 256])
    # same line from identity block
    X = identity_block(X, 3, [64, 64, 256])
    # same line from identity block
    # stage 3
    X = convolutional_block(X, f=3, filters=[128, 128, 512], s=2)
    X = identity_block(X, 3, [128, 128, 512])
    X = identity_block(X, 3, [128, 128, 512])
    X = identity_block(X, 3, [128, 128, 512])
    # stage 4
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], s=2)
    X = identity_block(X, 3, [256, 256, 1024])
    X = identity_block(X, 3, [256, 256, 1024])
    X = identity_block(X, 3, [256, 256, 1024])
    X = identity_block(X, 3, [256, 256, 1024])
    X = identity_block(X, 3, [256, 256, 1024])
    # stage 5
    X = convolutional_block(X, f=3, filters=[512, 512, 2048], s=2)
    # X = identity_block(X, 3, [512, 512, 1024])
    # X = identity_block(X, 3, [512, 512, 1024])
    # average pooling
    X = AveragePooling2D((2, 2), name='avg_pool')(X)
    # architecture complete
    # output
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)
    # create model
    model = Model(inputs=X_input, outputs=X, name='Resnet50')
    return model

model = Resnet50(input_shape=(224, 224, 3), classes=309)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_x, train_y, epochs=10, batch_size=32)
Looking at your ResNet-50, it seems you are missing the last two identity blocks of stage 5:

X = convolutional_block(X, f=3, filters=[512, 512, 2048], s=2)
X = identity_block(X, 3, [512, 512, 2048])  # this part seems to be missing
X = identity_block(X, 3, [512, 512, 2048])

By the way, you can just use Keras Applications as a shortcut:

import tensorflow as tf
tf.keras.applications.ResNet50(include_top=True, classes=200, weights=None)
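For this task specifically, a minimal usage sketch of that shortcut (assuming 309 classes and one-hot encoded train_y, matching the compile/fit calls in the question):

import tensorflow as tf

model = tf.keras.applications.ResNet50(
    include_top=True, weights=None, input_shape=(224, 224, 3), classes=309)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_x, train_y, epochs=10, batch_size=32)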
I am building an autoencoder model with one encoder and two decoders. I want the outputs of the encoder to be connected to the decoders like a U-Net, but when I try to return a list of four layer outputs to connect to the decoder, I get a "Graph disconnected" error. Does Keras have something like PyTorch's ModuleList?
I can't just connect the encoder and decoder directly, because I need to do something with the encoder outputs first, and this model has two decoders. Is there any Keras API for doing this?
Keras: 2.2.4
TensorFlow: 1.12
Python: 3.6.8
Encoder part:
def enc_flow(e_dims, ae_dims, lowest_dense_res):
    def func(inp):
        x0 = downscale(e_dims, 3, 1, False)(inp)
        x1 = downscale(e_dims * 2, 3, 1, True)(x0)
        x2 = downscale(e_dims * 4, 3, 1, True)(x1)
        x3 = downscale(e_dims * 8, 3, 1, True)(x2)
        x3 = Dense(lowest_dense_res * lowest_dense_res * ae_dims)(x3)
        x3 = Reshape((lowest_dense_res, lowest_dense_res, ae_dims))(x3)
        x4 = upscale(ae_dims, True)(x3)
        par_list = [x0, x2, x3, x4]
        return x4
    return func
Decoder part:
def dec_flow(output_nc, d_ch_dims, add_residual_blocks=True):
    dims = output_nc * d_ch_dims

    def ResidualBlock(dim):
        def func(inp):
            x = Conv2D(dim, kernel_size=3, padding='same')(inp)
            x = LeakyReLU(0.2)(x)
            x = Conv2D(dim, kernel_size=3, padding='same')(x)
            x = Add()([x, inp])
            x = LeakyReLU(0.2)(x)
            return x
        return func

    def func(inp):  # input
        print(type(inp))
        x = upscale(dims * 8, True)(inp)
        x = ResidualBlock(dims * 8)(x)
        # x = Concatenate()([x, par_list[1]])
        x = upscale(dims * 4, True)(x)
        x = ResidualBlock(dims * 4)(x)
        # x = Concatenate()([x, par_list[0]])
        x = upscale(dims * 2, True)(x)
        x = ResidualBlock(dims * 2)(x)
        return Conv2D(output_nc, kernel_size=5, padding='same', activation='sigmoid')(x)
    return func
When I try, I get this error message:
Graph disconnected: cannot obtain value for tensor Tensor("input_1:0", shape=(?, 128, 128, 3), dtype=float32) at layer "input_1". The following previous layers were accessed without issue: ['input_2', 'conv2d_10', 'space_attention_5', 'channel_attention_5', 'concatenate_5', 'conv2d_11', 'leaky_re_lu_6', 'pixel_shuffler_2', 'conv2d_12', 'leaky_re_lu_7', 'conv2d_13', 'add_1', 'leaky_re_lu_8', 'conv2d_14', 'space_attention_6', 'channel_attention_6', 'concatenate_6', 'conv2d_15', 'leaky_re_lu_9', 'pixel_shuffler_3', 'conv2d_16']
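Keras has no direct equivalent of PyTorch's ModuleList, but the functional API covers this case: make the encoder a Model with several outputs and pass those outputs to each decoder explicitly, so that every tensor the decoders use is reachable from the final model's inputs (which is what avoids the "Graph disconnected" error). A minimal sketch with illustrative layer names and sizes, not taken from the code above:

from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Concatenate
from keras.models import Model

inp = Input((128, 128, 3))
x0 = Conv2D(32, 3, padding='same', activation='relu')(inp)
x1 = Conv2D(64, 3, padding='same', activation='relu')(MaxPooling2D()(x0))
encoder = Model(inp, [x0, x1], name='encoder')   # a Model may return several tensors

def build_decoder(name):
    skip = Input((128, 128, 32))
    deep = Input((64, 64, 64))
    y = UpSampling2D()(deep)
    y = Concatenate()([y, skip])                 # U-Net style skip connection
    out = Conv2D(3, 3, padding='same', activation='sigmoid')(y)
    return Model([skip, deep], out, name=name)

decoder_a = build_decoder('decoder_a')
decoder_b = build_decoder('decoder_b')

ae_inp = Input((128, 128, 3))
feats = encoder(ae_inp)                          # list of encoder outputs
autoencoder = Model(ae_inp, [decoder_a(feats), decoder_b(feats)])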