Implement CDCN using tensorflow - python

Recently I have been trying to implement CDCN (CVPR 2020) using TensorFlow 2.8 + Python 3.9. This requires my custom layer to access the current Conv2D layer's weights.
However, when I add my custom layer to a sequential model, an error occurs: NotImplementedError: numpy() is only available when eager execution is enabled.
This is my code. Can anyone help me? I have tried adding tf.compat.v1.enable_eager_execution(), but it doesn't work.
import numpy as np
import tensorflow.keras as tfk
import tensorflow as tf

class CDC(tfk.layers.Layer):
    def __init__(self, output_dim, kernel_size=(3, 3), padding='same', activation=None, theta=0.7, **kwargs):
        super(CDC, self).__init__()
        self.theta = theta
        self.activation = None
        self.output_dim = output_dim
        self.kernel_size = kernel_size
        self.padding = padding
        if activation is not None:
            self.activation = tfk.activations.get(activation)

    def build(self, input_shape):
        self.conv = tfk.layers.Conv2D(self.output_dim, self.kernel_size, padding=self.padding, input_shape=input_shape)
        self.conv.build(input_shape=input_shape)
        self._kernel = self.conv.kernel
        super(CDC, self).build(input_shape)
        self.built = True

    def call(self, inputs, training=None, mask=None):
        vanillaOutput = self.conv(inputs)
        weightSum = self.conv.kernel.numpy().sum(axis=0).sum(axis=0).sum(axis=0)
        weightSum = np.reshape(weightSum, (1, 1, 1, self.output_dim))
        weightSum = tf.constant(weightSum, dtype=tf.float32)
        cDiff = tf.nn.conv2d(inputs, filters=weightSum, strides=self.conv.strides, padding=self.conv.padding.upper())
        result = vanillaOutput - self.theta * cDiff
        if self.activation is not None:
            return self.activation(result)
        return vanillaOutput

If you just want the sum of all elements in the kernel, use tf.math.reduce_sum(), which also works in graph mode. Replace the lines

weightSum = self.conv.kernel.numpy().sum(axis=0).sum(axis=0).sum(axis=0)
weightSum = np.reshape(weightSum, (1, 1, 1, self.output_dim))
weightSum = tf.constant(weightSum, dtype=tf.float32)

with

weightSum = tf.math.reduce_sum(self.conv.kernel, axis=[0, 1, 2])
weightSum = tf.reshape(weightSum, (1, 1, 1, self.output_dim))
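
For reference, here is a minimal sketch of a graph-safe call built on that idea. It is a sketch, not the poster's exact code: it keeps the input-channel axis in the reduction (a (1, 1, 1, output_dim) filter would only be valid for single-channel inputs, and the reference CDC formulation sums the kernel over its spatial support only), and it returns result rather than vanillaOutput, which looks like a typo in the original.

    def call(self, inputs, training=None, mask=None):
        vanillaOutput = self.conv(inputs)
        # Sum the kernel over its spatial axes only, keeping the
        # (in_channels, out_channels) axes so the 1x1 convolution below
        # is valid for multi-channel inputs. Pure TF ops, so this also
        # runs with eager execution disabled.
        kernelSum = tf.math.reduce_sum(self.conv.kernel, axis=[0, 1], keepdims=True)
        cDiff = tf.nn.conv2d(inputs, filters=kernelSum,
                             strides=self.conv.strides,
                             padding=self.conv.padding.upper())
        result = vanillaOutput - self.theta * cDiff
        return self.activation(result) if self.activation is not None else result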

Related

ValueError: Dimensions must be equal, but are 2 and 3 for '{{node mean_squared_error/SquaredDifference}}'

I ran into this problem while working on a TCN model. The model only supports 2-dimensional data, not 3-dimensional data; how can I solve this problem?
ValueError: Dimensions must be equal, but are 2 and 3 for '{{node mean_squared_error/SquaredDifference}} = SquaredDifference[T=DT_FLOAT](TemporalConvNet/temporal_layer_24/Relu, Cast_1)' with input shapes: [?,7,2], [?,7,3].
try:
    import tensorflow.python.keras as keras
except:
    import tensorflow.keras as keras
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops

class WeightNormConv1D(keras.layers.Conv1D):
    def __init__(self, *args, **kwargs):
        self.weight_norm = kwargs.pop('weight_norm')
        super(WeightNormConv1D, self).__init__(*args, **kwargs)

    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype)
        # weight normalization
        if self.weight_norm:
            self.g = self.add_weight(name='wn/g',
                                     shape=(self.filters,),
                                     initializer=tf.ones_initializer(),
                                     trainable=True,
                                     dtype=kernel.dtype)
            self.kernel = tf.reshape(self.g, [1, 1, self.filters]) * nn_impl.l2_normalize(kernel, [0, 1])
        else:
            self.kernel = kernel
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype)
        else:
            self.bias = None
        self.input_spec = InputSpec(ndim=self.rank + 2, axes={channel_axis: input_dim})
        if self.padding == 'causal':
            op_padding = 'valid'
        else:
            op_padding = self.padding
        if not isinstance(op_padding, (list, tuple)):
            op_padding = op_padding.upper()
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.get_shape(),
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=op_padding,
            data_format=conv_utils.convert_data_format(self.data_format, self.rank + 2))
        self.built = True

class TemporalLayer(keras.layers.Layer):
    def __init__(self, input_channels, output_channels, kernel_size, strides, dilation_rate, padding, keep_pro,
                 weight_norm=True):
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.kernel_size = kernel_size
        self.strides = strides
        self.dilation_rate = dilation_rate
        self.padding = padding
        self.keep_pro = keep_pro
        self.weight_norm = weight_norm
        self.h1 = WeightNormConv1D(filters=self.output_channels, kernel_size=self.kernel_size, strides=self.strides,
                                   data_format='channels_last', dilation_rate=self.dilation_rate, activation='relu',
                                   kernel_initializer=tf.random_normal_initializer(0, 0.01),
                                   bias_initializer=tf.zeros_initializer(), weight_norm=self.weight_norm)
        self.h2 = WeightNormConv1D(filters=self.output_channels, kernel_size=self.kernel_size, strides=self.strides,
                                   data_format='channels_last', dilation_rate=self.dilation_rate, activation='relu',
                                   kernel_initializer=tf.random_normal_initializer(0, 0.01),
                                   bias_initializer=tf.zeros_initializer(), weight_norm=self.weight_norm)
        if self.input_channels != self.output_channels:
            self.shou_cut = keras.layers.Conv1D(filters=self.output_channels, kernel_size=1,
                                                kernel_initializer=tf.random_normal_initializer(0, 0.01),
                                                bias_initializer=tf.zeros_initializer())
        else:
            self.shou_cut = None
        super(TemporalLayer, self).__init__()
    def call(self, inputs):
        inputs_padding = tf.pad(inputs, [[0, 0], [self.padding, 0], [0, 0]])
        h1_outputs = self.h1(inputs_padding)
        h1_outputs = keras.layers.Dropout(rate=self.keep_pro)(h1_outputs)
        h1_padding = tf.pad(h1_outputs, [[0, 0], [self.padding, 0], [0, 0]])
        h2_outputs = self.h2(h1_padding)
        h2_outputs = keras.layers.Dropout(rate=self.keep_pro)(h2_outputs)
        if self.input_channels != self.output_channels:
            res_x = self.shou_cut(inputs)
        else:
            res_x = inputs
        # residual connection
        return keras.activations.relu(keras.layers.add([res_x, h2_outputs]))
class TemporalConvNet(keras.Model):
    def __init__(self, input_channels, layers_channels, strides=1, kernel_size=3, keep_pro=0.8):
        super(TemporalConvNet, self).__init__(name='TemporalConvNet')
        self.input_channels = input_channels
        self.layers_channels = layers_channels
        self.strides = strides
        self.kernel_size = kernel_size
        self.keep_pro = keep_pro
        self.temporal_layers = []
        num_layers = len(self.layers_channels)
        for i in range(num_layers):
            dilation_rate = 2 ** i
            padding = (self.kernel_size - 1) * dilation_rate
            input_channels = self.input_channels if i == 0 else self.layers_channels[i - 1]
            output_channels = self.layers_channels[i]
            temporal_layer = TemporalLayer(input_channels, output_channels, self.kernel_size, self.strides,
                                           dilation_rate, padding, self.keep_pro, True)
            self.temporal_layers.append(temporal_layer)

    def call(self, inputs):
        outputs = inputs
        for layer in self.temporal_layers:
            outputs = layer(outputs)
        return outputs
Here is some simple code to recreate the error.

model = TemporalConvNet(input_channels=input_channels, layers_channels=[32, 16, 8, 4, 2], kernel_size=3)
model.compile(optimizer=tf.keras.optimizers.Adam(0.001), loss=keras.losses.mean_squared_error, metrics=['mse'])
callbacks = [keras.callbacks.EarlyStopping(
    monitor='val_loss',
    min_delta=1e-3,
    patience=100,
    mode='min',
    verbose=2
)]
model.fit(train_dataset, validation_data=valid_dataset, callbacks=callbacks, epochs=1000, verbose=2)

Thank you.
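
The error message itself points at the likely cause: the last TemporalLayer emits layers_channels[-1] = 2 features per time step while the targets carry 3, so mean_squared_error compares [?, 7, 2] against [?, 7, 3]. Assuming the targets really do have 3 features per step, one fix is to end the channel list on 3:

model = TemporalConvNet(input_channels=input_channels,
                        layers_channels=[32, 16, 8, 4, 3],  # last entry matches the label dimension
                        kernel_size=3)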

Error: Input to reshape is a tensor with 409600 values, but the requested shape requires a multiple of 25088 [[{{node pam_3/Reshape_1}}]]

I am trying to apply a channel attention layer and a position attention layer on the last convolutional layer of VGG16, but I am stuck on the above error. I am new to Python and deep learning.
I am confused about the shape values in class CAM and class PAM.
Here is my code:
import tensorflow as tf
from keras import initializers
from keras import regularizers
from keras import constraints
from keras import backend as K
from keras.layers import Layer, Conv2D, Activation

class PAM(Layer):
    def __init__(self,
                 gamma_initializer=tf.zeros_initializer(),
                 gamma_regularizer=None,
                 gamma_constraint=None,
                 **kwargs):
        super(PAM, self).__init__(**kwargs)
        self.gamma_initializer = gamma_initializer
        self.gamma_regularizer = gamma_regularizer
        self.gamma_constraint = gamma_constraint

    def build(self, input_shape):
        self.gamma = self.add_weight(shape=(512,),
                                     initializer=self.gamma_initializer,
                                     name='gamma',
                                     regularizer=self.gamma_regularizer,
                                     constraint=self.gamma_constraint)
        self.built = True

    def compute_output_shape(self, input_shape):
        return input_shape

    def call(self, input):
        input_shape = input.get_shape().as_list()
        _, h, w, filters = input_shape
        b = Conv2D(512, 3, use_bias=False, kernel_initializer='he_normal')(att_input)
        c = Conv2D(512, 3, use_bias=False, kernel_initializer='he_normal')(att_input)
        d = Conv2D(512, 3, use_bias=False, kernel_initializer='he_normal')(att_input)
        vec_b = K.reshape(b, (-1, h * w, 512))
        vec_cT = tf.transpose(K.reshape(c, (-1, h * w, 512)), (0, 2, 1))
        bcT = K.batch_dot(vec_b, vec_cT)
        softmax_bcT = Activation('softmax')(bcT)
        vec_d = K.reshape(d, (-1, h * w, 512))
        bcTd = K.batch_dot(softmax_bcT, vec_d)
        bcTd = K.reshape(bcTd, (-1, h, w, 512))
        out = self.gamma * bcTd + att_input
        return out
class CAM(Layer):
    def __init__(self,
                 gamma_initializer=tf.zeros_initializer(),
                 gamma_regularizer=None,
                 gamma_constraint=None,
                 **kwargs):
        super(CAM, self).__init__(**kwargs)
        self.gamma_initializer = gamma_initializer
        self.gamma_regularizer = gamma_regularizer
        self.gamma_constraint = gamma_constraint

    def build(self, input_shape):
        self.gamma = self.add_weight(shape=(512,),
                                     initializer=self.gamma_initializer,
                                     name='gamma',
                                     regularizer=self.gamma_regularizer,
                                     constraint=self.gamma_constraint)
        self.built = True

    def compute_output_shape(self, input_shape):
        return input_shape

    def call(self, input):
        input_shape = input.get_shape().as_list()
        _, h, w, filters = input_shape
        vec_a = K.reshape(input, (-1, h * w, 512))
        vec_aT = tf.transpose(vec_a, (0, 2, 1))
        aTa = K.batch_dot(vec_aT, vec_a)
        softmax_aTa = Activation('softmax')(aTa)
        aaTa = K.batch_dot(vec_a, softmax_aTa)
        aaTa = K.reshape(aaTa, (-1, h, w, 512))
        out = self.gamma * aaTa + att_input
        return out
pam = PAM()(att_input)
pam = Conv2D(512, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(pam)
pam = BatchNormalization(axis=3)(pam)
pam = Activation('relu')(pam)
pam = Dropout(0.5)(pam)
pam = Conv2D(512, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(pam)
cam = CAM()(att_input)
cam = Conv2D(512, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(cam)
cam = BatchNormalization(axis=3)(cam)
cam = Activation('relu')(cam)
cam = Dropout(0.5)(cam)
cam = Conv2D(512, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(cam)
Make sure you have the input dimensions correct. I can't say exactly where the error is, but whenever I've had an error like that it has almost always been a case where I overlooked one of the dimensions. Think carefully about how your input changes through the layers; printing out the model summary might help. Good luck!
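
One concrete place to look in the code above (an observation based on the shapes, not a tested fix): the three Conv2D calls inside PAM.call use kernel size 3 with the default padding='valid', so b, c and d lose two pixels per spatial dimension, while the subsequent K.reshape calls still assume the original h * w. Giving those convolutions padding='same' keeps the spatial size consistent with the reshapes (and while there, it is safer to use the layer's own input argument instead of the global att_input):

        # inside PAM.call -- keep h and w unchanged so the reshapes line up
        b = Conv2D(512, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(input)
        c = Conv2D(512, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(input)
        d = Conv2D(512, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(input)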

Reassignment of weights in tensorflow 2/keras

I'm currently testing some modified versions of dropout in Keras, one of which involves adjusting the weights during the training of a customized dense layer. However, I have not been able to run it without errors yet. I suspect it has something to do with eager execution, but I'm not sure.
import numpy as np
import tensorflow as tf
from tensorflow import keras

class Linear(keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super(Linear, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs, training=False):
        prob = 0.0 / 10
        if training:
            w = np.matrix(self.w)
            # w = self.w
            shape = w.shape
            size = shape[0] * shape[1]
            arr = np.random.choice([0, 1], size=size, p=[prob, 1 - prob])  # random array of 1's and 0's
            arr = arr.reshape(shape)  # reshape to the same dimensions as the weights
            new_weights = np.multiply(arr, w)  # element-wise multiplication
            self.w = new_weights
        return tf.matmul(inputs, self.w) + self.b
from tensorflow.keras import layers, models

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(128, (3, 3), activation='relu',padding='same'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(4, (3, 3), activation='relu',padding='same'))
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(Linear(3)) #Custom layer
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='CategoricalCrossentropy',
              optimizer='adam',
              metrics=['accuracy'])
epochs = 1
history = model.fit(train_dataset, validation_data=validation_dataset, epochs=epochs)
Error: TypeError: Expected binary or unicode string, got <tf.Tensor 'sequential_3/linear_3/mul:0' shape=(4, 3) dtype=float32>
self.w has to be a tf.Variable. However, after the multiplication in call() it becomes a tf.Tensor, so you need to find another way to do the same thing in call().
Try this code:

def call(self, inputs, training=False):
    prob = 0.0 / 10
    if training:
        w = np.matrix(self.w)
        shape = w.shape
        size = shape[0] * shape[1]
        arr = np.random.choice([0, 1], size=size, p=[prob, 1 - prob])  # random array of 1's and 0's
        arr = arr.reshape(shape)  # reshape to the same dimensions as the weights
        # CHANGED 3 LINES BELOW:
        arr = tf.convert_to_tensor(arr, dtype=tf.float32)
        new_weights = tf.multiply(arr, self.w)
        self.w.assign(new_weights)  # assign preserves the tf.Variable
    return tf.matmul(inputs, self.w) + self.b
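
For completeness, here is a variant that stays entirely inside TensorFlow (a sketch: it samples the mask with tf.random instead of NumPy, so it also works when the layer is traced into a graph, and it multiplies on the fly instead of assigning, which leaves self.w untouched between batches):

def call(self, inputs, training=False):
    prob = 0.0 / 10  # probability of zeroing a weight
    if training:
        # Bernoulli(1 - prob) mask sampled with TF ops: 1 keeps a weight, 0 drops it.
        mask = tf.cast(tf.random.uniform(tf.shape(self.w)) >= prob, self.w.dtype)
        return tf.matmul(inputs, self.w * mask) + self.b
    return tf.matmul(inputs, self.w) + self.b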

Converting from TF 1.x to TF 2.0 keras

I have a model written in TF 1.x using the tf-slim API. Is it possible to convert it to tf.keras in TF 2.0 EXACTLY the way it is, for instance with exactly the same number of parameters and the same training behavior?
In my case I've tried doing so, but my tf.keras model actually has about 5% FEWER parameters than the TF 1.x one. I also noticed that my tf.keras model has a much less smooth training stage. Any thoughts? Thanks.
Maybe I'm setting some of the parameters that initialize the layers differently? Any other suggestions would be greatly appreciated.
This isn't my full model, but I use a lot of the components below.
Original TF 1.x model:
import tensorflow as tf
from tensorflow.contrib import slim

def batch_norm_relu(inputs, is_training):
    net = slim.batch_norm(inputs, is_training=is_training)
    net = tf.nn.relu(net)
    return net

def conv2d_transpose(inputs, output_channels, kernel_size):
    upsamp = tf.contrib.slim.conv2d_transpose(
        inputs,
        num_outputs=output_channels,
        kernel_size=kernel_size,
        stride=2,
    )
    return upsamp

def conv2d_fixed_padding(inputs, filters, kernel_size, stride, rate):
    net = slim.conv2d(inputs,
                      filters,
                      kernel_size,
                      stride=stride,
                      rate=rate,
                      padding=('SAME' if stride == 1 else 'VALID'),
                      activation_fn=None)
    return net

def block(inputs, filters, is_training, projection_shortcut, stride):
    inputs = batch_norm_relu(inputs, is_training)
    shortcut = inputs
    if projection_shortcut is not None:
        shortcut = projection_shortcut(inputs)
    conv_k1_s1_r1 = shortcut
    conv_k3_s1_r1 = slim.conv2d(shortcut,
                                filters,
                                kernel_size=3,
                                stride=1,
                                rate=1,
                                padding=('SAME' if stride == 1 else 'VALID'),
                                activation_fn=None)
    conv_k3_s1_r3 = slim.conv2d(shortcut,
                                filters,
                                kernel_size=3,
                                stride=1,
                                rate=3,
                                padding=('SAME' if stride == 1 else 'VALID'),
                                activation_fn=None)
    conv_k3_s1_r5 = slim.conv2d(shortcut,
                                filters,
                                kernel_size=3,
                                stride=1,
                                rate=5,
                                padding=('SAME' if stride == 1 else 'VALID'),
                                activation_fn=None)
    net = conv_k1_s1_r1 + conv_k3_s1_r1 + conv_k3_s1_r3 + conv_k3_s1_r5
    net = batch_norm_relu(net, is_training)
    net = conv2d_fixed_padding(inputs=net, filters=filters, kernel_size=1, stride=1, rate=1)
    outputs = shortcut + net
    return outputs
Attempted tf.keras model with the same components in TF 2.x:
import tensorflow as tf

class BatchNormRelu(tf.keras.layers.Layer):
    """Batch normalization + ReLU"""
    def __init__(self, name=None):
        super(BatchNormRelu, self).__init__(name=name)
        self.bnorm = tf.keras.layers.BatchNormalization(momentum=0.999,
                                                        scale=False)
        self.relu = tf.keras.layers.ReLU()

    def call(self, inputs, is_training):
        x = self.bnorm(inputs, training=is_training)
        x = self.relu(x)
        return x

class Conv2DTranspose(tf.keras.layers.Layer):
    """Conv2DTranspose layer"""
    def __init__(self, output_channels, kernel_size, name=None):
        super(Conv2DTranspose, self).__init__(name=name)
        self.tconv1 = tf.keras.layers.Conv2DTranspose(
            filters=output_channels,
            kernel_size=kernel_size,
            strides=2,
            padding='same',
            activation=tf.keras.activations.relu
        )

    def call(self, inputs):
        x = self.tconv1(inputs)
        return x

class Conv2DFixedPadding(tf.keras.layers.Layer):
    """Conv2D Fixed Padding layer"""
    def __init__(self, filters, kernel_size, stride, rate, name=None):
        super(Conv2DFixedPadding, self).__init__(name=name)
        self.conv1 = tf.keras.layers.Conv2D(filters,
                                            kernel_size,
                                            strides=stride,
                                            dilation_rate=rate,
                                            padding=('same' if stride == 1 else 'valid'),
                                            activation=None)

    def call(self, inputs):
        x = self.conv1(inputs)
        return x

class block(tf.keras.layers.Layer):
    def __init__(self,
                 filters,
                 stride,
                 projection_shortcut=True,
                 name=None):
        super(block, self).__init__(name=name)
        self.projection_shortcut = projection_shortcut
        self.brelu1 = BatchNormRelu()
        self.brelu2 = BatchNormRelu()
        self.conv1 = tf.keras.layers.Conv2D(filters,
                                            kernel_size=3,
                                            strides=1,
                                            dilation_rate=1,
                                            padding=('same' if stride == 1 else 'valid'),
                                            activation=None)
        self.conv2 = tf.keras.layers.Conv2D(filters,
                                            kernel_size=3,
                                            strides=1,
                                            dilation_rate=3,
                                            padding=('same' if stride == 1 else 'valid'),
                                            activation=None)
        self.conv3 = tf.keras.layers.Conv2D(filters,
                                            kernel_size=3,
                                            strides=1,
                                            dilation_rate=5,
                                            padding=('same' if stride == 1 else 'valid'),
                                            activation=None)
        self.conv4 = Conv2DFixedPadding(filters, 1, 1, 1)
        self.conv_sc = Conv2DFixedPadding(filters, 1, stride, 1)

    def call(self, inputs, is_training):
        x = self.brelu1(inputs, is_training)
        shortcut = x
        if self.projection_shortcut:
            shortcut = self.conv_sc(x)
        conv_k1_s1_r1 = shortcut
        conv_k3_s1_r1 = self.conv1(shortcut)
        conv_k3_s1_r3 = self.conv2(shortcut)
        conv_k3_s1_r5 = self.conv3(shortcut)
        x = conv_k1_s1_r1 + conv_k3_s1_r1 + conv_k3_s1_r3 + conv_k3_s1_r5
        x = self.brelu2(x, is_training)
        x = self.conv4(x)
        outputs = shortcut + x
        return outputs
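
There is no definitive answer without the full models, but one way to track down where the ~5% goes is to dump every trainable variable of the tf.keras model with its parameter count and diff that list against the TF 1.x variables (slim.model_analyzer.analyze_vars gives a similar view on the 1.x side). Typical culprits for small count differences are batch-norm center/scale flags and convolution biases. A generic snippet (assuming model is the built tf.keras model):

import numpy as np

# After running one batch through the model (so all variables exist),
# print every trainable variable with its shape and parameter count.
for v in model.trainable_variables:
    print(v.name, tuple(v.shape), int(np.prod(v.shape)))
print('total:', sum(int(np.prod(v.shape)) for v in model.trainable_variables))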

Reflection padding Conv2D

I'm using Keras to build a convolutional neural network for image segmentation, and I want to use "reflection padding" instead of padding 'same', but I cannot find a way to do it in Keras.

inputs = Input((num_channels, img_rows, img_cols))
conv1 = Conv2D(32, 3, padding='same', kernel_initializer='he_uniform', data_format='channels_first')(inputs)

Is there a way to implement a reflection layer and insert it into a Keras model?
The accepted answer (below) is not working in the current Keras version. Here is a version that works:

class ReflectionPadding2D(Layer):
    def __init__(self, padding=(1, 1), **kwargs):
        self.padding = tuple(padding)
        self.input_spec = [InputSpec(ndim=4)]
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def compute_output_shape(self, s):
        """If you are using the "channels_last" configuration"""
        return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3])

    def call(self, x, mask=None):
        w_pad, h_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], 'REFLECT')
Found the solution! We only have to create a new class that takes a layer as input and use TensorFlow's predefined padding function to do it.

import tensorflow as tf
from keras.engine.topology import Layer
from keras.engine import InputSpec

class ReflectionPadding2D(Layer):
    def __init__(self, padding=(1, 1), **kwargs):
        self.padding = tuple(padding)
        self.input_spec = [InputSpec(ndim=4)]
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def get_output_shape_for(self, s):
        """If you are using the "channels_last" configuration"""
        return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3])

    def call(self, x, mask=None):
        w_pad, h_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], 'REFLECT')

# a little demo
inputs = Input((img_rows, img_cols, num_channels))
padded_inputs = ReflectionPadding2D(padding=(1, 1))(inputs)
conv1 = Conv2D(32, 3, padding='valid', kernel_initializer='he_uniform',
               data_format='channels_last')(padded_inputs)
The solution from Akihiko did not work with the new Keras version, so I came up with my own. This snippet pads a batch of 202x202x3 images to 256x256x3 (assuming inp comes from an Input((202, 202, 3)) layer):

import tensorflow as tf
from keras.layers import Lambda

inp_padded = Lambda(lambda x: tf.pad(x, [[0, 0], [27, 27], [27, 27], [0, 0]], 'REFLECT'))(inp)
As you can check in the documentation, there is no such 'reflect' padding; only 'same' and 'valid' are implemented in Keras.
You could try to implement it on your own, or find out whether somebody already did. You should base it on the Conv2D class and check where the self.padding member variable is used.
The accepted answer does not work if we have undefined dimensions: an error is raised when the compute_output_shape function is called. Here is a simple workaround:

class ReflectionPadding2D(Layer):
    def __init__(self, padding=(1, 1), **kwargs):
        self.padding = tuple(padding)
        self.input_spec = [InputSpec(ndim=4)]
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def compute_output_shape(self, s):
        if s[1] is None:
            return (None, None, None, s[3])
        return (s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3])

    def call(self, x, mask=None):
        w_pad, h_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], 'REFLECT')

    def get_config(self):
        config = super(ReflectionPadding2D, self).get_config()
        return config
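
A quick usage sketch (shapes assumed) showing that this version also builds when the spatial dimensions are undefined:

from keras.layers import Input, Conv2D

inp = Input((None, None, 3))              # H and W unknown at graph-build time
x = ReflectionPadding2D(padding=(1, 1))(inp)
out = Conv2D(32, 3, padding='valid')(x)   # 'valid' conv on the padded input keeps H x W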
