Plot the U-Net network architecture based on the model code - python

I am currently working on a medical image segmentation project, using the U-Net architecture to train on my dataset and perform segmentation. I wanted to see what my coded architecture looks like and tried many ways to achieve this, but I was not able to plot the architecture from my code.
This is the code for the U-Net architecture:
from torchvision.transforms import CenterCrop
from torch.nn import Module, ModuleList, Conv2d, ConvTranspose2d, MaxPool2d, ReLU
from torch.nn import functional as F
import torch
import config  # project-specific module defining INPUT_IMAGE_HEIGHT and INPUT_IMAGE_WIDTH

class Block(Module):
    def __init__(self, inChannels, outChannels):
        super().__init__()
        # store the convolution and ReLU layers
        self.conv1 = Conv2d(inChannels, outChannels, 3)
        self.relu = ReLU()
        self.conv2 = Conv2d(outChannels, outChannels, 3)

    def forward(self, x):
        # apply CONV => RELU => CONV to the inputs and return the result
        return self.conv2(self.relu(self.conv1(x)))

class Encoder(Module):
    def __init__(self, channels=(3, 16, 32, 64)):
        super().__init__()
        # store the encoder blocks and maxpooling layer
        self.encBlocks = ModuleList(
            [Block(channels[i], channels[i + 1])
             for i in range(len(channels) - 1)])
        self.pool = MaxPool2d(2)

    def forward(self, x):
        # initialize an empty list to store the intermediate outputs
        blockOutputs = []
        # loop through the encoder blocks
        for block in self.encBlocks:
            # pass the inputs through the current encoder block, store
            # the output, and then apply maxpooling
            x = block(x)
            blockOutputs.append(x)
            x = self.pool(x)
        # return the list containing the intermediate outputs
        return blockOutputs

class Decoder(Module):
    def __init__(self, channels=(64, 32, 16)):
        super().__init__()
        # initialize the number of channels, upsampler blocks, and
        # decoder blocks
        self.channels = channels
        self.upconvs = ModuleList(
            [ConvTranspose2d(channels[i], channels[i + 1], 2, 2)
             for i in range(len(channels) - 1)])
        self.dec_blocks = ModuleList(
            [Block(channels[i], channels[i + 1])
             for i in range(len(channels) - 1)])

    def forward(self, x, encFeatures):
        # loop through the number of channels
        for i in range(len(self.channels) - 1):
            # pass the inputs through the upsampler block
            x = self.upconvs[i](x)
            # crop the current features from the encoder blocks,
            # concatenate them with the current upsampled features,
            # and pass the concatenated output through the current
            # decoder block
            encFeat = self.crop(encFeatures[i], x)
            x = torch.cat([x, encFeat], dim=1)
            x = self.dec_blocks[i](x)
        # return the final decoder output
        return x

    def crop(self, encFeatures, x):
        # grab the dimensions of the inputs, and crop the encoder
        # features to match them
        (_, _, H, W) = x.shape
        encFeatures = CenterCrop([H, W])(encFeatures)
        # return the cropped features
        return encFeatures

class UNet(Module):
    def __init__(self, encChannels=(3, 16, 32, 64),
                 decChannels=(64, 32, 16),
                 nbClasses=1, retainDim=True,
                 outSize=(config.INPUT_IMAGE_HEIGHT, config.INPUT_IMAGE_WIDTH)):
        super().__init__()
        # initialize the encoder and decoder
        self.encoder = Encoder(encChannels)
        self.decoder = Decoder(decChannels)
        # initialize the regression head and store the class variables
        self.head = Conv2d(decChannels[-1], nbClasses, 1)
        self.retainDim = retainDim
        self.outSize = outSize

    def forward(self, x):
        # grab the features from the encoder
        encFeatures = self.encoder(x)
        # pass the encoder features through the decoder, making sure
        # their dimensions are suited for concatenation
        decFeatures = self.decoder(encFeatures[::-1][0],
                                   encFeatures[::-1][1:])
        # pass the decoder features through the regression head to
        # obtain the segmentation mask
        map = self.head(decFeatures)
        # if we are retaining the original output dimensions,
        # resize the output to match them
        if self.retainDim:
            map = F.interpolate(map, self.outSize)
        # return the segmentation map
        return map
I used the plot_model function, but I am not getting any result. I used code similar to this to visualize the U-Net architecture:

from keras.utils import plot_model

# Build your model
model = UNet()
# Visualize the model
plot_model(model, to_file='model2.png')
Please help me plot a network architecture based on my U-Net code.
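keras.utils.plot_model only understands Keras/TensorFlow models, so calling it on a PyTorch Module produces nothing. For a PyTorch model you can instead render the autograd graph with the third-party torchviz package, or print a layer-by-layer summary with torchinfo. A minimal sketch, assuming both packages are installed and using an arbitrary 128 x 128 input size for illustration:

import torch
from torchviz import make_dot
from torchinfo import summary

model = UNet()
x = torch.randn(1, 3, 128, 128)  # dummy input; match your INPUT_IMAGE_HEIGHT/WIDTH
# render the graph of the forward pass to unet.png
make_dot(model(x), params=dict(model.named_parameters())).render("unet", format="png")
# alternatively, a textual per-layer summary
summary(model, input_size=(1, 3, 128, 128))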

Related

Visualising the last layer node embeddings of a model in torch geometric

I'm doing my first graph convolutional neural network project with torch_geometric. I want to visualize the last-layer node embeddings of my model and don't know how to get them.
I trained my model on the CiteSeer dataset. You can get the full dataset as easily as this:
from torch_geometric.datasets import Planetoid
from torch_geometric.transforms import NormalizeFeatures
dataset = Planetoid(root="data/Planetoid", name='CiteSeer', transform=NormalizeFeatures())
My model is a simple two-layer model:

import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv

class GraphClassifier(torch.nn.Module):
    def __init__(self, dataset, hidden_dim):
        super(GraphClassifier, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, hidden_dim)
        self.conv2 = GCNConv(hidden_dim, dataset.num_classes)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        x = F.relu(self.conv2(x, edge_index))
        return F.log_softmax(x, dim=1)
If you print my model you will get this:
model = GraphClassifier(dataset, 64)
print(model)
>>>
GraphClassifier(
  (conv1): GCNConv(3703, 64)
  (conv2): GCNConv(64, 6)
)
The model trains successfully. I only want to visualize its last-layer node embeddings, using this function:
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import torch

# emb: (nNodes, hidden_dim)
# node_type: (nNodes,). Entries are torch.int64 ranging from 0 to num_classes - 1
def visualize(emb: torch.Tensor, node_type: torch.Tensor):
    z = TSNE(n_components=2).fit_transform(emb.detach().cpu().numpy())
    plt.figure(figsize=(10, 10))
    plt.scatter(z[:, 0], z[:, 1], s=70, c=node_type, cmap="Set2")
    plt.show()
I don't know how to extract emb and node_type from my model to pass to the visualize function. emb is the model's last-layer node embeddings. How can I get these from my model?
It is solved by changing the model to this:
class GraphClassifier(torch.nn.Module):
    def __init__(self, dataset, hidden_dim):
        super(GraphClassifier, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, hidden_dim)
        self.conv2 = GCNConv(hidden_dim, dataset.num_classes)

    def forward(self, data, do_visualize=False):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        x = F.relu(self.conv2(x, edge_index))
        if do_visualize:  # NEW LINE
            visualize(x, data.y)  # NEW LINE
        return F.log_softmax(x, dim=1)
Now if you call the forward function with do_visualize=True, it will visualize the embeddings, like this:
model = GraphClassifier(dataset, hidden_dim)
model.to(device)
model(dataset[0].to(device), do_visualize=True)
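If you prefer not to modify forward, another option is to register a forward hook on conv2 and capture its output the next time the model is called. A sketch, assuming the trained model and visualize function above:

# capture conv2's output without changing the model definition
captured = {}

def save_embeddings(module, inputs, output):
    captured["emb"] = output

handle = model.conv2.register_forward_hook(save_embeddings)
data = dataset[0].to(device)
model(data)  # the forward pass triggers the hook
handle.remove()
# conv2's raw output precedes the final ReLU, so apply it to match the model
visualize(F.relu(captured["emb"]), data.y)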

How to initialise (and check sanity) weights efficiently of layers within complex (nested) modules in PyTorch?

Looking for an efficient way to access nested Modules and Layers to set the weights
I am replicating the DCGAN paper and my code works as expected. In the paper, the authors say:

All weights were initialized from a zero-centered Normal distribution with standard deviation 0.02

This awesome answer explains that it can be done using torch.nn.init.normal_(nn.Conv2d(1, 1, 1, 1, 1).weight.data, 0.0, 0.02), but my model has a complex structure using ModuleList and others. What is the most efficient way of doing this?
By complex, I mean the implementation below:
'''
Implements the Deep Convolutional GAN (DCGAN) in PyTorch: paper at https://arxiv.org/pdf/1511.06434v2.pdf
'''
import torch
import torch.nn as nn

class GeneratorBlock(nn.Module):
    '''
    Generator block: TransposedConv2D -> BatchNorm (except the LAST block) -> ReLU
    Note: kernel_size = 4, stride = 2, padding = 1 is used in the paper. When BatchNorm is used, bias is not used for Conv2D
    '''
    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, use_batchnorm: bool = True):
        super().__init__()
        self.use_batchnorm = use_batchnorm
        self.transpose_conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=not self.use_batchnorm)
        self.batch_norm = nn.BatchNorm2d(out_channels) if self.use_batchnorm else None
        self.activation = nn.ReLU()  # the paper uses ReLU in the generator network

    def forward(self, x):
        x = self.transpose_conv(x)
        return self.activation(self.batch_norm(x)) if self.use_batchnorm else self.activation(x)

class Generator(nn.Module):
    '''
    Generates images using transposed convolutions. Input is random noise of shape [Batch, 100, 1, 1], which is then upsampled
    '''
    def __init__(self, input_features=100, base_feature=128, final_channels: int = 1):
        '''
        We use nn.Sequential here just to show the workings. If you want to build the layers dynamically in a loop, see nn.ModuleList() in the Discriminator block. Both work the same.
        'base_feature' acts as the base for the input and output channels.
        args:
            input_features: the shape of the random noise from which an image will be generated
            base_feature: the base number of feature-map channels; the other inputs and outputs are calculated from it
            final_channels: the channels / features which will be sent to the Discriminator as input
        '''
        super(Generator, self).__init__()
        # the Discriminator does the same work using ModuleList(). Uses 4 blocks
        self.blocks = nn.Sequential(
            GeneratorBlock(in_channels=input_features, out_channels=base_feature * 8, stride=1, padding=0),  # from random noise, generate 1024 features
            GeneratorBlock(in_channels=base_feature * 8, out_channels=base_feature * 4),  # 1024 -> 512 features
            GeneratorBlock(in_channels=base_feature * 4, out_channels=base_feature * 2),  # 512 -> 256 features
            GeneratorBlock(in_channels=base_feature * 2, out_channels=base_feature),      # 256 -> 128 features
            nn.ConvTranspose2d(base_feature, final_channels, kernel_size=4, stride=2, padding=1)  # 128 -> final channels. A GeneratorBlock without ReLU and BatchNorm ;)
        )
        self.activation = nn.Tanh()  # to bring the outputs into [-1, 1]

    def forward(self, x):
        '''
        Takes random noise as input and generates features from it
        '''
        return self.activation(self.blocks(x))

class DiscriminatorBlock(nn.Module):
    '''
    Discriminator block: Conv2D -> BatchNorm (except the FIRST block) -> LeakyReLU
    Note: kernel_size = 4, stride = 2, padding = 1 is used in the paper. When BatchNorm is used, bias is not used for Conv2D
    '''
    def __init__(self, in_channels, out_channels, kernel_size=4, stride=2, padding=1, use_batchnorm: bool = True):
        super().__init__()
        self.use_batchnorm = use_batchnorm
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=not self.use_batchnorm)
        self.batch_norm = nn.BatchNorm2d(out_channels) if self.use_batchnorm else None
        self.activation = nn.LeakyReLU(0.2)

    def forward(self, x):
        x = self.conv(x)
        return self.activation(self.batch_norm(x)) if self.use_batchnorm else self.activation(x)

class Discriminator(nn.Module):
    '''
    CNN that classifies whether the images produced by the Generator are as good as the real ones
    Channels change as :: 1 -> 64 -> 128 -> 256 -> 512 -> 1
    '''
    def __init__(self, input_features=1, output_features=1, middle_features=[64, 128, 256]):
        '''
        In the paper, the Discriminator takes a feature map of shape [Batch, 1, 64, 64] from the Generator and outputs a single number per sample in the batch
        '''
        super().__init__()
        self.layers = nn.ModuleList()  # just a fancy way of stacking layers in a loop
        # in the paper, the first layer does not use BatchNorm
        self.layers.append(DiscriminatorBlock(input_features, middle_features[0], use_batchnorm=False))  # 1 -> 64, because the input has 1 channel
        for i, channel in enumerate(middle_features):  # 4 blocks in total are used in the paper; 1 is used above, these are the remaining 3
            self.layers.append(DiscriminatorBlock(channel, channel * 2))  # 64 -> 128 --- 128 -> 256 --- 256 -> 512
        self.final_conv = nn.Conv2d(in_channels=middle_features[-1] * 2, out_channels=output_features, kernel_size=4, stride=2, padding=0)  # input from previous layer: 512 -> 1
        self.sigmoid_layer = nn.Sigmoid()  # how CLOSE the image is to a real one

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return self.sigmoid_layer(self.final_conv(x))

def test_DCGAN_code():
    noise = torch.rand(10, 100, 1, 1)
    image = Generator()(noise)
    result = Discriminator()(image)
    print('Model built successfully! Generating 10 random samples and their end results')
    print(f"'Z' random noise shape: {noise.shape} || Generator output shape: {image.shape} || Discriminator output shape: {result.shape}")
You can simply iterate over all submodules at the end of your __init__ method:

class Generator(nn.Module):
    def __init__(self, ...):
        # all code here
        # ...
        # init weights, at the very bottom of __init__
        for sm in self.modules():
            if isinstance(sm, (nn.Conv2d, nn.ConvTranspose2d)):
                # only the conv layers are initialized this way
                # (the Generator above only contains transposed convs)
                torch.nn.init.normal_(sm.weight.data, 0.0, 0.02)

Done.
I found an answer to this. I just want to know if this is the right approach:
def initialise_weights(m):
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d)):
        nn.init.normal_(m.weight.data, 0.0, 0.02)

def check_sanity(m):
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d)):
        print(m.weight.data.mean(), m.weight.data.std())

gen = Generator()
gen = gen.apply(initialise_weights)
gen = gen.apply(check_sanity)
The accepted answer is the best one (an alternative would be going into the _ConvNd class and modifying the source, i.e. replacing init.kaiming_uniform_(self.weight, a=math.sqrt(5))). All said and done, though, the best practice is to define another method called reset_parameters(), call it at the end of your __init__(self, *args), and change the parameters there:

class Generator(nn.Module):
    def __init__(self, *args) -> None:
        ...
        self.reset_parameters()

    def reset_parameters(self) -> None:
        # re-initialize every conv weight in place
        for sm in self.modules():
            if isinstance(sm, (nn.Conv2d, nn.ConvTranspose2d)):
                torch.nn.init.normal_(
                    sm.weight.data,
                    mean=0.0,
                    std=0.02
                )
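As a quick sanity check (a sketch, assuming the Generator above and the torch/nn imports from the question), print the weight statistics after construction; the mean should be close to 0.0 and the standard deviation close to 0.02:

gen = Generator()
with torch.no_grad():
    for name, sm in gen.named_modules():
        if isinstance(sm, (nn.Conv2d, nn.ConvTranspose2d)):
            print(name, float(sm.weight.mean()), float(sm.weight.std()))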

How to add "Crop" in order to concatenate the skip connections in Encoder and Decoder levels as described in the UNET paper

I have implemented the code for the UNET paper below.
The problem is that at level 4, the Encoder has features of shape 512 x 64 x 64, while the Decoder part has features of a different shape, 512 x 56 x 56. So I looked closely at the gray "copy and crop" arrows in the architecture diagram. The paper does not explain how the crop is done; it only mentions cropping twice:

Every step in the expansive path consists of an upsampling of the feature map followed by a 2x2 convolution ("up-convolution") that halves the number of feature channels, a concatenation with the correspondingly cropped feature map from the contracting path, and two 3x3 convolutions, each followed by a ReLU. The cropping is necessary due to the loss of border pixels in every convolution.

Could someone please explain how I could make them compatible? The code works perfectly up to the BottleNeck, but since I am stuck at the cropping part, I could not test the logic and code flow of the Decoder.
'''
The whole UNet is divided into 3 parts: Encoder -> BottleNeck -> Decoder. There are skip connections between the Nth level of the Encoder and the Nth level of the Decoder.
There is 1 basic entity called the "Convolution" block, which has a 3*3 convolution (or transposed convolution during upsampling) -> ReLU -> BatchNorm.
Then there is maxpooling.
'''
import torch
import torch.nn as nn
class ConvolutionBlock(nn.Module):
    '''
    The basic convolution block: Convolution -> ReLU -> Convolution -> ReLU
    '''
    def __init__(self, input_features, out_features):
        '''
        args:
            batch_norm was introduced after UNET, so the authors did not know it existed. Might be useful
        '''
        super().__init__()
        self.network = nn.Sequential(
            nn.Conv2d(input_features, out_features, kernel_size=3, padding=0),  # padding is 0 by default; 1 means output width/height == input width/height
            nn.ReLU(),
            nn.Conv2d(out_features, out_features, kernel_size=3, padding=0),
            nn.ReLU(),
        )

    def forward(self, feature_map_x):
        '''
        feature_map_x could be the image itself or the feature map from the previous block
        '''
        return self.network(feature_map_x)
class Encoder(nn.Module):
    def __init__(self, image_channels: int = 3, blockwise_features=[64, 128, 256, 512]):
        '''
        In UNET, the features start at 64 and keep doubling until they reach the BottleNeck
        args:
            image_channels: channels in the input image, typically 1 or 3 (rarely 4)
            blockwise_features: each block has its own input and output features; the first Conv block outputs 64 features, the second 128, and so on
        '''
        super().__init__()
        repeat = len(blockwise_features)  # how many blocks we need to add
        self.layers = nn.ModuleList()
        for i in range(repeat):
            if i == 0:
                in_filters = image_channels
                out_filters = blockwise_features[0]
            else:
                in_filters = blockwise_features[i - 1]
                out_filters = blockwise_features[i]
            self.layers.append(ConvolutionBlock(in_filters, out_filters))
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)  # since there is no gradient for maxpooling, a single layer can be reused for the whole operation
        # https://datascience.stackexchange.com/questions/11699/backprop-through-max-pooling-layers

    def forward(self, feature_map_x):
        skip_connections = []  # the i-th level of encoder features is concatenated with the i-th level of the decoder before applying the conv block
        for layer in self.layers:
            feature_map_x = layer(feature_map_x)
            skip_connections.append(feature_map_x)
            feature_map_x = self.maxpool(feature_map_x)  # max pool AFTER storing the skip connection
        return feature_map_x, skip_connections
class BottleNeck(nn.Module):
    '''
    ConvolutionBlock without Max Pooling
    '''
    def __init__(self, input_features=512, output_features=1024):
        super().__init__()
        self.layer = ConvolutionBlock(input_features, output_features)

    def forward(self, feature_map_x):
        return self.layer(feature_map_x)
class Decoder(nn.Module):
    def __init__(self, blockwise_features=[512, 256, 128, 64]):
        '''
        Does exactly the opposite of the Encoder
        '''
        super().__init__()
        self.upsample_layers = nn.ModuleList()
        self.conv_layers = nn.ModuleList()
        for feature in blockwise_features:
            self.upsample_layers.append(nn.ConvTranspose2d(in_channels=feature * 2, out_channels=feature, kernel_size=2, stride=2))  # takes 1024 -> 512, 512 -> 256, ...
            self.conv_layers.append(ConvolutionBlock(feature * 2, feature))  # after concatenating (512 + 512 -> 1024), use the double conv block
    def forward(self, feature_map_x, skip_connections):
        '''
        Steps go as:
        1. Upsample
        2. Concat skip connection
        3. Apply ConvolutionBlock
        '''
        for i, layer in enumerate(self.conv_layers):  # 4 levels, 4 skip connections, 4 upsamplings, 4 double conv blocks
            feature_map_x = self.upsample_layers[i](feature_map_x)  # step 1
            feature_map_x = torch.cat((skip_connections[-i - 1], feature_map_x), dim=1)  # step 2
            feature_map_x = self.conv_layers[i](feature_map_x)  # step 3
        return feature_map_x

# ------------------- TESTING CODE -------------------
image = torch.randn(1, 1, 572, 572)  # a batch of 1 grayscale image, as described in the paper
enc = Encoder(1)
feat, skip = enc(image)
bot = BottleNeck()
feat = bot(feat)
# dec = Decoder()
# feats = dec(feat, skip)  # the error starts here, in the Decoder block
I think it refers to a center crop.
The code should be something like this:
def crop_image(image, new_shape):
    '''image: tensor of shape (batch_size, num_channels, height, width)
    new_shape: torch.Size object with the target size of the image,
    for example an original image of size (30, 3, 106, 106) and a new
    shape of (30, 20, 100, 100)
    '''
    h_crop_size = (image.shape[2] - new_shape[2]) // 2
    w_crop_size = (image.shape[3] - new_shape[3]) // 2
    h_start = h_crop_size
    h_end = h_start + new_shape[2]
    w_start = w_crop_size
    w_end = w_start + new_shape[3]
    return image[:, :, h_start:h_end, w_start:w_end]
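To wire this into the Decoder above (a sketch reusing the crop_image helper and the classes from the question), crop each stored skip connection to the upsampled feature map's shape before concatenating:

    def forward(self, feature_map_x, skip_connections):
        for i, layer in enumerate(self.conv_layers):
            feature_map_x = self.upsample_layers[i](feature_map_x)  # step 1: upsample
            skip = crop_image(skip_connections[-i - 1], feature_map_x.shape)  # crop encoder features to match spatial dims
            feature_map_x = torch.cat((skip, feature_map_x), dim=1)  # step 2: concatenate
            feature_map_x = layer(feature_map_x)  # step 3: double conv block
        return feature_map_x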

How to create a custom PreprocessingLayer in TF 2.2

I would like to create a custom preprocessing layer using the tf.keras.layers.experimental.preprocessing.PreprocessingLayer layer.
In this custom layer, placed after the input layer, I would like to normalize my image using tf.cast(img, tf.float32) / 255.
I tried to find some code or an example showing how to create this preprocessing layer, but I couldn't find any.
Please, can someone provide a full example of creating and using the PreprocessingLayer layer?
If you want a custom preprocessing layer, you actually don't need to use PreprocessingLayer. You can simply subclass Layer.
Take the simplest preprocessing layer, Rescaling, as an example; it lives under the tf.keras.layers.experimental.preprocessing namespace. However, if you check the actual implementation (source code link here), it is just a subclass of Layer decorated with @keras_export('keras.layers.experimental.preprocessing.Rescaling'):
@keras_export('keras.layers.experimental.preprocessing.Rescaling')
class Rescaling(Layer):
    """Multiply inputs by `scale` and adds `offset`.
    For instance:
    1. To rescale an input in the `[0, 255]` range
    to be in the `[0, 1]` range, you would pass `scale=1./255`.
    2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,
    you would pass `scale=1./127.5, offset=-1`.
    The rescaling is applied both during training and inference.
    Input shape:
        Arbitrary.
    Output shape:
        Same as input.
    Arguments:
        scale: Float, the scale to apply to the inputs.
        offset: Float, the offset to apply to the inputs.
        name: A string, the name of the layer.
    """
    def __init__(self, scale, offset=0., name=None, **kwargs):
        self.scale = scale
        self.offset = offset
        super(Rescaling, self).__init__(name=name, **kwargs)

    def call(self, inputs):
        dtype = self._compute_dtype
        scale = math_ops.cast(self.scale, dtype)
        offset = math_ops.cast(self.offset, dtype)
        return math_ops.cast(inputs, dtype) * scale + offset

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        config = {
            'scale': self.scale,
            'offset': self.offset,
        }
        base_config = super(Rescaling, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
This shows that the Rescaling preprocessing layer is just another normal layer.
The main part is the def call(self, inputs) function. You can implement whatever complicated logic there to preprocess your inputs and then return the result.
Easier documentation about custom layers can be found here.
In a nutshell, you can do the preprocessing in a layer, either with Lambda for simple operations or by subclassing Layer to achieve your goal.
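For the exact normalization in the question, a minimal subclassed layer might look like this (a sketch; the layer name NormalizeImage is arbitrary):

import tensorflow as tf

class NormalizeImage(tf.keras.layers.Layer):
    """Casts images to float32 and scales them to [0, 1]."""
    def call(self, inputs):
        return tf.cast(inputs, tf.float32) / 255.

# usage: place it right after the input layer
inp = tf.keras.Input((32, 32, 3))
x = NormalizeImage()(inp)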
I think the best and cleanest solution is to use a simple Lambda layer in which you wrap your preprocessing function.
This is a dummy working example:
import numpy as np
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
X = np.random.randint(0,256, (200,32,32,3))
y = np.random.randint(0,3, 200)
inp = Input((32,32,3))
x = Lambda(lambda x: x/255)(inp)
x = Conv2D(8, 3, activation='relu')(x)
x = Flatten()(x)
out = Dense(3, activation='softmax')(x)
m = Model(inp, out)
m.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
history = m.fit(X, y, epochs=10)

In Tensorflow - Is it possible to lock specific convolution filters in a layer, or to remove them altogether?

When using transfer learning in Tensorflow, I know that one can lock layers from further training, by doing:
for layer in pre_trained_model.layers:
    layer.trainable = False
Is it possible to lock specific filters in the layer instead?
As in: if the whole layer contains 64 filters, is it possible to:
lock only some of them, the ones that seem to contain reasonable filters, and re-train the ones that do not?
OR
remove the unreasonable-looking filters from the layer and retrain without them?
(for instance, to see if the retrained filters change a lot)
One possible solution is to implement a custom layer that splits the convolution into a separate convolution per filter and sets each channel (a convolution with one output channel) to trainable or not trainable. For example:
import tensorflow as tf
import numpy as np

class Conv2DExtended(tf.keras.layers.Layer):
    def __init__(self, filters, kernel_size, **kwargs):
        self.filters = filters
        self.conv_layers = [tf.keras.layers.Conv2D(1, kernel_size, **kwargs) for _ in range(filters)]
        super().__init__()

    def build(self, input_shape):
        _ = [l.build(input_shape) for l in self.conv_layers]
        super().build(input_shape)

    def set_trainable(self, channels):
        """Sets trainable channels."""
        for i in channels:
            self.conv_layers[i].trainable = True

    def set_non_trainable(self, channels):
        """Sets non-trainable channels."""
        for i in channels:
            self.conv_layers[i].trainable = False

    def call(self, inputs):
        results = [l(inputs) for l in self.conv_layers]
        return tf.concat(results, -1)
And a usage example:
inputs = tf.keras.layers.Input((28, 28, 1))
conv = Conv2DExtended(filters=4, kernel_size=(3, 3))
conv.set_non_trainable([1, 2])  # only channels 0 and 3 are trainable
res = conv(inputs)
res = tf.keras.layers.Flatten()(res)
res = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)(res)
model = tf.keras.models.Model(inputs, res)
model.compile(optimizer=tf.keras.optimizers.SGD(),
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(np.random.normal(0, 1, (10, 28, 28, 1)),
          np.random.randint(0, 2, (10)),
          batch_size=2,
          epochs=5)
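To confirm the freeze took effect (a sketch, assuming the model built above), you can inspect which weights Keras still treats as trainable; the kernels and biases of the frozen channels 1 and 2 should be missing from the list:

for w in model.trainable_weights:
    print(w.name, w.shape)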
