error when making my own activation function - python

I am trying to test a custom activation function on MNIST data,
but it raises the following error:
TypeError: Using a `tf.Tensor` as a Python `bool` is not allowed. Use `if t is not None:` instead of `if t:` to test if a tensor is defined, and use TensorFlow ops such as tf.cond to execute subgraphs conditioned on the value of a tensor.
Here is my activation function:
def fun1(x):
    return np.sqrt(x) if x >= 0 else -np.sqrt(-x)
and here is the model:
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), kernel_regularizer=regularizers.l2(w_l2),
                 input_shape=input_shape))
model.add(BatchNormalization())
model.add(Activation(fun1))
model.add(Conv2D(64, (3, 3), kernel_regularizer=regularizers.l2(w_l2)))
model.add(BatchNormalization())
model.add(Activation(fun1))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, kernel_regularizer=regularizers.l2(w_l2)))
model.add(BatchNormalization())
model.add(Activation(fun1))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
model.summary()

In the custom activation function, x is a tensor, so Keras backend methods must be used instead of numpy. Note also that K.sqrt(K.abs(x)) alone would drop the sign for negative inputs; to match your original signed square root, multiply by the sign:
from keras import backend as K

def fun1(x):
    # signed square root: sign(x) * sqrt(|x|)
    return K.sign(x) * K.sqrt(K.abs(x))
For more examples, look at the activations Keras itself defines:
https://github.com/keras-team/keras/blob/master/keras/activations.py
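As a quick sanity check (a minimal sketch, assuming TensorFlow 2.x eager execution), you can evaluate the backend version on a small tensor and compare it with the intended signed square root:
import tensorflow as tf
from tensorflow.keras import backend as K

def fun1(x):
    # signed square root: sign(x) * sqrt(|x|)
    return K.sign(x) * K.sqrt(K.abs(x))

x = tf.constant([-4.0, -1.0, 0.0, 1.0, 4.0])
print(fun1(x).numpy())  # expected: [-2. -1.  0.  1.  2.]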


TypeError: __init__() missing 1 required positional argument: 'units' issue [duplicate]

I am working in Python and TensorFlow, but I am missing the 'units' argument and I do not know how to solve it.
Here is the code:
def createModel():
    model = Sequential()
    # first set of CONV => RELU => MAX POOL layers
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=inputShape))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=NUM_CLASSES, activation='softmax'))
    # returns our fully constructed deep learning + Keras image classifier
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    # use binary_crossentropy if there are two classes
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model
print("Reshaping trainX at..."+ str(datetime.now()))
#print(trainX.sample())
print(type(trainX)) # <class 'pandas.core.series.Series'>
print(trainX.shape) # (750,)
from numpy import zeros
Xtrain = np.zeros([trainX.shape[0],HEIGHT, WIDTH, DEPTH])
for i in range(trainX.shape[0]): # 0 to traindf Size -1
Xtrain[i] = trainX[i]
print(Xtrain.shape) # (750,128,128,3)
print("Reshaped trainX at..."+ str(datetime.now()))
print("Reshaping valX at..."+ str(datetime.now()))
print(type(valX)) # <class 'pandas.core.series.Series'>
print(valX.shape) # (250,)
from numpy import zeros
Xval = np.zeros([valX.shape[0],HEIGHT, WIDTH, DEPTH])
for i in range(valX.shape[0]): # 0 to traindf Size -1
Xval[i] = valX[i]
print(Xval.shape) # (250,128,128,3)
print("Reshaped valX at..."+ str(datetime.now()))
# initialize the model
print("compiling model...")
sys.stdout.flush()
model = createModel()
# print the summary of model
from keras.utils import print_summary
print_summary(model, line_length=None, positions=None, print_fn=None)
# add some visualization
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
Try changing this line:
model.add(Dense(output_dim=NUM_CLASSES, activation='softmax'))
to
model.add(Dense(NUM_CLASSES, activation='softmax'))
I'm not experienced in Keras, but I could not find a parameter called output_dim on the documentation page for Dense. I think you meant to provide units but labelled it output_dim.
The Keras Dense layer documentation is as follows:
keras.layers.Dense(units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
Using the following:
classifier.add(Dense(6, activation='relu', kernel_initializer='glorot_uniform', input_dim=11))
will work, because units plays the role the old output_dim did: it says we want 6 neurons in the hidden layer. The weights are initialized with the Glorot uniform initializer, and input_dim=11 tells the layer that each input sample has 11 features from the dataset.
I think it's a version issue. In recent versions of Keras, Dense has no "output_dim" argument.
You can see this documentation link for Dense arguments.
https://keras.io/api/layers/core_layers/dense/
tf.keras.layers.Dense(
    units,
    activation=None,
    use_bias=True,
    kernel_initializer="glorot_uniform",
    bias_initializer="zeros",
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    **kwargs
)
So the first argument is "units", which is mandatory.
Instead of this line:
model.add(Dense(output_dim=NUM_CLASSES, activation='softmax'))
use this:
model.add(Dense(units=NUM_CLASSES, activation='softmax'))
or
model.add(Dense(NUM_CLASSES, activation='softmax'))
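To confirm the fix (a minimal sketch, assuming TensorFlow 2.x; NUM_CLASSES and the input shape are placeholder values for illustration):
import tensorflow as tf

NUM_CLASSES = 5  # placeholder value for illustration
model = tf.keras.Sequential([
    tf.keras.layers.Dense(512, activation='relu', input_shape=(128,)),
    tf.keras.layers.Dense(units=NUM_CLASSES, activation='softmax'),
])
model.summary()  # builds cleanly, with no TypeError about missing 'units'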

how to write a custom loss function in keras?

Here is the model I am writing the custom loss function for:
model = Sequential()
model.add(Conv2D(2,2,padding="same", activation="relu", input_shape=(8,8,1)))
model.add(Dropout(0.2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer='Adam', loss=custom_loss1, metrics=['accuracy'])
Here is the custom_loss1 function I wrote:
import keras.backend as kb

def custom_loss1(y_actual, y_predicted):
    value = kb.mean(kb.sum(kb.square((y_actual - y_predict) / 10)))
    return value
But I am getting this error:
ValueError: No gradients provided for any variable: ['conv2d_45/kernel:0', 'conv2d_45/bias:0', 'dense_45/kernel:0', 'dense_45/bias:0'].
What to do?
First, note that your function's parameter is named y_predicted but the body uses y_predict; if y_predict happens to refer to some other object in scope, the loss never depends on the model's output, which is exactly what produces the "No gradients provided" error. I am unsure without actually running the code myself, but you could also try writing the loss with TensorFlow ops:
def custom_loss1(y_actual, y_predicted):
    adj_diff = (y_actual - y_predicted) / 10
    diff_squared = tf.math.square(adj_diff)
    sum_diff = tf.math.reduce_sum(diff_squared)  # sum over the output dimension
    return tf.math.reduce_mean(sum_diff)
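For reference, here is a self-contained version of your setup (a sketch assuming TensorFlow 2.x; the data is random and only demonstrates that gradients now flow):
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Dropout, Flatten, Dense

def custom_loss1(y_actual, y_predicted):
    # mean over the batch of the summed squared (scaled) errors
    adj_diff = (y_actual - y_predicted) / 10
    return tf.math.reduce_mean(tf.math.reduce_sum(tf.math.square(adj_diff), axis=-1))

model = Sequential([
    Conv2D(2, 2, padding="same", activation="relu", input_shape=(8, 8, 1)),
    Dropout(0.2),
    Flatten(),
    Dense(1, activation="sigmoid"),
])
model.compile(optimizer="adam", loss=custom_loss1, metrics=["accuracy"])

x = np.random.rand(32, 8, 8, 1).astype("float32")
y = np.random.randint(0, 2, size=(32, 1)).astype("float32")
model.fit(x, y, epochs=1, verbose=0)  # trains without the "No gradients" error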

how to know which node is dropped after using keras dropout layer

From Nick's blog it is clear that the dropout layer of a CNN model drops nodes on the basis of a Bernoulli distribution. But how can this be verified, i.e. how can I check which nodes were not selected? In DropConnect some weights are dropped, so I think model.get_weights() can be used to verify that, but what about the dropout layer?
model = Sequential()
model.add(Conv2D(2, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(4, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(8, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.binary_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
Another question: the Keras documentation mentions that the dropout rate should be a float between 0 and 1. But with the above model, when I take a dropout rate of 1.25, the model still works. How does this happen?
Concerning your second question: if you look at the Keras source, the call method of the Dropout class reads:
def call(self, inputs, training=None):
    if 0. < self.rate < 1.:
        noise_shape = self._get_noise_shape(inputs)

        def dropped_inputs():
            return K.dropout(inputs, self.rate, noise_shape,
                             seed=self.seed)

        return K.in_train_phase(dropped_inputs, inputs,
                                training=training)
    return inputs
This means that if the rate is not strictly between 0 and 1, the layer does nothing and simply passes its inputs through, which is why rate = 1.25 silently "works".
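Concerning your first question, one way to actually see which nodes are dropped (a minimal sketch, assuming TensorFlow 2.x, where a layer can be called eagerly with training=True):
import tensorflow as tf

drop = tf.keras.layers.Dropout(0.5)
x = tf.ones((1, 8))           # a batch of ones, so dropped units are easy to spot
y = drop(x, training=True)    # force training-time behaviour outside model.fit
print(y.numpy())              # zeros mark dropped units; survivors are scaled by 1/(1 - rate)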

How do you use Keras LeakyReLU in Python?

I am trying to produce a CNN using Keras, and wrote the following code:
batch_size = 64
epochs = 20
num_classes = 5

cnn_model = Sequential()
cnn_model.add(Conv2D(32, kernel_size=(3, 3), activation='linear',
                     input_shape=(380, 380, 1), padding='same'))
cnn_model.add(Activation('relu'))
cnn_model.add(MaxPooling2D((2, 2), padding='same'))
cnn_model.add(Conv2D(64, (3, 3), activation='linear', padding='same'))
cnn_model.add(Activation('relu'))
cnn_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
cnn_model.add(Conv2D(128, (3, 3), activation='linear', padding='same'))
cnn_model.add(Activation('relu'))
cnn_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
cnn_model.add(Flatten())
cnn_model.add(Dense(128, activation='linear'))
cnn_model.add(Activation('relu'))
cnn_model.add(Dense(num_classes, activation='softmax'))
cnn_model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
I want to use Keras's LeakyReLU activation layer instead of Activation('relu'). I tried using LeakyReLU(alpha=0.1) in its place, but since this is an activation layer in Keras, I get an error about using an activation layer and not an activation function.
How can I use LeakyReLU in this example?
All advanced activations in Keras, including LeakyReLU, are available as layers, not as activations; therefore, you should use them as such:
from keras.layers import LeakyReLU
# instead of cnn_model.add(Activation('relu'))
# use
cnn_model.add(LeakyReLU(alpha=0.1))
Sometimes you just want a drop-in replacement for a built-in activation, without having to add extra activation layers just for this purpose.
For that, you can use the fact that the activation argument can be a callable object.
lrelu = lambda x: tf.keras.activations.relu(x, alpha=0.1)
model.add(Conv2D(..., activation=lrelu, ...))
Since a Layer is also a callable object, you could also simply use
model.add(Conv2D(..., activation=tf.keras.layers.LeakyReLU(alpha=0.1), ...))
which now works in TF2. This is a better solution, as it avoids the need to pass a custom_object during loading, as #ChristophorusReyhan mentioned.
You can import the class to make the code cleaner and then use it like any other activation. If you choose not to set alpha, don't forget the parentheses: LeakyReLU().
import tensorflow as tf
from tensorflow.keras.layers import LeakyReLU

model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(512, activation=LeakyReLU()))
model.add(tf.keras.layers.Dense(512, activation=LeakyReLU(alpha=0.1)))
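As a quick check that the layer behaves as expected (a minimal sketch, assuming TensorFlow 2.x eager mode):
import tensorflow as tf

leaky = tf.keras.layers.LeakyReLU(alpha=0.1)
x = tf.constant([-2.0, -0.5, 0.0, 1.0])
print(leaky(x).numpy())  # expected: [-0.2, -0.05, 0., 1.] (negatives scaled by alpha)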

Keras model to Theano function

I am trying to convert a trained model (code given below) to a Theano function, but I am getting the following error: AttributeError: 'Dense' object has no attribute 'output'.
The code for my model:
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
                        input_shape=(img_channels, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
The code I am using to convert the Keras model to a Theano function, following this tutorial:
from keras import backend as K

get_last_layer_output = K.function([model.layers[0].input],
                                   [model.layers[-1].output])
y = get_last_layer_output([patches])[0]
Can anyone please tell me what to do?
Try model.layers[-1].get_output(train=False). Older Keras versions (0.x) expose get_output() rather than an output attribute, so the tutorial you followed was probably written for a different Keras version than the one you have installed.
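Alternatively, on newer Keras (2.x), layers do have an output attribute, so the K.function approach from the question works as written; building a sub-model is another option (a minimal sketch; patches stands for your input batch):
from keras.models import Model

# expose the last layer's output as a new model's output
feature_model = Model(inputs=model.input, outputs=model.layers[-1].output)
y = feature_model.predict(patches)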
