Adam optimizer: ValueError: No gradients provided for any variable - python

I am trying to optimize a filter's activation using a pretrained model (VGG16), with reduce_mean of the activation as the filter score. I constantly get the error "No gradients provided for any variable".
I would really appreciate any help. Thanks!
Here you can see the code:
import numpy as np
import tensorflow as tf
from tensorflow import keras

np.random.seed(1)
image_f = np.random.normal(size=[1, 32, 32, 3], scale=0.01).astype(np.float32)
img = tf.nn.sigmoid(image_f)
tf.compat.v1.keras.backend.set_image_data_format('channels_last')
model = keras.applications.VGG16(weights="imagenet", include_top=False)
optimizer = tf.keras.optimizers.Adam(epsilon=1e-08, learning_rate=0.05)
layer_weight = keras.Model(inputs=model.inputs, outputs=model.get_layer(name="block3_conv1").output)

for i in range(5):
    img = tf.Variable(img)
    filter_activation = layer_weight(img)[:, :, :, 5]

    def compute_activation():
        score = -1 * tf.reduce_mean(filter_activation)
        print(score)
        return score

    optimizer.minimize(compute_activation, [img])
    print(img)

I think the problem is that your variable img is not included in the calculation of your loss function. I modified your code according to the documentation: https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Optimizer.
import numpy as np
import tensorflow as tf
from tensorflow import keras

np.random.seed(1)
image_f = np.random.normal(size=[1, 32, 32, 3], scale=0.01).astype(np.float32)
img = tf.nn.sigmoid(image_f)
tf.compat.v1.keras.backend.set_image_data_format('channels_last')
model = keras.applications.VGG16(weights="imagenet", include_top=False)
optimizer = tf.keras.optimizers.Adam(epsilon=1e-08, learning_rate=0.05)
layer_weight = keras.Model(inputs=model.inputs, outputs=model.get_layer(name="block3_conv1").output)

# The variable only needs to be defined once
img = tf.Variable(img)

def compute_activation():
    # Include the variable img here so the loss depends on it
    filter_activation = layer_weight(img)[:, :, :, 5]
    score = -1 * tf.reduce_mean(filter_activation)
    print(score)
    return score

for i in range(5):
    optimizer.minimize(compute_activation, [img])
    print(img)
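For what it's worth, the same optimization can be written with an explicit tf.GradientTape, which makes the requirement visible: the variable has to be used inside the traced loss computation, otherwise no gradient can be produced for it. This is only a minimal sketch of an equivalent loop (reusing img, layer_weight and optimizer from above), not part of the original answer:

for i in range(5):
    with tf.GradientTape() as tape:
        # The tape records operations on img because it is a trainable tf.Variable
        filter_activation = layer_weight(img)[:, :, :, 5]
        score = -1 * tf.reduce_mean(filter_activation)
    # Gradient of the score w.r.t. the image variable, then one Adam step
    grads = tape.gradient(score, [img])
    optimizer.apply_gradients(zip(grads, [img]))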

Related

Pytorch - RuntimeError: Trying to backward through the graph a second time while trying to find a sparse feature map

I am trying to find a sparse feature map for a multi-scale CNN. Because I am new to PyTorch, I first attempted to implement the code for only one scale. I defined two optimizers, one for the filters and one for the feature map. When I run the code, I face this error. Any suggestion or help is greatly appreciated. The code is below.
The algorithm should work like the Expectation-Maximization algorithm: first the kernels are fixed and the sparse feature map (z) is optimized, then the kernels (weights) are updated. At least that is what I intended to follow; I am not sure the code actually does it. I attached an image showing the algorithm in case I didn't convey it correctly.
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('device is:', device)

# dataset definition
mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor()]))
mnist_testset = datasets.MNIST(root='./data', train=False, download=True, transform=transforms.Compose([transforms.ToTensor()]))
mnist_trainset.data = mnist_trainset.data[:10000]
mnist_testset.data = mnist_testset.data[:5000]

from torch.utils.data import DataLoader
train_dl = DataLoader(mnist_trainset, batch_size=16, shuffle=True)
test_dl = DataLoader(mnist_testset, batch_size=1024, shuffle=False)

from torch.optim import SGD
from torch.nn import Module
from torch.nn import Conv2d
from tqdm import tqdm
from torch.autograd import Variable
import scipy.signal as sps

class MNIST_ISTA(Module):
    # define model elements
    def __init__(self):
        self.lambda_ = 0.5e-5
        super(MNIST_ISTA, self).__init__()
        self.scale1 = Conv2d(in_channels=1, out_channels=1, kernel_size=3, bias=False)
        self.z = None
        self.alpha = 1

    def ista_(self, img_batch):
        self.z = torch.normal(0, 1, size=(img_batch.shape[0], img_batch.shape[1], img_batch.shape[2], img_batch.shape[3]), requires_grad=True)
        converged = False
        optim = SGD([{'params': self.z, "lr": 1e-20}])
        while not converged:
            z_old = self.z.clone().detach()
            feature_map = self.scale1(img_batch)
            output_image = sps.fftconvolve(feature_map.detach().numpy(), self.scale1.weight.detach().numpy())
            output_image = torch.from_numpy(output_image)
            loss = ((img_batch - pred)**2).sum() + self.alpha*torch.norm(self.z, p=1)
            loss.backward()
            optim.step()
            self.z.grad.zero_()
            self.z.data = self.soft_thresholding_(self.z, self.lambda_)
            converged = torch.norm(self.z - z_old)/torch.norm(z_old) < 1e+8

    def num_flat_features(self, x):
        size = x.size()[1:]
        num = 1
        for s in size:
            num *= s
        return num

    def soft_thresholding_(self, x, alpha):
        with torch.no_grad():
            rtn = F.relu(x - alpha) - F.relu(-x - alpha)
        return rtn.data

    def forward(self, img_batch):
        self.ista_(img_batch)
        pred = self.scale1(img_batch)
        return pred

    def zero_grad(self):
        self.scale1.zero_grad()

ista_model = MNIST_ISTA()
optim = SGD([{'params': ista_model.scale1.weight, "lr": 1e-15}])

for epoch in range(5):
    running_loss = 0
    with torch.no_grad():
        ista_model.scale1.weight.div_(torch.norm(ista_model.scale1.weight, dim=None, keepdim=True))
    for data in tqdm(train_dl, desc='training', total=len(train_dl)):
        img_batch = data[0]
        pred = ista_model(img_batch)
        loss = ((img_batch - pred) ** 2).sum()
        running_loss += loss.item()
        loss.backward()
        optim.step()
        ista_model.zero_grad()
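For orientation only, here is a minimal sketch of the alternating scheme described in the question: inner ISTA-style updates of the code z with the kernel frozen, then an outer update of the kernel with z frozen. It is not the asker's exact model; the shapes, learning rates and the plain F.conv2d reconstruction are illustrative assumptions. The relevant point for the error in the title is that the loss is rebuilt from the current tensors before every backward() call, so no graph is ever backward-ed through twice:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
img_batch = torch.randn(16, 1, 28, 28)                 # stand-in for an MNIST batch
weight = torch.randn(1, 1, 3, 3, requires_grad=True)   # dictionary / kernel
z = torch.randn(16, 1, 30, 30, requires_grad=True)     # sparse code (30x30 so the 3x3 conv gives 28x28)
alpha, lr_z, lr_w = 1e-3, 1e-2, 1e-3

for outer in range(5):
    # E-like step: update z while the kernel stays fixed
    for inner in range(10):
        recon = F.conv2d(z, weight.detach())           # detach so only z receives gradients here
        loss_z = ((img_batch - recon) ** 2).sum() + alpha * z.abs().sum()
        loss_z.backward()                              # fresh graph each iteration, no retain_graph needed
        with torch.no_grad():
            z -= lr_z * z.grad
            z.grad.zero_()
    # M-like step: update the kernel while z stays fixed
    recon = F.conv2d(z.detach(), weight)
    loss_w = ((img_batch - recon) ** 2).sum()
    loss_w.backward()
    with torch.no_grad():
        weight -= lr_w * weight.grad
        weight.grad.zero_()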

How to get gradients of weights w.r.t. a target neuron?

I have found code online to get the derivative of the total loss with respect to the deep learning weights. I am trying to find the derivative of the weights with respect to the loss of a single class instead of all classes.
I used the following code to get the gradient of an input image with respect to the total loss. If I visualize it, it shows the importance of the pixels for all predictions. But I would like to compute the derivative of the input image with respect to a particular class (e.g. "lady_bug"). This should show the importance of the pixels for the prediction of lady_bug. Do you have an idea how I can do that?
from keras.applications.vgg19 import VGG19
import numpy as np
import cv2
from keras import backend as K
import matplotlib.pyplot as plt
from keras.applications.inception_v3 import decode_predictions

def get_model():
    model = VGG19(include_top=True, weights='imagenet')
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def predict(model, images):
    numeric_prediction = model.predict(images)
    categorical_prediction = decode_predictions(numeric_prediction, top=1)
    return [(x[0][1], x[0][2]) for x in categorical_prediction]

def get_test_image():
    # Image
    image_path = "lady_bug.jpg"
    image = cv2.imread(image_path)
    my_image = cv2.resize(image, (224, 224))
    my_image = np.expand_dims(my_image, axis=0)
    return my_image

def visualize_sample(sample, file_path):
    plt.figure()
    plt.imshow(sample)
    plt.savefig(file_path, bbox_inches='tight')

def test_input_gradient():
    images = get_test_image()
    model = get_model()
    prediction = predict(model, images)
    print(prediction)

    gradients = K.gradients(model.output, model.input)  # Gradient of output wrt the input of the model (Tensor)
    print(gradients)
    sess = K.get_session()
    evaluated_gradients = sess.run(gradients[0], feed_dict={model.input: images})
    visualize_sample((evaluated_gradients[0]*(10**9.5)).clip(0, 255), "test.png")

if __name__ == "__main__":
    test_input_gradient()
Output:
[('ladybug', 0.53532666)]
[<tf.Tensor 'gradients/block1_conv1/convolution_grad/Conv2DBackpropInput:0' shape=(?, 224, 224, 3) dtype=float32>]
It seems the code is taking the gradients of the outputs w.r.t. the inputs.
So, to target one class, you just take a single slice from the outputs.
Warning: this assumes a regular model output; I have no idea what you're doing in decode_predictions and the following list.
gradients = K.gradients(model.output[:, lady_bug_class], model.input)
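Here, lady_bug_class is the integer index of the target class in the model's 1000-way output. One way to obtain it (an illustrative addition, reusing model, images and sess from the script above, not part of the original answer) is to take the argmax of the prediction and then evaluate the sliced gradient exactly as before:

numeric_prediction = model.predict(images)
lady_bug_class = np.argmax(numeric_prediction[0])  # index of the top predicted class

gradients = K.gradients(model.output[:, lady_bug_class], model.input)
evaluated_gradients = sess.run(gradients[0], feed_dict={model.input: images})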

Fréchet Inception Distance parameters choice in Tensorflow?

How do I choose the value of classifier_fn in TensorFlow? I couldn't find any example of it:
tf.contrib.gan.eval.frechet_classifier_distance(
    real_images,
    generated_images,
    classifier_fn,
    num_batches=1
)
If you need the inception distance, then you can use a less generic function called tf.contrib.gan.eval.frechet_inception_distance which doesn't ask for a classifier_fn argument:
fid = tf.contrib.gan.eval.frechet_inception_distance(real_images, fake_images)
However, when I tried to use this function with v1.14 in eager execution mode, I got errors of various kinds, so eventually I decided to go with a custom solution. It will probably be helpful for you as well.
I encountered the following implementation by Jason Brownlee that seems to match the description from the original paper:
import numpy as np
import scipy.linalg
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.compat.v1 import ConfigProto
from skimage.transform import resize

tf.enable_eager_execution()

config = ConfigProto()
config.gpu_options.allow_growth = True
tf.keras.backend.set_session(tf.Session(config=config))

def scale_images(images, new_shape):
    return np.asarray([resize(image, new_shape, 0) for image in images])

def calculate_fid(model, images1, images2):
    f1, f2 = [model.predict(im) for im in (images1, images2)]
    mean1, sigma1 = f1.mean(axis=0), np.cov(f1, rowvar=False)
    mean2, sigma2 = f2.mean(axis=0), np.cov(f2, rowvar=False)
    sum_sq_diff = np.sum((mean1 - mean2)**2)
    cov_mean = scipy.linalg.sqrtm(sigma1.dot(sigma2))
    if np.iscomplexobj(cov_mean):
        cov_mean = cov_mean.real
    fid = sum_sq_diff + np.trace(sigma1 + sigma2 - 2.0*cov_mean)
    return fid

if __name__ == '__main__':
    input_shape = (299, 299, 3)
    inception = InceptionV3(include_top=False, pooling='avg', input_shape=input_shape)
    (dataset, _), _ = keras.datasets.cifar10.load_data()
    dataset = dataset[:100]
    dataset = scale_images(dataset, input_shape)
    noise = preprocess_input(np.clip(255*np.random.uniform(size=dataset.shape), 0, 255))
    noise = scale_images(noise, input_shape)
    print('FID:', calculate_fid(inception, dataset, noise))
So we're performing the following steps:
re-scale images to the shape expected by InceptionV3;
transform the images using inception_v3.preprocess_input;
pass both tensors through InceptionV3 network (without top layer);
use the formula from the original paper with the computed features as input parameters.
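For reference, calculate_fid above implements the standard Fréchet distance between two Gaussians fitted to the Inception features, with means $\mu_1, \mu_2$ and covariances $\Sigma_1, \Sigma_2$:

$$\mathrm{FID} = \lVert \mu_1 - \mu_2 \rVert^2 + \mathrm{Tr}\!\left(\Sigma_1 + \Sigma_2 - 2\,(\Sigma_1 \Sigma_2)^{1/2}\right)$$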

Trouble feeding data into tensorflow graph

I have trained a neural network model on MNIST dataset using the script mnist_3.1_convolutional_bigger_dropout.py provided in this tutorial.
I wanted to test the trained model on a custom dataset, hence I wrote a small script, predict.py, which loads the trained model and feeds the data to it. I tried 2 methods for preprocessing the images so that they are compatible with the MNIST format.
Method 1: Resizing the image to 28x28
Method 2: The technique mentioned here is used
Both of these methods result in the error
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_2' with dtype float
predict.py
# Importing libraries
from scipy.misc import imread
import tensorflow as tf
import numpy as np
import cv2 as cv
import glob
from test import imageprepare

files = glob.glob('data2/*.*')
#print(files)

# Method 1
'''
img_data = []
for fl in files:
    img = imageprepare(fl)
    img = img.reshape(img.shape[0], img.shape[1], 1)
    img_data.append(img)
'''

# Method 2
dig_cont = [cv.imread(fl, 0) for fl in files]
#print(len(dig_cont))
img_data = []
for i in range(len(dig_cont)):
    img = cv.resize(dig_cont[i], (28, 28))
    img = img.reshape(img.shape[0], img.shape[1], 1)
    img_data.append(img)

print("Restoring Model ...")
sess = tf.Session()
# Step-1: Recreate the network graph. At this step only graph is created.
tf_saver = tf.train.import_meta_graph('model/model.meta')
# Step-2: Now let's load the weights saved using the restore method.
tf_saver.restore(sess, tf.train.latest_checkpoint('model'))
print("Model restored")

x = tf.get_default_graph().get_tensor_by_name('X:0')
print('x :', x.shape)
y = tf.get_default_graph().get_tensor_by_name('Y:0')
print('y :', y.shape)

dict_data = {x: img_data}
result = sess.run(y, feed_dict=dict_data)
print(result)
print(result.shape)
sess.close()
The problem is fixed: I forgot to pass the value of the variable pkeep. I had to make the following change to make it work.
dict_data = {x: img_data, pkeep: 1.0}
instead of
dict_data = {x: img_data}
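For completeness, the dropout placeholder has to be fetched from the restored graph the same way x and y are. A minimal sketch, assuming the placeholder was created with the name 'pkeep' in the training script (adjust the name to match your graph):

pkeep = tf.get_default_graph().get_tensor_by_name('pkeep:0')
dict_data = {x: img_data, pkeep: 1.0}  # keep probability 1.0 disables dropout at inference
result = sess.run(y, feed_dict=dict_data)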

tf.gradients only returns [None]

I tried to build my own deep dream algorithm with this code using the Inception Neural Network from Google:
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# I am using the Kadenze CADL helper function ---> https://github.com/pkmital/CADL/tree/master/session-4/libs
import inception

img = np.random.rand(1, 1920, 1080, 3)

net = inception.get_inception_model()
tf.import_graph_def(net['graph_def'], name='inception')
graph = tf.get_default_graph()

layer = graph.get_tensor_by_name('inception/mixed5b_pool_reduce_pre_relu:0')
gradient = tf.gradients(tf.reduce_mean(layer), img)

sess = tf.Session()
init = tf.global_variables_initializer()
iters = 1440
sess.run(init)

for i in range(iters):
    print(i+1)
    grad = sess.run(gradient[0])[0]
    img += grad
    plt.imshow(img[0])
    plt.savefig('output/'+str(i+1)+'.png')
    plt.close('all')
But the line tf.gradients(tf.reduce_mean(layer), img) only returns [None]. This (of course) causes an error. Can anyone tell me how to fix it?
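The usual cause here is that tf.gradients can only differentiate with respect to tensors in the TensorFlow graph, and img is a plain NumPy array rather than a graph tensor. A minimal sketch of one common fix (not from this thread; the input tensor name 'inception/input:0' is an assumption, check the helper for the real name) is to take the gradient with respect to the graph's input tensor and feed the image at run time:

x = graph.get_tensor_by_name('inception/input:0')  # assumption: the imported graph's input tensor
layer = graph.get_tensor_by_name('inception/mixed5b_pool_reduce_pre_relu:0')
gradient = tf.gradients(tf.reduce_mean(layer), x)   # differentiate w.r.t. a graph tensor

sess = tf.Session()
img = np.random.rand(1, 1920, 1080, 3)
for i in range(iters):
    grad = sess.run(gradient[0], feed_dict={x: img})[0]
    img[0] += grad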
