Hi, I'm trying to implement an FFT in my model. I isolated the FFT layer to better see its effect, but when I call my model on any data it returns the input, unaffected.
Here's my code with sample data:
import matplotlib.pyplot as plt
from keras.layers import Input, Lambda
from keras.models import Model
import tensorflow as tf
import numpy as np
def fftModel1D(input_shape):
    x_input = Input(input_shape)
    x = Lambda(lambda v: tf.cast(tf.spectral.fft(tf.cast(v, dtype=tf.complex64)), tf.float32))(x_input)
    return Model(inputs=x_input, outputs=[x])
model = fftModel1D((1000, 1))
testData = np.asarray([np.expand_dims(np.sin(np.linspace(0, 100, 1000)), 1)])
pred = model.predict(testData)[0]
fig, axes = plt.subplots(1, 2)
axes[0].plot(np.squeeze(testData))
axes[1].plot(np.squeeze(pred))
plt.show()
This currently shows identical plots of sin(x) while I'm expecting the FFT on the second graph.
I'm using Python 3.6.8, Keras 2.2.4, Tensorflow 1.13.1
tf.fft only transforms the innermost axis, and here that axis has length 1, so the transform is the identity and the output equals the input. Since the input has two dimensions (shape (1000, 1)), using tf.spectral.fft2d works, because it transforms the innermost two axes; alternatively, squeeze the trailing dimension and apply tf.fft along the length-1000 axis.
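For reference, a minimal sketch of the fixed layer (assuming TF 1.x, where the ops live under tf.spectral; in later versions they are under tf.signal). It plots the magnitude via tf.abs instead of casting to float32, since the cast silently drops the imaginary part:
def fftModel1D_fixed(input_shape):
    x_input = Input(input_shape)
    # fft2d transforms the innermost two axes; with shape (1000, 1) this is
    # effectively a 1-D FFT along the length-1000 axis
    x = Lambda(lambda v: tf.abs(tf.spectral.fft2d(tf.cast(v, tf.complex64))))(x_input)
    return Model(inputs=x_input, outputs=[x])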
Hi, I have tried to print a tensor as a numpy array as follows:
import tensorflow as tf
import numpy as np
from keras.layers import Input
print(tf.executing_eagerly())
x = Input(shape=(32,))
print(x.numpy())
To be honest I'm sure there is a cleaner way to visualize your input tensor, but here's a hacky one for what it's worth:
import tensorflow as tf
def tf_print(x):
    tf.print("my tensor:")
    tf.print(x)
    return x
iput = tf.keras.layers.Input(shape=(1,), dtype='int32')
printt = tf.keras.layers.Lambda(tf_print)(iput) # branch that prints your tensor
oput = tf.keras.layers.Flatten()(iput) # branch that is the rest of your model
model = tf.keras.Model(inputs=[iput], outputs=[oput, printt])
model(tf.constant([[4]]))  # feed a batch of one sample so both branches run
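For context (not part of the original answer): Input() only creates a symbolic placeholder, so it has no values to call .numpy() on regardless of eager mode; to see concrete numbers you have to feed real data through a Model. A minimal sketch, where the Dense layer stands in for whatever your model does:
import numpy as np
import tensorflow as tf

x = tf.keras.layers.Input(shape=(32,))
y = tf.keras.layers.Dense(4)(x)
m = tf.keras.Model(x, y)
print(m.predict(np.random.randn(2, 32)))  # concrete values for a batch of 2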
I'm not sure how to modify my code to get Keras activations. I've seen conflicting examples of K.function() inputs and am not sure whether I'm getting per-layer outputs or activations.
Here is my code:
activity = 'Downstairs'
layer = 1
seg_x = create_segments_and_labels(df[df['ActivityEncoded']==mapping[activity]],TIME_PERIODS,STEP_DISTANCE,LABEL)[0]
get_layer_output = K.function([model_m.layers[0].input],[model_m.layers[layer].output])
layer_output = get_layer_output([seg_x])[0]
try:
    ax = sns.heatmap(layer_output[0].transpose(),cbar=True,cbar_kws={'label':'Activation'})
except:
    ax = sns.heatmap(layer_output.transpose(),cbar=True,cbar_kws={'label':'Activation','rotate':180})
ax.set_xlabel('Kernel',fontsize=30)
ax.set_yticks(range(0,len(layer_output[0][0])+1,10))
ax.set_yticklabels(range(0,len(layer_output[0][0])+1,10))
ax.set_xticks(range(0,len(layer_output[0])+1,5))
ax.set_xticklabels(range(0,len(layer_output[0])+1,5))
ax.set_ylabel('Filter',fontsize=30)
ax.xaxis.labelpad = 10
ax.set_title('Filter vs. Kernel\n(Layer=' + model_m.layers[layer].name + ')(Activity=' + activity + ')',fontsize=35)
Suggestions here on Stack Overflow just do it the same way I do:
Keras, How to get the output of each layer?
Example 4 adds K's learning phase to the mix, but my output is still the same.
https://www.programcreek.com/python/example/93732/keras.backend.function
Am I getting outputs or activations? The documentation implies I might need layers.activations, but I haven't been able to make that work.
Both my code and the version that passes in the learning phase produce this heatmap:
https://imgur.com/a/5fI6N0B
For layers defined as e.g. Dense(activation='relu'), layer.output will fetch the (relu) activations. To get layer pre-activations, you'll need to set activation=None (i.e. 'linear'), followed by an Activation layer. Example below.
from keras.layers import Input, Dense, Activation
from keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
import keras.backend as K
ipt = Input(shape=(8,))
x = Dense(10, activation=None)(ipt)
x = Activation('relu')(x)
out = Dense(1, activation='sigmoid')(x)
model = Model(ipt, out)
model.compile('adam', 'binary_crossentropy')
X = np.random.randn(16, 8)
outs1 = get_layer_outputs(model, model.layers[1], X, 1) # Dense
outs2 = get_layer_outputs(model, model.layers[2], X, 1) # Activation
plt.hist(np.ndarray.flatten(outs1), bins=200); plt.show()
plt.hist(np.ndarray.flatten(outs2), bins=200); plt.show()
Function used:
def get_layer_outputs(model, layer, input_data, learning_phase=1):
    # K.function expects lists of input and output tensors
    layer_fn = K.function([model.input, K.learning_phase()], [layer.output])
    return layer_fn([input_data, learning_phase])[0]
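As a side note (not part of the original answer), the same outputs can be fetched without K.function by wrapping the intermediate tensor in its own Model; predict runs in inference mode, so this corresponds to learning_phase=0. A hypothetical helper as a sketch:
from keras.models import Model

def get_layer_outputs_via_model(model, layer, input_data):
    # shares the trained weights with `model`; no recompilation needed
    intermediate = Model(model.input, layer.output)
    return intermediate.predict(input_data)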
I have a very minimal example of an autoencoder:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
First I create a data set with the highly correlated variables A and B (that are already normalized)
X = pd.DataFrame( (np.random.randn(1000,2)), columns=["A", "B"] )
X["B"] = X["A"] + X["B"]/4
Then I set up the autoencoder and train it:
aeInput = Input(shape=(2,))
encode = Dense(2, activation='relu')(aeInput)
aeOutput = Dense(2, activation='relu')(encode)
AE = Model(aeInput, aeOutput, name="autoencoder")
AE.compile(optimizer='adam', loss="mean_squared_error", )
TrainAE = AE.fit( x=X, y=X, epochs=100, batch_size=2**5,)
Training looks good and converges smoothly, but when I look at the result the output is mostly zeros.
f, ax = plt.subplots(figsize=(8, 8))
sns.kdeplot(X, shade=False, ax=ax)
sns.kdeplot(AE.predict(X), shade=False, ax=ax)
This seems very odd to me, because the encoding layer is as large as the input, so a trivial, loss-free solution would simply be to wire the first neuron straight through for A, with an activation of 1, and do the same for the second neuron encoding B. Why is this not happening? Am I using some parameter incorrectly?
One issue is that your final layer has the relu activation, which has a minimum of 0. If you would like to predict numbers less than 0 in the final layer, you can change the activation to "linear", like this:
aeOutput = Dense(2, activation='linear')(encode)
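A minimal sketch of the corrected setup, reusing the imports and the X defined in the question (note the hidden ReLU can still zero out part of the input space; with so few units, a linear encoding layer would make the identity mapping trivially learnable):
aeInput = Input(shape=(2,))
encode = Dense(2, activation='relu')(aeInput)
aeOutput = Dense(2, activation='linear')(encode)  # linear output can reproduce negative values
AE = Model(aeInput, aeOutput, name="autoencoder")
AE.compile(optimizer='adam', loss='mean_squared_error')
AE.fit(x=X, y=X, epochs=100, batch_size=32)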
How do I choose the value of classifier_fn in TensorFlow? I couldn't find any example of it:
tf.contrib.gan.eval.frechet_classifier_distance(
    real_images,
    generated_images,
    classifier_fn,
    num_batches=1
)
If you need the inception distance, then you can use a less generic function called tf.contrib.gan.eval.frechet_inception_distance which doesn't ask for a classifier_fn argument:
fid = tf.contrib.gan.eval.frechet_inception_distance(real_images, fake_images)
However, when I tried to use this function with v1.14 in eager execution mode, I got errors of various kinds, so eventually I decided to go with a custom solution. It will probably be helpful for you as well.
I encountered the following implementation by Jason Brownlee that seems to match the description from the original paper:
import numpy as np
import scipy.linalg
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.compat.v1 import ConfigProto
from skimage.transform import resize
tf.enable_eager_execution()
config = ConfigProto()
config.gpu_options.allow_growth = True
tf.keras.backend.set_session(tf.Session(config=config))
def scale_images(images, new_shape):
    return np.asarray([resize(image, new_shape, 0) for image in images])
def calculate_fid(model, images1, images2):
    # extract Inception features for both image sets
    f1, f2 = [model.predict(im) for im in (images1, images2)]
    mean1, sigma1 = f1.mean(axis=0), np.cov(f1, rowvar=False)
    mean2, sigma2 = f2.mean(axis=0), np.cov(f2, rowvar=False)
    sum_sq_diff = np.sum((mean1 - mean2)**2)
    # matrix square root of the product of the covariance matrices
    cov_mean = scipy.linalg.sqrtm(sigma1.dot(sigma2))
    if np.iscomplexobj(cov_mean):
        cov_mean = cov_mean.real  # discard small imaginary parts from numerical error
    fid = sum_sq_diff + np.trace(sigma1 + sigma2 - 2.0*cov_mean)
    return fid
if __name__ == '__main__':
    input_shape = (299, 299, 3)
    inception = InceptionV3(include_top=False, pooling='avg', input_shape=input_shape)
    (dataset, _), _ = keras.datasets.cifar10.load_data()
    dataset = dataset[:100]
    dataset = scale_images(dataset, input_shape)
    noise = preprocess_input(np.clip(255*np.random.uniform(size=dataset.shape), 0, 255))
    noise = scale_images(noise, input_shape)
    print('FID:', calculate_fid(inception, dataset, noise))
So we're performing the following steps:
re-scale images to the shape expected by InceptionV3;
transform the images using inception_v3.preprocess_input;
pass both tensors through InceptionV3 network (without top layer);
use the formula from the original paper with the computed features as input parameters.
The formula from the mentioned paper, which calculate_fid implements, is FID = ||μ1 − μ2||² + Tr(Σ1 + Σ2 − 2(Σ1Σ2)^(1/2)), where μ and Σ are the mean and covariance of the Inception features of the real and generated images.
I would like my Keras model to resize the input image using OpenCV or similar.
I have seen the use of ImageDataGenerator, but I would prefer to write my own generator and simply resize the image in the first layer with keras.layers.core.Lambda.
How would I do this?
If you are using the TensorFlow backend, then you can use the tf.image.resize_images() function to resize the images in a Lambda layer.
Here is a small example to demonstrate the same:
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
from keras.layers import Lambda, Input
from keras.models import Model
from keras.backend import tf as ktf
# 3 channel images of arbitrary shape
inp = Input(shape=(None, None, 3))
try:
    out = Lambda(lambda image: ktf.image.resize_images(image, (128, 128)))(inp)
except:
    # older versions of tensorflow take the height and width as separate arguments
    out = Lambda(lambda image: ktf.image.resize_images(image, 128, 128))(inp)
model = Model(inputs=inp, outputs=out)
model.summary()
X = scipy.ndimage.imread('test.jpg')
out = model.predict(X[np.newaxis, ...])
fig, Axes = plt.subplots(nrows=1, ncols=2)
Axes[0].imshow(X)
Axes[1].imshow(np.uint8(out[0, ...]))  # cast back to uint8 so the 0-255 values display correctly
plt.show()
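If you are on TensorFlow 2.x (a different setup than the question's), resize_images has been removed; the same idea works with tf.image.resize and tf.keras, roughly like this:
import tensorflow as tf
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model

inp = Input(shape=(None, None, 3))  # 3-channel images of arbitrary size
out = Lambda(lambda image: tf.image.resize(image, (128, 128)))(inp)
model = Model(inputs=inp, outputs=out)
model.summary()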