Tensorflow Prediction always zero - python

I am a TensorFlow newbie. I have a model generated using convNetKerasLarge.py and saved as a TFLite model.
I am trying to test this saved model as follows:
import tensorflow as tf
import numpy as np
import glob
from skimage.transform import resize
from skimage import io
# out of previously used training and test set
start = 4001
# no of images
row_count = 1
end = start + row_count
n_image_rows = 106
n_image_cols = 106
np_val_images = np.zeros(shape=(1, 1))
np_val_labels = np.zeros(shape=(1, 1))
def prepare_validation_set():
    global np_val_images
    global np_val_labels
    positive_samples = glob.glob('datasets/drunk_resize_frontal_faces/pos/*')[start:end]
    # negative_samples = glob.glob('datasets/drunk_resize_frontal_faces/neg/*')[start:end]
    # negative_samples = random.sample(negative_samples, len(positive_samples))
    val_images = []
    val_labels = []
    for i in range(len(positive_samples)):
        val_images.append(resize(io.imread(positive_samples[i]), (n_image_rows, n_image_cols)))
        val_labels.append(1)
    # for i in range(len(negative_samples)):
    #     val_images.append(resize(io.imread(negative_samples[i]), (n_image_rows, n_image_cols)))
    #     val_labels.append(0)
    np_val_images = np.array(val_images)
    np_val_labels = np.array(val_labels)
def run_tflite_model(tflite_file, index):
    prepare_validation_set()
    # Initialize the interpreter
    interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    test_image = np_val_images[index]
    test_image = np.expand_dims(test_image, axis=0).astype(input_details["dtype"])
    interpreter.set_tensor(input_details["index"], test_image)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details["index"])[0]
    print(output_details)
    prediction = output.argmax()
    print(prediction)
if __name__ == '__main__':
    test_image_index = 1
    tflite_model_file = "models/converted/model.tflite"
    run_tflite_model(tflite_model_file, 0)
If I run this, I get the prediction 0 even though the label should be 1, since I am inputting a positive image. (FYI: Test loss: 0.08881912380456924, test accuracy: 0.9729166626930237 with 10 epochs.) I am confident there is a mistake in my code that causes this; please help me find it.

The script you linked normalizes the data before training by subtracting the mean (here 0.5) and dividing by the standard deviation (here 1):
mean = np.array([0.5,0.5,0.5])
std = np.array([1,1,1])
X_train = X_train.astype('float')
X_test = X_test.astype('float')
for i in range(3):
    X_train[:,:,:,i] = (X_train[:,:,:,i] - mean[i]) / std[i]
    X_test[:,:,:,i] = (X_test[:,:,:,i] - mean[i]) / std[i]
If you don't repeat the same operations before doing a prediction with the model, the input you are passing to the model will not have the same characteristics as the data you trained with.
You could fix it by subtracting the mean (0.5) from the images when preparing the data, i.e.:
np_val_images = np.array(val_images) - 0.5
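For example, a minimal sketch of prepare_validation_set with that normalization folded in (it reuses the imports and globals from the question; since std is 1, only the mean subtraction actually changes the values):
def prepare_validation_set():
    global np_val_images
    global np_val_labels
    mean = np.array([0.5, 0.5, 0.5])  # same statistics as in the training script
    std = np.array([1, 1, 1])
    positive_samples = glob.glob('datasets/drunk_resize_frontal_faces/pos/*')[start:end]
    val_images = []
    val_labels = []
    for path in positive_samples:
        image = resize(io.imread(path), (n_image_rows, n_image_cols))  # resize returns floats in [0, 1]
        image = (image - mean) / std  # apply the training-time normalization per channel
        val_images.append(image)
        val_labels.append(1)
    np_val_images = np.array(val_images)
    np_val_labels = np.array(val_labels)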


How to make the keras model.predict() faster on large datasets?

I am running the PointNet point-cloud CNN. It works well up until I try to predict on a very large dataset of ~20k files. I know some may suggest multiprocessing, but I am a little newer to this and do not know how to do it with Keras models (I do have access to higher computing capabilities through a university). I am also wondering whether there is a way to run this on a GPU, or whether changing the batch size would help. I am trying to create an end product where the model predicts and I translate the result to the class-name strings of the model. Does having more epochs above affect prediction time? Here is the portion of code where I am running this...
import os
import glob
from sklearn import metrics
import trimesh
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd
#### Create folder for results
def createFolder(directory): # creates a folder in the working directory to save all files produced into the folder.
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError:
        print('Error: Creating directory. ' + directory)
createFolder('MODEL_OUTCOMES')
#####################################################
########## Loading in data set from folder ##########
#####################################################
DATA_DIR ="/home/sat164/pytorch/Data_Set_Spaghetti"
num_epocs = 1 # number of epocs
def parse_dataset(num_points=7000):
    train_points = []
    train_labels = []
    test_points = []
    test_labels = []
    class_map = {}
    real_names = {}
    folders = glob.glob(DATA_DIR + "/*")
    for i, folder in enumerate(folders):
        # Lets you know it has found the folder
        pathname = os.path.basename(folder)
        print(f"processing class: {pathname}")
        # store folder name with ID by getting base name of folder
        class_map[i] = os.path.basename(folder)
        # real_names[i] = folder.split("/")[-1]  # if you want the actual names
        # Iterate through the data set to find Train and Test files
        train_files = glob.glob(os.path.join(folder, "Train/*"))
        test_files = glob.glob(os.path.join(folder, "Test/*"))
        # Train data
        for f in train_files:
            a = np.loadtxt(f, dtype=float)
            train_points.append(a)
            train_labels.append(i)
        # Test data
        for f in test_files:
            b = np.loadtxt(f, dtype=float)
            test_points.append(b)
            test_labels.append(i)
    return (np.array(train_points),
            np.array(test_points),
            np.array(train_labels),
            np.array(test_labels),
            class_map,
            )
NUM_POINTS = 7000 # How many points to sample
# -- Each has 7000 lines of x,y,z
# so must be 7000 unless increased in CoordinateSaveMe.py
NUM_CLASSES = 2 # binary (GPCR or not)
BATCH_SIZE = 16 # Smaller Batch sizes are less memory and potentially better generalization
train_points, test_points, train_labels, test_labels, CLASS_MAP = parse_dataset(
NUM_POINTS
)
# Augmentation function to jitter and shuffle the train dataset.
def augment(points, label):
    # jitter points
    points += tf.random.uniform(points.shape, -0.005, 0.005, dtype=tf.float64)
    # shuffle points
    points = tf.random.shuffle(points)
    return points, label
# set the shuffle buffer size to the entire size of the dataset
# as prior to this the data is ordered by class
train_dataset = tf.data.Dataset.from_tensor_slices((train_points, train_labels))
test_dataset = tf.data.Dataset.from_tensor_slices((test_points, test_labels))
train_dataset = train_dataset.shuffle(len(train_points)).map(augment).batch(BATCH_SIZE)
test_dataset = test_dataset.shuffle(len(test_points)).batch(BATCH_SIZE)
print("\n\n\n********** DATA SET COMPLETE **********\n\n\n")
#####################################
########## Build the Model ##########
#####################################
def conv_bn(x, filters):
    x = layers.Conv1D(filters, kernel_size=1, padding="valid")(x)
    x = layers.BatchNormalization(momentum=0.00)(x)
    return layers.Activation("relu")(x)
def dense_bn(x, filters):
    x = layers.Dense(filters)(x)
    x = layers.BatchNormalization(momentum=0.00)(x)
    return layers.Activation("relu")(x)
### Point net uses Two components ###
# The primary MLP network, and the transformer net (T-net).
# The T-net aims to learn an affine transformation matrix by its own mini network.
# The T-net is used twice.
# The first time to transform the input features (n, 3) into a canonical representation.
# The second is an affine transformation for alignment in feature space (n, 3).
# As per the original paper we constrain the transformation to be close to an orthogonal matrix
# (i.e. ||X*X^T - I|| = 0).
class OrthogonalRegularizer(keras.regularizers.Regularizer):
    def __init__(self, num_features, l2reg=0.001):
        self.num_features = num_features
        self.l2reg = l2reg
        self.eye = tf.eye(num_features)

    def __call__(self, x):
        x = tf.reshape(x, (-1, self.num_features, self.num_features))
        xxt = tf.tensordot(x, x, axes=(2, 2))
        xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features))
        return tf.reduce_sum(self.l2reg * tf.square(xxt - self.eye))
### We can define a general function to build T-net layers ###
def tnet(inputs, num_features):
    # Initialise bias as the identity matrix
    bias = keras.initializers.Constant(np.eye(num_features).flatten())
    reg = OrthogonalRegularizer(num_features)
    x = conv_bn(inputs, 32*2)
    x = conv_bn(x, 64*2)
    x = conv_bn(x, 512*2)
    x = layers.GlobalMaxPooling1D()(x)
    x = dense_bn(x, 256*2)
    x = dense_bn(x, 128*2)
    x = layers.Dense(
        num_features * num_features,
        kernel_initializer="zeros",
        bias_initializer=bias,
        activity_regularizer=reg,
    )(x)
    feat_T = layers.Reshape((num_features, num_features))(x)
    # Apply affine transformation to input features
    return layers.Dot(axes=(2, 1))([inputs, feat_T])
inputs = keras.Input(shape=(NUM_POINTS, 3))
x = tnet(inputs, 3)
x = conv_bn(x, 32)
x = conv_bn(x, 32)
x = tnet(x, 32)
x = conv_bn(x, 32)
x = conv_bn(x, 64)
x = conv_bn(x, 512)
x = layers.GlobalMaxPooling1D()(x)
x = dense_bn(x, 256)
x = layers.Dropout(0.3)(x)
x = dense_bn(x, 128)
x = layers.Dropout(0.3)(x)
outputs = layers.Dense(NUM_CLASSES, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs, name="GpcrHunter")
model.summary()
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.Adam(learning_rate=0.001),
metrics=["sparse_categorical_accuracy"],)
history = model.fit(train_dataset, epochs=num_epocs, validation_data=test_dataset)
This is the area I am struggling with.
import time
path = "/home/sat164/pytorch/predict_Whole/Test/spaghetti_whole_set"
folders = glob.glob(path + "/*.txt")
dataset_labels= []
testSet = []
for i in folders:
    a = np.loadtxt(i, dtype=float)
    testSet.append(a)
    dataset_labels.append(os.path.basename(i))
testSet = np.array(testSet)
dataset_labels = np.array(dataset_labels)
test_dataset = tf.data.Dataset.from_tensor_slices((testSet, dataset_labels))
test_dataset = test_dataset.shuffle(len(testSet)).batch(10000)
t0= time.clock()
preds2 = model.predict(test_dataset)
preds2 = tf.math.argmax(preds2,-1)
prediction = []
for i in range(len(dataset_labels)):
    prediction.append(CLASS_MAP[preds2[i].numpy()])
df = pd.DataFrame()
df["Prediction"] = prediction[0::1]
df["Real-label"] = dataset_labels[0::1]
df.to_excel('MODEL_OUTCOMES/Alpha_fold_results.xlsx', index = False)
t1 = time.clock() - t0
print("FINISHED.......Time elapsed: ", t1)
I have tried looking online for ways to do the prediction; it seems to work well with a small number of files, say 10-100, but then the time really increases. Prediction takes a long time and I cannot figure out how to make the model multiprocess in some way. I have tried changing the batch size but haven't noticed much difference, and I am not sure whether that even helps. I feel as though there is an easier way, but I don't fully understand how models predict, especially how to use a frozen snapshot of the model that has good accuracy on the validation/training set.
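For reference, here is a minimal sketch of what I mean by changing the batch size for prediction (the batch size of 32 and the prefetch call are just examples, not values I have benchmarked):
# check whether TensorFlow actually sees a GPU
print(tf.config.list_physical_devices('GPU'))
pred_dataset = tf.data.Dataset.from_tensor_slices(testSet)
pred_dataset = pred_dataset.batch(32).prefetch(tf.data.experimental.AUTOTUNE)  # overlap input preparation with inference
preds2 = model.predict(pred_dataset)
preds2 = np.argmax(preds2, axis=-1)  # class index per file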
THANKS

Tensorflow rotate with random uniform take 1 positional argument but 2 were given

I have the following code that uses tensorflow to calculate a custom average loss when the image is consistently rotated:
import tensorflow as tf
import cv2
#initialize x_hat
img = cv2.imread("4.jpg")
x_hat = tf.Variable(img,name = 'x_hat') #img we want to attack
@tf.function
def cost2():
    image = x_hat
    # Now it will generate 100 samples rotated
    num_samples = 100
    average_loss = 0
    for j in range(num_samples):
        # ADD ROTATION (there may be a problem here)
        rotated = tf.keras.preprocessing.image.random_rotation(image,
            tf.random.uniform(shape=(), minval=40, maxval=90), channel_axis=2)
        # get logits
        rotated_logits, _ = resnet(rotated)
        # get average CUSTOM loss
        average_loss += -1 * tf.nn.softmax_cross_entropy_with_logits(logits=rotated_logits, labels=labels) / num_samples
    return average_loss
and here is how I call it:
learning_rate = 1e-1
optim = tf.optimizers.SGD(learning_rate=learning_rate)
epsilon = 2.0/255.0 # a really small perturbation
below = x - epsilon
above = x + epsilon
demo_steps = 200
# projected gradient descent
for i in range(demo_steps):
    loss = optim.minimize(cost2, var_list=[x_hat])
    if (i+1) % 10 == 0:
        print('step %d, loss=%g' % (i+1, loss.numpy()))
    projected = tf.clip_by_value(tf.clip_by_value(x_hat, below, above), 0, 1)
    with tf.control_dependencies([projected]):
        x_hat.assign(projected)
adv_robust = x_hat.numpy()
However, the following error is returned once I run the code:
TypeError: in user code:
<ipython-input-183-abde02909da7>:14 cost2 *
rotated = tf.keras.preprocessing.image.random_rotation(image,
tf.random.uniform(shape=(),minval=40, maxval=90),channel_axis=2)
/home/me/.local/lib/python3.8/site-packages/keras_preprocessing/image/affine_transformations.py:55 random_rotation *
theta = np.random.uniform(-rg, rg)
mtrand.pyx:1111 numpy.random.mtrand.RandomState.uniform **
TypeError: __array__() takes 1 positional argument but 2 were given
I am on Tensorflow 2.4.0 and the random_rotation and random.uniform functions are correct according to the TF 2.4.0 documentation HERE and HERE. So, what am I missing here?
The error might be coming from using TF tensors. As stated in the docs you linked regarding random_rotation:
Performs a random rotation of a Numpy image tensor.
Meaning you cannot use TF tensors with this operation. If you are in eager execution mode you can use tensor.numpy():
import tensorflow as tf
image = tf.random.normal((180, 180, 3))
rotated = tf.keras.preprocessing.image.random_rotation(image.numpy(),
tf.random.uniform(shape=(),minval=40, maxval=90).numpy(),channel_axis=2)
Otherwise, it is recommended to use the preprocessing layer tf.keras.layers.RandomRotation, since using numpy in graph mode (for example in a function decorated with @tf.function) is not recommended.
Here is an example using the tf.keras.layers.RandomRotation:
import tensorflow as tf
import os
import matplotlib.pyplot as plt
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
BATCH_SIZE = 1
IMG_SIZE = (160, 160)
train_ds = tf.keras.utils.image_dataset_from_directory(train_dir,
shuffle=True,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
data_augmentation = tf.keras.Sequential([
tf.keras.layers.RandomRotation(tf.random.uniform(shape=(),minval=40, maxval=90)),
])
for image, _ in train_ds.take(1):
    plt.figure(figsize=(10, 10))
    first_image = image[0]
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        augmented_image = data_augmentation(tf.expand_dims(first_image, 0), training=True)
        plt.imshow(augmented_image[0] / 255)
        plt.axis('off')
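Adapting that to the cost2 function from the question would look roughly like this (a sketch, assuming resnet and labels are defined as in the question and that resnet accepts a batched input; the factor argument expresses the 40-90 degree range as fractions of a full turn):
rotation_layer = tf.keras.layers.RandomRotation(factor=(40 / 360, 90 / 360))

@tf.function
def cost2():
    image = tf.cast(x_hat, tf.float32)
    num_samples = 100
    average_loss = 0.0
    for j in range(num_samples):
        # a fresh rotation angle is sampled on every call while training=True
        rotated = rotation_layer(tf.expand_dims(image, 0), training=True)
        rotated_logits, _ = resnet(rotated)
        average_loss += -1 * tf.nn.softmax_cross_entropy_with_logits(logits=rotated_logits, labels=labels) / num_samples
    return average_loss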

How to convert numpy array to image dataset?

Here I used the PIL library to load a single image (not an image dataset) and converted it to a numpy array using the numpy library. It works perfectly for a single image.
Now I want to convert a whole image dataset into numpy arrays, with training, testing, and validation data.
Below I share the code for converting a single image to a numpy array.
Import the needed libraries:
from PIL import Image
from numpy import asarray
load the image
image = Image.open('flower/1.jpg')
convert image to numpy array
data = asarray(image)
#data is array format of image
If you just want to convert the numpy array back to an image, then the following code snippet should work. If you want to replicate the process for the entire dataset, you need to call it on every single image (a sketch of that follows Solution 1 below). How you do it will depend on the model you're trying to build (image classification, object detection, etc.) and what you're using to build it (TensorFlow, Theano, etc.).
Solution 1
from PIL import Image
from numpy import asarray
image = Image.open('flower/1.jpg')
data = asarray(image)
img_w, img_h = 200, 200
img = Image.fromarray(data, 'RGB')
img.save('test.png')
img.show()
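To repeat that per-image conversion over a whole folder, a minimal sketch (assuming all images sit under a hypothetical flower/ directory and share the same dimensions) could look like this:
import glob
import numpy as np
from PIL import Image
from numpy import asarray

image_paths = sorted(glob.glob('flower/*.jpg'))          # hypothetical folder layout
images = [asarray(Image.open(p)) for p in image_paths]   # one array per image
dataset = np.stack(images)                                # shape: (num_images, height, width, channels)
print(dataset.shape)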
Since you're working on an image classification problem, the following code could serve you well. Customize it to your problem; I've commented in the code where you need to make changes.
Solution 2
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import os
import numpy as np
import pandas as pd
import cv2
from glob import glob
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.applications import MobileNetV2 #Change Here: Select the classification architecture you need
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
def build_model(size, num_classes):
    inputs = Input((size, size, 3))
    backbone = MobileNetV2(input_tensor=inputs, include_top=False, weights="imagenet") #Change Here: Select the classification architecture you need
    backbone.trainable = True
    x = backbone.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.2)(x) #Change Here: Try different dropout values b/w .2 to .8
    x = Dense(1024, activation="relu")(x)
    x = Dense(num_classes, activation="softmax")(x)
    model = tf.keras.Model(inputs, x)
    return model
def read_image(path, size):
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    # Note: the ImageDataGenerator-based augmentation below is directory-based and does not fit
    # inside this per-image reader, so it is left out of the tf.data pipeline:
    # train_datagen = ImageDataGenerator(rescale=1./255,
    #                                    rotation_range=30,      #Change Here: Select any rotation range b/w 10 to 90
    #                                    zoom_range=0.3,
    #                                    width_shift_range=0.2,  #Change Here: Select width shift as per your images. My advice: try b/w .2 to .5
    #                                    height_shift_range=0.2, #Change Here: Select height shift as per your images. My advice: try b/w .2 to .5
    #                                    horizontal_flip=True)
    # image = train_datagen.flow_from_directory(path, shuffle=False, batch_size=10, seed=10) #Change Here: Select batch_size as per your need
    image = cv2.resize(image, (size, size))
    image = image / 255.0
    image = image.astype(np.float32)
    return image
def parse_data(x, y):
    x = x.decode()
    num_class = 120 #Change Here: num_class should be equal to the number of classes (labels) in your dataset
    size = 224 #Change Here: Select size as per your chosen model architecture
    image = read_image(x, size)
    label = [0] * num_class
    label[y] = 1
    label = np.array(label)
    label = label.astype(np.int32)
    return image, label
def tf_parse(x, y):
    x, y = tf.numpy_function(parse_data, [x, y], [tf.float32, tf.int32])
    x.set_shape((224, 224, 3))
    y.set_shape((120))
    return x, y
def tf_dataset(x, y, batch=8): #Change Here: Choose default batch size as per your needs
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    dataset = dataset.map(tf_parse)
    dataset = dataset.batch(batch)
    dataset = dataset.repeat()
    return dataset
if __name__ == "__main__":
path = "/content/gdrive/My Drive/Dog Breed Classification/" #Change Here: Give path to your parent directory
train_path = os.path.join(path, "train/*")
test_path = os.path.join(path, "test/*")
labels_path = os.path.join(path, "labels.csv") #Change Here: Give name of your csv file
labels_df = pd.read_csv(labels_path)
breed = labels_df["breed"].unique() #Change Here: replace breed with the column name, denoting class, in your csv file
print("Number of Breed: ", len(breed))
breed2id = {name: i for i, name in enumerate(breed)} #Change Here: replace breed & id with the column names denoting class & image file in your csv file
#repeat the same every place where breed or id is mentioned
ids = glob(train_path)
labels = []
for image_id in ids:
# print(image_id,"\n\n\n")
image_id = image_id.split("/")[-1]
breed_name = list(labels_df[labels_df.id == image_id]["breed"])[0]
breed_idx = breed2id[breed_name]
labels.append(breed_idx)
## Spliting the dataset
train_x, valid_x = train_test_split(ids, test_size=0.2, random_state=42) #Change Here: select test size as per your need. My advice go between .2 to .3
train_y, valid_y = train_test_split(labels, test_size=0.2, random_state=42)
## Parameters
size = 224 #Change Here: Select size as per your chosen model architecture
num_classes = 120 #Change Here: num_class should be equal to types of blood cells you have in your dataset i.e. number of labels
lr = 1e-4 #Change Here: Select as per you need. My advice chose any where b/w 1e-4 to 1e-2
batch = 16 #Change Here: Select as per your need
epochs = 50 #Change Here: Select as per your need
## Model
model = build_model(size, num_classes)
model.compile(loss="categorical_crossentropy", optimizer=Adam(lr), metrics=["acc"])
# model.summary()
## Dataset
train_dataset = tf_dataset(train_x, train_y, batch=batch)
valid_dataset = tf_dataset(valid_x, valid_y, batch=batch)
## Training
callbacks = [
ModelCheckpoint("/content/gdrive/My Drive/Dog Breed Classification/Model/model-1-{epoch:02d}.h5", #Change Here :Give the path where you want to store your model
verbose=1, save_best_only=True),
ReduceLROnPlateau(factor=0.1, patience=5, min_lr=1e-6)] #Change Here: Set factor, patience, min_lr as per your need. My advice leave as it is and then change to see if model performance improves.
train_steps = (len(train_x)//batch) + 1
valid_steps = (len(valid_x)//batch) + 1
model.fit(train_dataset,
steps_per_epoch=train_steps,
validation_steps=valid_steps,
validation_data=valid_dataset,
epochs=epochs,
callbacks=callbacks)

tensorflow kmeans doesn't seem to take new initial points

I'm finding the best cluster set in my data by taking the result with the lowest average distance over many k-means trials in TensorFlow.
But my code doesn't update the initial centroids in each trial, so all results are the same.
Here's my code1 - tensor_kmeans.py
import numpy as np
import pandas as pd
import random
import tensorflow as tf
from tensorflow.contrib.factorization import KMeans
from sklearn import metrics
import imp
import pickle
# load as DataFrame
pkl = 'fasttext_words_k.pkl'
with open(pkl, 'rb') as f:
    unique_words_in_fasttext = pickle.load(f).T
vector =[]
for i in range(len(unique_words_in_fasttext)):
    vector.append(list(unique_words_in_fasttext.iloc[i,:]))
vector = [np.array(f) for f in vector ]
# Import data
full_data_x = vector
# Parameters
num_steps = 100 # Total steps to train
batch_size = 1024 # The number of samples per batch
n_clusters = 1300 # The number of clusters
num_classes = 100
num_rows = 13074
num_features = 300 # dimensionality of each word vector
### tensor kmeans ###
# Input images
X = tf.placeholder(tf.float32, shape=[None , num_features])
# Labels (for assigning a label to a centroid and testing)
# Y = tf.placeholder(tf.float32, shape=[None, num_classes])
# K-Means Parameters
kmeans = KMeans(inputs=X, num_clusters=n_clusters, distance_metric='cosine',
use_mini_batch=True, initial_clusters="random")
# Build KMeans graph
training_graph = kmeans.training_graph()
if len(training_graph) > 6: # Tensorflow 1.4+
    (all_scores, cluster_idx, scores, cluster_centers_initialized,
     cluster_centers_var, init_op, train_op) = training_graph
else:
    (all_scores, cluster_idx, scores, cluster_centers_initialized,
     init_op, train_op) = training_graph
cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple
avg_distance = tf.reduce_mean(scores)
# Initialize the variables (i.e. assign their default value)
init_vars = tf.global_variables_initializer()
# Start TensorFlow session
sess = tf.Session()
# Run the initializer
sess.run(init_vars, feed_dict={X: full_data_x})
sess.run(init_op, feed_dict={X: full_data_x})
# Training
for i in range(1, num_steps + 1):
    _, d, idx = sess.run([train_op, avg_distance, cluster_idx],
                         feed_dict={X: full_data_x})
    if i % 10 == 0 or i == 1:
        print("Step %i, Avg Distance: %f" % (i, d))
labels = list(range(num_rows))
# Assign a label to each centroid
# Count total number of labels per centroid, using the label of each training
# sample to their closest centroid (given by 'idx')
counts = np.zeros(shape=(n_clusters, num_classes))
for i in range(len(idx)):
    counts[idx[i]] += labels[i]
# Assign the most frequent label to the centroid
labels_map = [np.argmax(c) for c in counts]
labels_map = tf.convert_to_tensor(labels_map)
# Evaluation ops
# Lookup: centroid_id -> label
cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
# assign variables
cluster_list_k = idx
and here's the code outside code1:
k_li = []
rotation = 50
best_labels = []
best_k = -1
for i in range(rotation):
    import tensor_kmeans
    k_li.append(tensor_kmeans.k)
    if len(k_li) > 0:
        for i in range(len(k_li)):
            if k_li[i] > best_k:
                best_labels = tensor_kmeans.cluster_list_k
                best_k = k_li[i]
    tensor_kmeans = imp.reload(tensor_kmeans)
Where can I find the problem?
I'm waiting for your answer, thank you.
Each time you call KMeans() you should use a new random_seed, i.e.
kmeans = KMeans(inputs=X, num_clusters=n_clusters, distance_metric='cosine',
use_mini_batch=True, initial_clusters="random", random_seed=SOME_NEW_VALUE)
Otherwise the function KMeans() will assume random_seed=0, so that the results are reproducible (i.e. the results are always the same).
A simple way to resolve your issue would be to make a function out of code1 (tensor_kmeans.py), then call this function with a new random_seed (as an input parameter) for each trial, as sketched below.
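A minimal sketch of that refactor, assuming the TF 1.x setup and imports from the question (the function body is a condensed version of tensor_kmeans.py, and the outer loop keeps the labels from the trial with the lowest average distance):
def run_kmeans_trial(data, n_clusters, num_features, num_steps, seed):
    tf.reset_default_graph()  # fresh graph for every trial
    X = tf.placeholder(tf.float32, shape=[None, num_features])
    kmeans = KMeans(inputs=X, num_clusters=n_clusters, distance_metric='cosine',
                    use_mini_batch=True, initial_clusters="random", random_seed=seed)
    (all_scores, cluster_idx, scores, cluster_centers_initialized,
     cluster_centers_var, init_op, train_op) = kmeans.training_graph()  # TF 1.4+ layout
    cluster_idx = cluster_idx[0]
    avg_distance = tf.reduce_mean(scores)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer(), feed_dict={X: data})
        sess.run(init_op, feed_dict={X: data})
        for _ in range(num_steps):
            _, d, idx = sess.run([train_op, avg_distance, cluster_idx], feed_dict={X: data})
    return d, idx

best_d, best_labels = None, None
for trial in range(50):
    d, idx = run_kmeans_trial(full_data_x, n_clusters=1300, num_features=300, num_steps=100, seed=trial)
    if best_d is None or d < best_d:
        best_d, best_labels = d, idx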

How to feed Cifar10 trained model with my own image and get label as output?

I am trying to use the trained model based on the CIFAR-10 tutorial and would like to feed it an external 32x32 image (jpg or png).
My goal is to be able to get the label as an output.
In other words, I want to feed the network a single 32x32, 3-channel JPEG image with no label as input, and have the inference process give me tf.argmax(logits, 1).
Basically I would like to be able to use the trained cifar10 model on an external image and see what class it will spit out.
I have been trying to do that based on the Cifar10 tutorial and unfortunately always have issues, especially with the Session concept and the batch concept.
Any help doing that with Cifar10 would be greatly appreciated.
Here is the code implemented so far, with compilation issues:
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
import cifar10
import cifar10_input
import os
import faultnet_flags
from PIL import Image
FLAGS = tf.app.flags.FLAGS
def evaluate():
    filename_queue = tf.train.string_input_producer(['/home/tensor/.../inputImage.jpg'])
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    input_img = tf.image.decode_jpeg(value)
    init_op = tf.initialize_all_variables()

    # Problem in here with Graph / session
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(1):
            image = input_img.eval()
            print(image.shape)
            Image.fromarray(np.asarray(image)).show()

        # Problem in here is that I have only one image as input and have no label and would like to have
        # it compatible with the Cifar10 network
        reshaped_image = tf.cast(image, tf.float32)
        height = FLAGS.resized_image_size
        width = FLAGS.resized_image_size
        resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, width, height)
        float_image = tf.image.per_image_whitening(resized_image)  # reshaped_image
        num_preprocess_threads = 1
        images = tf.train.batch(
            [float_image],
            batch_size=128,
            num_threads=num_preprocess_threads,
            capacity=128)
        coord.request_stop()
        coord.join(threads)
        logits = faultnet.inference(images)
        # Calculate predictions.
        #top_k_predict_op = tf.argmax(logits, 1)
        # print('Current image is: ')
        # print(top_k_predict_op[0])

        # this does not work since there is a problem with the session
        # and the Graph conflicting
        my_classification = sess.run(tf.argmax(logits, 1))
        print('Predicted ', my_classification[0], " for your input image.")
def main(argv=None):
    evaluate()

if __name__ == '__main__':
    tf.app.run()
Some basics first:
First you define your graph: image queue, image preprocessing, inference of the convnet, top-k accuracy
Then you create a tf.Session() and work inside it: starting the queue runners, and calls to sess.run()
Here is what your code should look like
# 1. GRAPH CREATION
filename_queue = tf.train.string_input_producer(['/home/tensor/.../inputImage.jpg'])
... # NO CREATION of a tf.Session here
float_image = ...
images = tf.expand_dims(float_image, 0) # create a fake batch of images (batch_size=1)
logits = faultnet.inference(images)
_, top_k_pred = tf.nn.top_k(logits, k=5)
# 2. TENSORFLOW SESSION
with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    top_indices = sess.run([top_k_pred])
    print("Predicted ", top_indices[0], " for your input image.")
EDIT:
As @mrry suggests, if you only need to work on a single image, you can remove the queue runners:
# 1. GRAPH CREATION
input_img = tf.image.decode_jpeg(tf.read_file("/home/.../your_image.jpg"), channels=3)
reshaped_image = tf.image.resize_image_with_crop_or_pad(tf.cast(input_img, tf.float32), width, height)
float_image = tf.image.per_image_whitening(reshaped_image)
images = tf.expand_dims(float_image, 0) # create a fake batch of images (batch_size = 1)
logits = faultnet.inference(images)
_, top_k_pred = tf.nn.top_k(logits, k=5)
# 2. TENSORFLOW SESSION
with tf.Session() as sess:
    sess.run(init_op)
    top_indices = sess.run([top_k_pred])
    print("Predicted ", top_indices[0], " for your input image.")
The original source code in cifar10_eval.py can also be used for testing your own individual images, as shown in the following console output
nbatfai@robopsy:~/Robopsychology/repos/gpu/tensorflow/tensorflow/models/image/cifar10$ python cifar10_eval.py --run_once True 2>/dev/null
[ -0.63916457  -3.31066918   2.32452989   1.51062226  15.55279636
  -0.91585422   1.26451302  -4.11891603  -7.62230825  -4.29096413]
deer
nbatfai@robopsy:~/Robopsychology/repos/gpu/tensorflow/tensorflow/models/image/cifar10$ python cifar2bin.py matchbox.png input.bin
nbatfai@robopsy:~/Robopsychology/repos/gpu/tensorflow/tensorflow/models/image/cifar10$ python cifar10_eval.py --run_once True 2>/dev/null
[ -1.30562115  12.61497402  -1.34208572  -1.3238833   -6.13368177
  -1.17441642  -1.38651907  -4.3274951    2.05489922   2.54187846]
automobile
nbatfai@robopsy:~/Robopsychology/repos/gpu/tensorflow/tensorflow/models/image/cifar10$
and this code snippet:
#while step < num_iter and not coord.should_stop():
#    predictions = sess.run([top_k_op])
print(sess.run(logits[0]))
classification = sess.run(tf.argmax(logits[0], 0))
cifar10classes = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
print(cifar10classes[classification])
#true_count += np.sum(predictions)
step += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
# print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
More details can be found in the post How can I test own image to Cifar-10 tutorial on Tensorflow?
