Broadcasting a keras model with pyspark [duplicate]

I am using Caffe to do image classification, and I am using Mac OS X and Python.
Right now I know how to classify a list of images using Caffe with plain Python, but I want to make it faster, so I want to use Spark.
Therefore, I tried to apply the image classification to each element of an RDD created from the list of image paths. However, Spark does not allow me to do so.
Here is my code. This is the function for image classification:
# display image name, class number, predicted label
def classify_image(image_path, transformer, net):
    image = caffe.io.load_image(image_path)
    transformed_image = transformer.preprocess('data', image)
    net.blobs['data'].data[...] = transformed_image
    output = net.forward()
    output_prob = output['prob'][0]
    pred = output_prob.argmax()
    labels_file = caffe_root + 'data/ilsvrc12/synset_words.txt'
    labels = np.loadtxt(labels_file, str, delimiter='\t')
    lb = labels[pred]
    image_name = image_path.split(images_folder_path)[1]
    result_str = 'image: '+image_name+' prediction: '+str(pred)+' label: '+lb
    return result_str
This is the code that generates the Caffe parameters and applies the classify_image method to each element of the RDD:
def main():
    sys.path.insert(0, caffe_root + 'python')
    caffe.set_mode_cpu()
    model_def = caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt'
    model_weights = caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
    net = caffe.Net(model_def, model_weights, caffe.TEST)
    mu = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
    mu = mu.mean(1).mean(1)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1))
    transformer.set_mean('data', mu)
    transformer.set_raw_scale('data', 255)
    transformer.set_channel_swap('data', (2,1,0))
    net.blobs['data'].reshape(50, 3, 227, 227)
    image_list = []
    for image_path in glob.glob(images_folder_path+'*.jpg'):
        image_list.append(image_path)
    images_rdd = sc.parallelize(image_list)
    transformer_bc = sc.broadcast(transformer)
    net_bc = sc.broadcast(net)
    image_predictions = images_rdd.map(lambda image_path: classify_image(image_path, transformer_bc, net_bc))
    print image_predictions

if __name__ == '__main__':
    main()
As you can see, here I tried to broadcast the Caffe objects: transformer_bc = sc.broadcast(transformer) and net_bc = sc.broadcast(net).
The error is:
RuntimeError: Pickling of "caffe._caffe.Net" instances is not enabled
Before I added the broadcast, the error was:
Driver stacktrace.... Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):....
So, do you know of any way I can classify images using Caffe while still taking advantage of Spark?

When you work with complex, non-native objects, initialization has to be moved directly to the workers, for example with a singleton module:
net_builder.py:
import caffe

net = None

def build_net(*args, **kwargs):
    ...  # Initialize net here
    return net

def get_net(*args, **kwargs):
    global net
    if net is None:
        net = build_net(*args, **kwargs)
    return net
main.py:
import net_builder

sc.addPyFile("net_builder.py")

def classify_image(image_path, transformer, *args, **kwargs):
    net = net_builder.get_net(*args, **kwargs)
It means you'll have to distribute all required files as well. It can be done either manually or using the SparkFiles mechanism.
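For concreteness, here is a minimal, untested sketch of net_builder.py filled in with the Caffe setup from the question (the paths and transformer parameters are the asker's; model_def, model_weights and mean_path are assumed to be plain strings that pickle cleanly):

# net_builder.py -- builds one net/transformer pair per worker process
import numpy as np
import caffe

net = None
transformer = None

def build_net(model_def, model_weights, mean_path):
    caffe.set_mode_cpu()
    n = caffe.Net(model_def, model_weights, caffe.TEST)
    mu = np.load(mean_path).mean(1).mean(1)
    t = caffe.io.Transformer({'data': n.blobs['data'].data.shape})
    t.set_transpose('data', (2, 0, 1))
    t.set_mean('data', mu)
    t.set_raw_scale('data', 255)
    t.set_channel_swap('data', (2, 1, 0))
    return n, t

def get_net(*args):
    global net, transformer
    if net is None:
        net, transformer = build_net(*args)
    return net, transformer

On the driver you then map over plain strings instead of broadcasting the net:

def classify(image_path):
    # Built lazily, once per worker process, on first call
    net, transformer = net_builder.get_net(model_def, model_weights, mean_path)
    return classify_image(image_path, transformer, net)

image_predictions = images_rdd.map(classify)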
On a side note, you should take a look at the SparkNet package.

Related

Creating your own dataset in tensorflow

I am faced with the task of classifying sounds by their spectrograms. I already have one solution (convert all audio recordings into spectrograms, save them as pictures, and train a neural network on those), but I want to go a simpler way: not save pictures, but convert the audio files directly into tensors. The problem is that I can't find any useful information on how to create my own dataset from tensors in TensorFlow. I will give an example of such code in PyTorch.
class SoundDataset(Dataset):
    def __init__(self, file_names, labels):
        self.file_names = file_names
        self.labels = labels

    def __getitem__(self, index):
        # format the file path and load the file
        path = self.file_names[index]
        scale, sr = librosa.load(path)
        filter_banks = librosa.filters.mel(n_fft=2048, sr=22050, n_mels=10)
        mel_spectrogram = librosa.feature.melspectrogram(scale, sr=sr, n_fft=2048, hop_length=512, n_mels=32)
        log_mel_spectrogram = librosa.power_to_db(mel_spectrogram)
        trch = torch.from_numpy(log_mel_spectrogram)
        if log_mel_spectrogram.shape != (10, 87):
            delta = 87 - log_mel_spectrogram.shape[1]
            trch = torch.nn.functional.pad(trch, (0, delta))
        return trch, self.labels[index]

    def __len__(self):
        return len(self.file_names)
Here a class is created that takes paths to audio recordings and converts them into tensors, padding with zeros when a tensor does not match the expected shape. How can I create the same class for TensorFlow? Next is an example of the code that creates tuples of file paths and their classes, creates a SoundDataset object, and generates a dataset from these files accordingly. All of this is written for PyTorch; tell me how it can be implemented for TensorFlow.
path = '/content/drive/MyDrive/МДМА/audiodata/for-rerecorded/training/'
files = []
labels = []
lbl = '1 0'.split()
for lab in lbl:
    if lab == '0':
        c = 'fake'
    else:
        c = 'real'
    names = os.listdir(path+c)
    for n in names:
        pth = path+c+'/'+n
        files.append(pth)
        labels.append(int(lab))

train_dataset = SoundDataset(files, labels)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=20)
If you read the documentation, there are code patterns for this.
The following is not tested, but if you load the index from another data structure that maps the files to the indexes, this code can help.
import librosa
import pathlib
import tensorflow as tf

DATASET_PATH = 'data/mini_speech_commands'
data_dir = pathlib.Path(DATASET_PATH)
if not data_dir.exists():
    tf.keras.utils.get_file(
        'mini_speech_commands.zip',
        origin="http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip",
        extract=True,
        cache_dir='.', cache_subdir='data')

def load_audio(filename):
    # tf.py_function passes a string tensor; decode it to a plain path for librosa
    scale, sr = librosa.load(filename.numpy().decode())
    mel_spectrogram = librosa.feature.melspectrogram(y=scale, sr=sr, n_fft=2048, hop_length=512, n_mels=32)
    log_mel_spectrogram = librosa.power_to_db(mel_spectrogram)  # already a NumPy array
    spectrogram = log_mel_spectrogram
    if log_mel_spectrogram.shape != (10, 87):
        delta = 87 - log_mel_spectrogram.shape[1]
        spectrogram = tf.pad(spectrogram, [[0, 0], [0, delta]])  # pad the time axis only
    return spectrogram  # return index

read_audio = lambda x: tf.py_function(load_audio, [x], tf.float64)

filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')
files_ds = tf.data.Dataset.from_tensor_slices(filenames)
waveform_ds = files_ds.map(map_func=read_audio)
The code to pad was converted to TensorFlow directly, and you have to test it.
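For reference (a quick sketch of my own, not from the thread), tf.pad takes one [before, after] pair per axis, so padding only the time axis of an (n_mels, frames) spectrogram looks like this:

import tensorflow as tf

spec = tf.zeros([32, 80])                     # (n_mels, frames)
padded = tf.pad(spec, [[0, 0], [0, 7]])       # pad 7 frames on the right of axis 1
print(padded.shape)                           # (32, 87)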
Update: another way, using keras.utils.Sequence, is shown in this thread.
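Since that thread isn't reproduced here, this is a minimal, untested sketch of the Sequence approach; shapes and parameters mirror the question's, and the class name is illustrative:

import math
import numpy as np
import librosa
import tensorflow as tf

class SoundSequence(tf.keras.utils.Sequence):
    """Yields batches of (log-mel spectrogram, label) pairs from audio paths."""
    def __init__(self, file_names, labels, batch_size=20):
        self.file_names = file_names
        self.labels = labels
        self.batch_size = batch_size

    def __len__(self):
        return math.ceil(len(self.file_names) / self.batch_size)

    def __getitem__(self, idx):
        batch_paths = self.file_names[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_labels = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
        specs = []
        for path in batch_paths:
            scale, sr = librosa.load(path)
            mel = librosa.feature.melspectrogram(y=scale, sr=sr, n_fft=2048,
                                                 hop_length=512, n_mels=32)
            spec = librosa.power_to_db(mel)
            if spec.shape[1] < 87:  # right-pad the time axis to a fixed width
                spec = np.pad(spec, ((0, 0), (0, 87 - spec.shape[1])))
            specs.append(spec)
        return np.stack(specs), np.array(batch_labels)

An instance of this class can be passed straight to model.fit in place of a DataLoader.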

UnimplementedError: File system scheme '[local]' not implemented

I am getting an error while running TensorFlow on a TPU:
UnimplementedError: File system scheme '[local]' not implemented (file: '1.png')
I know this question has been answered before, but my issue is different.
I am getting this error when I do:
for i, j in train_dataset.take(3):
    print(i, j)
Plain train_dataset.take(3) without the loop works.
Here are my functions:
def decode(img, image_size=(IMG_SIZE, IMG_SIZE)):
    bits = tf.io.read_file(img)
    image = tf.image.decode_jpeg(bits, channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.image.resize(image, image_size)
    image = tf.image.random_flip_left_right(image, seed=2020)
    image = tf.image.random_flip_up_down(image, seed=2020)
    image = tf.image.random_crop(image, size=[IMG_SIZE, IMG_SIZE, 3], seed=2020)
    image = tf.image.random_brightness(image, max_delta=0.5)
    image = tf.image.rot90(image)
    return image

def decode_image(img, labels=None):
    if labels is None:
        return decode(img)
    else:
        return decode(img), labels

train_image = tf.data.Dataset.from_tensor_slices((train.iloc[:,0], train.iloc[:,1::]))
train_dataset = train_image.map(decode_image, num_parallel_calls=AUTO).repeat().shuffle(512).batch(BATCH_SIZE).prefetch(AUTO)
test_image = tf.data.Dataset.from_tensor_slices((test.iloc[:,0]))
test_dataset = test_image.map(decode_image, num_parallel_calls=AUTO).batch(BATCH_SIZE)
How should I resolve it?
It might be an issue with the path, so I am adding how the path is set.
This is what the directory looks like:
weights
images
-train
--train
---train
----img1
----img2
---csv
-val
--val
---img1
When I run
GCS_DS_PATH = KaggleDatasets().get_gcs_path('images')
!gsutil ls $GCS_DS_PATH
I got the following:
gs://kds-aab923e1c9bc934f088881f1e537365b8f18fe192b3b3dc14e272a37/train/
gs://kds-aab923e1c9bc934f088881f1e537365b8f18fe192b3b3dc14e272a37/val/
This is how my paths are set
def train_format_path(st):
    return GCS_DS_PATH + '/train/train/train/' + st

def test_format_path(st):
    return GCS_DS_PATH + '/val/val/' + st

train_paths = train.ID.apply(train_format_path).values
test_paths = test.ID.apply(test_format_path).values
With train_paths[0]
I got
'gs://kds-aab923e1c9bc934f088881f1e537365b8f18fe192b3b3dc14e272a37/train/train/train/1.png'
As suggested by @Allen Wang, the solution is to use train_paths instead of train to pass the images.
These are the changes I made to make it work:
train_image = tf.data.Dataset.from_tensor_slices((train_paths, train.iloc[:,1::]))
train_dataset = train_image.map(decode_image, num_parallel_calls=AUTO).repeat().shuffle(512).batch(BATCH_SIZE).prefetch(AUTO)
test_image = tf.data.Dataset.from_tensor_slices((test_paths))
test_dataset = test_image.map(decode_image, num_parallel_calls=AUTO).batch(BATCH_SIZE)
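As a quick sanity check (my own addition, not part of the original fix): TPU workers cannot read the VM's local disk, so every path handed to tf.io.read_file must be a gs:// URI:

# A stray relative path such as '1.png' is exactly what triggers the
# "File system scheme '[local]' not implemented" error on TPU.
assert all(p.startswith('gs://') for p in train_paths), 'found a non-GCS path'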

Prediction failed: Error when checking input: expected dense_input to have shape (2898,) but got array with shape (1,)

I am using the following script, predictor.py, in order to get predictions from a Keras model hosted on GCP AI Platform.
import os
import pickle
import tensorflow as tf
import numpy as np
import logging

class MyPredictor(object):
    def __init__(self, model, bow_model):
        self._model = model
        self._bow_model = bow_model

    def predict(self, instances, **kwargs):
        vectors = self.embedding([instances])
        vectors = vectors.tolist()
        output = self._model.predict(vectors)
        return output

    def embedding(self, statement):
        vector = self._bow_model.transform(statement).toarray()
        #vector = vector.to_list()
        return vector

    @classmethod
    def from_path(cls, model_dir):
        model_path = os.path.join(model_dir, 'model.h5')
        model = tf.keras.models.load_model(model_path, compile=False)
        preprocessor_path = os.path.join(model_dir, 'bow.pkl')
        with open(preprocessor_path, 'rb') as f:
            bow_model = pickle.load(f)
        return cls(model, bow_model)
However, I get:
Prediction failed: Error when checking input: expected dense_input to have shape (2898,) but got array with shape (1,)
The problem seems to be due to the dimensions of my input data when trying to do the actual prediction, in the line output = self._model.predict(vectors). The model is expecting a vector of shape (2898,).
I find this quite odd, since when I print the shape and dimensions of the vector I get the following:
This is the shape
(1, 2898)
This is the dim number
2
This is the vector
[[0 0 0 ... 0 0 0]]
So the dimensions and the shape are fine, and it should really be working...
Furthermore, I ran a test getting predictions from the model stored locally, and it works fine. This is the test file:
import os
import pickle
import tensorflow as tf
import numpy as np

class MyPredictor(object):
    def __init__(self, model, bow_model):
        self._model = model
        self._bow_model = bow_model

    def predict(self, instances, **kwargs):
        print("These are the instances ", instances)
        vector = self.embedding([instances])
        output = self._model.predict(vector)
        return output

    def embedding(self, statement):
        vector = self._bow_model.transform(statement).toarray()
        #vector = vector.to_list()
        return vector

model_path = 'model.h5'
model = tf.keras.models.load_model(model_path, compile=False)
preprocessor_path = 'bow.pkl'
with open(preprocessor_path, 'rb') as f:
    bow_model = pickle.load(f)

instances = 'test'
predictor = MyPredictor(model, bow_model)
outputs = predictor.predict(instances)
print(outputs)
Solved it!
It was as silly as adding a set of brackets to this line: output = self._model.predict([vectors])
After that I got another error about the output of the prediction not being JSON serializable. This I solved simply by adding .tolist() to the return: return output.tolist()
import os
import pickle
import tensorflow as tf
import numpy as np
import logging

class MyPredictor(object):
    def __init__(self, model, bow_model):
        self._model = model
        self._bow_model = bow_model

    def predict(self, instances, **kwargs):
        vectors = self.embedding([instances])
        vectors = vectors.tolist()
        output = self._model.predict([vectors])
        return output.tolist()

    def embedding(self, statement):
        vector = self._bow_model.transform(statement).toarray()
        #vector = vector.to_list()
        return vector

    @classmethod
    def from_path(cls, model_dir):
        model_path = os.path.join(model_dir, 'model.h5')
        model = tf.keras.models.load_model(model_path, compile=False)
        preprocessor_path = os.path.join(model_dir, 'bow.pkl')
        with open(preprocessor_path, 'rb') as f:
            bow_model = pickle.load(f)
        return cls(model, bow_model)

Tensorflow: model wrapper that can release GPU resources

Here is a wrapper for a TensorFlow .pb frozen model (ImageNet classification):
import tensorflow as tf
import numpy as np
import cv2
from numba import cuda

class ModelWrapper():
    def __init__(self, model_filepath):
        self.graph_def = self.load_graph_def(model_filepath)
        self.graph = self.load_graph(self.graph_def)
        self.set_inputs_and_outputs()
        self.sess = tf.Session(graph=self.graph)
        print(self.__class__.__name__, 'call __init__') #

    def load_graph_def(self, model_filepath):
        # Expects frozen graph in .pb format
        with tf.gfile.GFile(model_filepath, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        return graph_def

    def load_graph(self, graph_def):
        with tf.Graph().as_default() as graph:
            tf.import_graph_def(graph_def, name="")
        return graph

    def set_inputs_and_outputs(self):
        input_list = []
        for op in self.graph.get_operations():  # tensorflow.python.framework.ops.Operation
            if op.type == "Placeholder":
                input_list.append(op.name)
        print('Inputs:', input_list)
        all_name_list = []
        input_name_list = []
        for node in self.graph_def.node:  # tensorflow.core.framework.node_def_pb2.NodeDef
            all_name_list.append(node.name)
            input_name_list.extend(node.input)
        output_list = list(set(all_name_list) - set(input_name_list))
        print('Outputs:', output_list)
        self.inputs = []
        self.input_tensor_names = [name + ":0" for name in input_list]
        for input_tensor_name in self.input_tensor_names:
            self.inputs.append(self.graph.get_tensor_by_name(input_tensor_name))
        self.outputs = []
        self.output_tensor_names = [name + ":0" for name in output_list]
        for output_tensor_name in self.output_tensor_names:
            self.outputs.append(self.graph.get_tensor_by_name(output_tensor_name))
        input_dim_list = []
        for op in self.graph.get_operations():  # tensorflow.python.framework.ops.Operation
            if op.type == "Placeholder":
                bs = op.get_attr('shape').dim[0].size
                h = op.get_attr('shape').dim[1].size
                w = op.get_attr('shape').dim[2].size
                c = op.get_attr('shape').dim[3].size
                input_dim_list.append([bs, h, w, c])
        assert len(input_dim_list) == 1
        _, self.input_img_h, self.input_img_w, _ = input_dim_list[0]

    def predict(self, img):
        h, w, c = img.shape
        if h != self.input_img_h or w != self.input_img_w:
            img = cv2.resize(img, (self.input_img_w, self.input_img_h))
        batch = img[np.newaxis, ...]
        feed_dict = {self.inputs[0]: batch}
        outputs = self.sess.run(self.outputs, feed_dict=feed_dict)  # (1, 1001)
        output = outputs[0]
        return output

    def __del__(self):
        print(self.__class__.__name__, 'call __del__') #
        import time #
        time.sleep(3) #
        cuda.close()
What I'm trying to do is clean up GPU memory once I don't need the model anymore. In this example I just create and delete the model in a loop, but in real life it can be several different models.
wget https://storage.googleapis.com/download.tensorflow.org/models/inception_v3_2016_08_28_frozen.pb.tar.gz
tar -xvzf inception_v3_2016_08_28_frozen.pb.tar.gz
rm -f imagenet_slim_labels.txt
rm -f inception_v3_2016_08_28_frozen.pb.tar.gz
import os
import time
import tensorflow as tf
import numpy as np
from model_wrapper import ModelWrapper

MODEL_FILEPATH = './inception_v3_2016_08_28_frozen.pb'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

def create_and_delete_in_loop():
    for i in range(10):
        print('-'*60)
        print('i:', i)
        model = ModelWrapper(MODEL_FILEPATH)
        input_batch = np.zeros((model.input_img_h, model.input_img_w, 3), np.uint8)
        y_pred = model.predict(input_batch)
        print('y_pred.shape', y_pred.shape)
        print('np.argmax(y_pred)', np.argmax(y_pred))
        del model

if __name__ == "__main__":
    create_and_delete_in_loop()
    print('START WAITING')
    time.sleep(10)
    print('END OF THE PROGRAM!')
Output:
------------------------------------------------------------
i: 0
Inputs: ['input']
Outputs: ['InceptionV3/Predictions/Reshape_1']
ModelWrapper call __init__
y_pred.shape (1, 1001)
np.argmax(y_pred) 112
ModelWrapper call __del__
------------------------------------------------------------
i: 1
Inputs: ['input']
Outputs: ['InceptionV3/Predictions/Reshape_1']
ModelWrapper call __init__
Segmentation fault (core dumped)
What is the proper way of releasing GPU memory?
TL;DR Run your function as a new process+.
tf.reset_default_graph() is not guaranteed to release memory#. When a process dies, all the memory it was given (including your GPU memory) will be released. Not only does this help keep things neatly organized, but you can also analyze how much CPU, GPU, RAM, and GPU memory each process consumes.
For example, if you had these functions,
def train_model(x, y, params):
    model = ModelWrapper(params.filepath)
    model.fit(x, y, epochs=params.epochs)

def predict_model(x, params):
    model = ModelWrapper(params.filepath)
    y_pred = model.predict(x)
    print(y_pred.shape)
You can use them like this:
import multiprocessing

for i in range(8):
    print(f"Training Model {i} from {params.filepath}")
    process_train = multiprocessing.Process(target=train_model, args=(x_train, y_train, params))
    process_train.start()
    process_train.join()

print("Predicting")
process_predict = multiprocessing.Process(target=predict_model, args=(x_train, params))
process_predict.start()
process_predict.join()
This way Python fires a new process for each of your tasks, and each process runs with its own memory.
Bonus tip: you can also choose to run them in parallel if you have many CPUs and GPUs available; in that case, just call process_train.join() after the loop. If you had eight GPUs, you can use this parent script to serve parameters, while each of the individual processes runs on a different GPU, as sketched below.
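A rough sketch of that parallel variant (hypothetical: it assumes the train_model function above and that x_train, y_train, and params are picklable):

import multiprocessing
import os

def train_on_gpu(gpu_id, x, y, params):
    # Pin this child process to one GPU; must happen before TensorFlow
    # initializes in this process.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    train_model(x, y, params)

if __name__ == '__main__':
    # 'spawn' gives each child a fresh interpreter, so TensorFlow state
    # from the parent does not leak into the workers.
    multiprocessing.set_start_method('spawn')
    processes = []
    for gpu_id in range(8):
        p = multiprocessing.Process(target=train_on_gpu,
                                    args=(gpu_id, x_train, y_train, params))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()  # join after the loop so all eight run in parallel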
# I tried a variety of things, separately and together, before I started using processes,
tf.reset_default_graph()
K.clear_session()
cuda.select_device(0); cuda.close()
model = get_new_model() # overwrite
model = None
del model
gc.collect()
+ I also considered using threads and subprocess.Popen, but I was satisfied with multiprocessing since it offered full decoupling, which made it a lot easier to manage and allocate resources.

KeyError: 'data' in Caffe

While I was following the deepdream IPython notebook at https://github.com/google/deepdream/blob/master/dream.ipynb, I successfully ran the code and initialized the network, until I got this error:
I0218 20:53:01.108750 12174 net.cpp:283] Network initialization done.
I0218 20:53:06.017426 12174 net.cpp:816] Ignoring source layer data
I0218 20:53:06.139768 12174 net.cpp:816] Ignoring source layer loss
Traceback (most recent call last):
File "/home/andrew/PycharmProjects/deepmeme/deepmeme.py", line 122, in <module>
frame = deepdream(net, frame)
File "/home/andrew/PycharmProjects/deepmeme/deepmeme.py", line 78, in deepdream
octaves = [preprocess(net, base_img)]
File "/home/andrew/PycharmProjects/deepmeme/deepmeme.py", line 43, in preprocess
return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean['data']
KeyError: 'data'
This is my code for the Python file:
import sys
sys.path.append("/home/andrew/caffe/python")

from cStringIO import StringIO
import numpy as np
import scipy.ndimage as nd
import PIL.Image
from IPython.display import clear_output, Image, display
from google.protobuf import text_format
import caffe

# If your GPU supports CUDA and Caffe was built with CUDA support,
# uncomment the following to run Caffe operations on the GPU.
# caffe.set_mode_gpu()
# caffe.set_device(0) # select GPU device if multiple devices exist

def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))

model_path = '/home/andrew/caffe/models/bvlc_reference_caffenet/' # substitute your path here
net_fn = model_path + 'deploy.prototxt'
param_fn = model_path + 'caffe_train_iter_500.caffemodel'

# Patching model to be able to compute gradients.
# Note that you can also manually add "force_backward: true" line to "deploy.prototxt".
model = caffe.io.caffe_pb2.NetParameter()
text_format.Merge(open(net_fn).read(), model)
model.force_backward = True
open('deploy.prototxt', 'w').write(str(model))

net = caffe.Classifier('/home/andrew/caffe/models/bvlc_reference_caffenet/deploy.prototxt', '/home/andrew/caffe/models/bvlc_reference_caffenet/caffenet_train_iter_500.caffemodel', caffe.TEST)

# a couple of utility functions for converting to and from Caffe's input image layout
def preprocess(net, img):
    return np.float32(np.rollaxis(img, 2)[::-1]) - net.transformer.mean['data']

def deprocess(net, img):
    return np.dstack((img + net.transformer.mean['data'])[::-1])

def objective_L2(dst):
    dst.diff[:] = dst.data

def make_step(net, step_size=1.5, end='inception_4c/output',
              jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''
    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]
    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift
    net.forward(end=end)
    objective(dst) # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g
    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image
    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)

def deepdream(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
              end='inception_4c/output', clip=True, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in xrange(octave_n-1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale, 1.0/octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1]) # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)
        src.reshape(1,3,h,w) # resize the network's input image size
        src.data[0] = octave_base+detail
        for i in xrange(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip: # adjust image contrast if clipping is disabled
                vis = vis*(255.0/np.percentile(vis, 99.98))
            showarray(vis)
            print octave, i, end, vis.shape
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0]-octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])

img = np.float32(PIL.Image.open('/home/andrew/caffe/examples/images/cat.jpg'))
showarray(img)
net.blobs.keys()

frame = img
frame_i = 0
h, w = frame.shape[:2]
s = 0.05 # scale coefficient
for i in xrange(100):
    frame = deepdream(net, frame)
    PIL.Image.fromarray(np.uint8(frame)).save("frames/%04d.jpg"%frame_i)
    frame = nd.affine_transform(frame, [1-s,1-s,1], [h*s/2,w*s/2,0], order=1)
    frame_i += 1
Image(filename='frames/0029.jpg')
Does anybody know what's happening? I am using my own data that I successfully trained a model with.
From the deepdream iPython notebook:
net = caffe.Classifier('tmp.prototxt', param_fn,
                       mean = np.float32([104.0, 116.0, 122.0]), # ImageNet mean, training set dependent
                       channel_swap = (2,1,0)) # the reference model has channels in BGR order instead of RGB
versus yours:
net = caffe.Classifier('/home/andrew/caffe/models/bvlc_reference_caffenet/deploy.prototxt', '/home/andrew/caffe/models/bvlc_reference_caffenet/caffenet_train_iter_500.caffemodel', caffe.TEST)
You do not seem to include a mean when you create a caffe.Classifier.
See the definition of caffe.Classifier.
If you don't have a mean, you could probably just remove the mention of mean from preprocess/deprocess:
def preprocess(net, img):
    return np.float32(np.rollaxis(img, 2)[::-1])

def deprocess(net, img):
    return np.dstack((img)[::-1])
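Alternatively, if you do have a mean, a sketch of passing it at construction time (the values below are the ImageNet means quoted above, which are training-set dependent and may not match your model):

net = caffe.Classifier(
    '/home/andrew/caffe/models/bvlc_reference_caffenet/deploy.prototxt',
    '/home/andrew/caffe/models/bvlc_reference_caffenet/caffenet_train_iter_500.caffemodel',
    mean=np.float32([104.0, 116.0, 122.0]),  # ImageNet mean; substitute your own training mean
    channel_swap=(2, 1, 0))                  # reference models expect BGR channel order

This populates net.transformer.mean['data'], so the original preprocess/deprocess functions then work unchanged.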
