Tensorflow image_dataset_from_directory function label shape - python

I am a tad new to TensorFlow and I am having trouble running this simple CNN.
I have my images separated into a directory for each class, which I load into train_dataset using image_dataset_from_directory.
From the documentation, this should yield a tuple (images, labels), where images has shape (batch_size, image_size[0], image_size[1], num_channels) and labels is a float32 tensor of shape (batch_size, num_classes); num_channels is 3 as the images are RGB.
However, when I try to fit my model, I get an error saying that the logits have shape [32, 5] and the labels shape [160]. It seems to me the batches in the labels have 'collapsed'.
Here's some snippets:
BATCH_SIZE = 32
EPOCHS = 1
IMG_SIZE=(300, 300)
SEED = 1
train_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    directory='train/train_images/',
    label_mode='categorical',
    class_names=class_names,
    color_mode='rgb',
    batch_size=BATCH_SIZE,
    image_size=IMG_SIZE)
IMG_SHAPE = IMG_SIZE + (3,)
n_classes = len(train_dataset.class_names)
def build_model():
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(input_shape=IMG_SHAPE, kernel_size=(5, 5), filters=32, activation='relu'),
        tf.keras.layers.MaxPool2D(pool_size=(3, 3)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Dense(units=n_classes, activation='softmax')
    ])
    return model
model = build_model()
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.sparse_categorical_crossentropy,
              metrics=['accuracy'])
model.fit(train_dataset, epochs = EPOCHS, batch_size = BATCH_SIZE)
Error Message:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-19-86d96e744ef0> in <module>
----> 1 model.fit(train_dataset, epochs = EPOCHS, batch_size = BATCH_SIZE)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
805 # In this case we have created variables on the first call, so we run the
806 # defunned version which is guaranteed to never create variables.
--> 807 return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
808 elif self._stateful_fn is not None:
809 # Release the lock early so that multiple threads can perform the call
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in __call__(self, *args, **kwargs)
2827 with self._lock:
2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
2831 #property
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _filtered_call(self, args, kwargs, cancellation_manager)
1846 resource_variable_ops.BaseResourceVariable))],
1847 captured_inputs=self.captured_inputs,
-> 1848 cancellation_manager=cancellation_manager)
1849
1850 def _call_flat(self, args, captured_inputs, cancellation_manager=None):
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1922 # No tape is watching; skip to running the function.
1923 return self._build_call_outputs(self._inference_function.call(
-> 1924 ctx, args, cancellation_manager=cancellation_manager))
1925 forward_backward = self._select_forward_and_backward_functions(
1926 args,
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/function.py in call(self, ctx, args, cancellation_manager)
548 inputs=args,
549 attrs=attrs,
--> 550 ctx=ctx)
551 else:
552 outputs = execute.execute_with_cancellation(
/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: logits and labels must have the same first dimension, got logits shape [32,5] and labels shape [160]
[[node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits (defined at <ipython-input-18-1904262c6a7b>:1) ]] [Op:__inference_train_function_928]
Function call stack:
train_function

The shape mismatch comes from the loss function: with label_mode='categorical', image_dataset_from_directory yields one-hot labels of shape (batch_size, num_classes), but the model is compiled with sparse_categorical_crossentropy, which expects integer labels of shape (batch_size,), so the one-hot batch gets flattened into [32 * 5] = [160]. Make sure you use loss="categorical_crossentropy" or tf.keras.losses.CategoricalCrossentropy() instead; see model.compile in the Keras documentation. Then try again. I hope this helps.
Also, it would help if you shared your file structure.
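A minimal sketch of the corrected compile step, assuming the rest of the setup above stays the same; only the loss changes, because the dataset already yields one-hot labels:
model = build_model()
# label_mode='categorical' yields one-hot labels of shape (batch_size, num_classes),
# which match CategoricalCrossentropy; the sparse variant expects integer labels
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])
model.fit(train_dataset, epochs=EPOCHS)
Alternatively, keep sparse_categorical_crossentropy and pass label_mode='int' to image_dataset_from_directory, so the labels are integer class indices of shape (batch_size,).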

Related

Python - Code Error direct from textbook - Neural Networks

The code below, which comes directly from the textbook "Machine Learning with Python Cookbook", is giving an error. Can anyone see the issue?
Code:
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense,Dropout,Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format("channels_first")
np.random.seed(0)
channels=1
height=28
width=28
(data_train,target_train),(data_test,target_test)=mnist.load_data()
data_train=data_train.reshape(data_train.shape[0],channels,height,width)
data_test=data_test.reshape(data_test.shape[0],channels,height,width)
features_train=data_train/255
features_test=data_test/255
target_train=np_utils.to_categorical(target_train)
target_test=np_utils.to_categorical(target_test)
number_of_classes=target_test.shape[1]
net=Sequential()
net.add(Conv2D(filters=64, kernel_size=(5,5), input_shape=(channels,width,height), activation="relu" ))
net.add(MaxPooling2D(pool_size=(2,2)))
net.add(Dropout(0.5))
net.add(Flatten())
net.add(Dense(128,activation="relu"))
net.add(Dropout(0.5))
net.add(Dense(number_of_classes,activation="softmax"))
net.compile(loss="categorical_crossentropy", optimizer="rmsprop",metrics=["accuracy"])
net.fit(features_train,target_train,epochs=2, verbose=0, batch_size=1000,validation_data=(features_test,target_test))
This is the log. The bottom of the error seems to relate to the MaxPooling2D layer, but it doesn't make much sense to me: "Default MaxPoolingOp only supports NHWC on device type CPU"
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-33-7b4b0fc491f3> in <module>
36 net.add(Dense(number_of_classes,activation="softmax"))
37 net.compile(loss="categorical_crossentropy", optimizer="rmsprop",metrics=["accuracy"])
---> 38 net.fit(features_train,target_train,epochs=2, verbose=0, batch_size=1000,validation_data=(features_test,target_test))
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
838 # Lifting succeeded, so variables are initialized and we can run the
839 # stateless function.
--> 840 return self._stateless_fn(*args, **kwds)
841 else:
842 canon_args, canon_kwds = \
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2827 with self._lock:
2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
2831 #property
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _filtered_call(self, args, kwargs, cancellation_manager)
1841 `args` and `kwargs`.
1842 """
-> 1843 return self._call_flat(
1844 [t for t in nest.flatten((args, kwargs), expand_composites=True)
1845 if isinstance(t, (ops.Tensor,
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1921 and executing_eagerly):
1922 # No tape is watching; skip to running the function.
-> 1923 return self._build_call_outputs(self._inference_function.call(
1924 ctx, args, cancellation_manager=cancellation_manager))
1925 forward_backward = self._select_forward_and_backward_functions(
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
543 with _InterpolateFunctionError(self):
544 if cancellation_manager is None:
--> 545 outputs = execute.execute(
546 str(self.signature.name),
547 num_outputs=self._num_outputs,
C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 try:
58 ctx.ensure_initialized()
---> 59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
InvalidArgumentError: Default MaxPoolingOp only supports NHWC on device type CPU
[[node sequential_78/max_pooling2d_3/MaxPool (defined at <ipython-input-33-7b4b0fc491f3>:38) ]] [Op:__inference_train_function_1036301]
Function call stack:
train_function
Thanks
Many thanks, this seems to have resolved the initial issue. However, I now get a different error:
ValueError: Negative dimension size caused by subtracting 5 from 1 for '{{node conv2d/Conv2D}} = Conv2D[T=DT_FLOAT, data_format="NHWC", dilations=[1, 1, 1, 1], explicit_paddings=[], padding="VALID", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true](conv2d_input, conv2d/Conv2D/ReadVariableOp)' with input shapes: [?,1,28,28], [5,5,28,64].
All I changed was what was suggested.
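For reference, a minimal sketch of what I assume the suggested change amounts to; note that the data must also be reshaped to NHWC, otherwise the Conv2D layer treats the size-1 channel axis as a spatial axis, which is exactly what produces the negative-dimension error above:
K.set_image_data_format("channels_last")
# reshape to (samples, height, width, channels) instead of (samples, channels, height, width)
data_train = data_train.reshape(data_train.shape[0], height, width, channels)
data_test = data_test.reshape(data_test.shape[0], height, width, channels)
net.add(Conv2D(filters=64, kernel_size=(5,5), input_shape=(height, width, channels), activation="relu"))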

CSV MNIST data set: ValueError: Shapes (None, 10) and (None, 28, 10) are incompatible

I want to classify the MNIST data set (CSV) with Keras. This is my code, but after running it I got the following error. Do you know how I can solve it? ValueError: Shapes (None, 10) and (None, 28, 10) are incompatible
from keras import models
import numpy as np
from keras import layers
import tensorflow as tf
from tensorflow.keras.models import Sequential
from keras.utils import np_utils
from tensorflow.keras.layers import Dense, Dropout, LSTM, BatchNormalization
from keras.utils import to_categorical, plot_model
mnist = tf.keras.datasets.mnist
#Load dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential()
model.add(Dense(units=32, activation='sigmoid',input_shape=(x_train.shape[1:])))
model.add(Dense(units=64, activation='sigmoid'))
model.add(Dense(units=10,activation='softmax'))
model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=32, epochs=100, validation_split=.3)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['training', 'validation'], loc='best')
plt.show()
Here is the error I got from the code. I know it is caused by the input shape, but I do not know how I should define it. x_train.shape is (60000, 28, 28) and y_train.shape is (60000, 10).
ValueError Traceback (most recent call last)
<ipython-input-112-7c9220a71c0e> in <module>
1 model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
----> 2 history = model.fit(x_train, y_train, batch_size=32, epochs=100, validation_split=.3)
3
4 plt.plot(history.history['accuracy'])
5 plt.plot(history.history['val_accuracy'])
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
503 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
504 self._concrete_stateful_fn = (
--> 505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
506 *args, **kwds))
507
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2655 arg_names = base_arg_names + missing_arg_names
2656 graph_function = ConcreteFunction(
-> 2657 func_graph_module.func_graph_from_py_func(
2658 self._name,
2659 self._python_function,
~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 10) and (None, 28, 10) are incompatible
Dense layers cannot handle 2D data like images, so you should first flatten the input to a vector before passing it to the model; otherwise the extra dimension carries through to the output, your labels and logits (model output) become incompatible, and you get this error.
Add a Flatten layer at the start of your model, like this:
from tensorflow.keras.layers import Flatten
model = Sequential()
model.add(Flatten(input_shape=x_train.shape[1:]))  # add this: flattens (28, 28) to (784,)
model.add(Dense(units=32, activation='sigmoid'))
model.add(Dense(units=64, activation='sigmoid'))
model.add(Dense(units=10, activation='softmax'))
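Equivalently, you can reshape the data itself instead of adding a layer; a quick sketch:
# flatten each 28x28 image into a 784-dimensional vector
x_train = x_train.reshape(x_train.shape[0], 784)
x_test = x_test.reshape(x_test.shape[0], 784)
model.add(Dense(units=32, activation='sigmoid', input_shape=(784,)))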

InvalidArgumentError: Conv2DCustomBackpropFilterOp only supports NHWC - pruning neural network

I'm having problems when I use the tensorflow_model_optimization library.
I am developing code to prune an already-trained neural network.
I imported the weights from an h5 file and use tensorflow_model_optimization to prune my neural network.
I get this error when I call the fit method:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-26-ce9759e4dd53> in <module>
----> 1 model_for_pruning.fit_generator(base_treinamento, steps_per_epoch = 6000 /64, epochs = 5, validation_data = base_teste, validation_steps = 30, callbacks=callbacks)
~\anaconda3\envs\supernova\lib\site-packages\tensorflow\python\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1859 use_multiprocessing=use_multiprocessing,
1860 shuffle=shuffle,
-> 1861 initial_epoch=initial_epoch)
1862
1863 def evaluate_generator(self,
~\anaconda3\envs\supernova\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1098 _r=1):
1099 callbacks.on_train_batch_begin(step)
-> 1100 tmp_logs = self.train_function(iterator)
1101 if data_handler.should_sync:
1102 context.async_wait()
~\anaconda3\envs\supernova\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
826 tracing_count = self.experimental_get_tracing_count()
827 with trace.Trace(self._name) as tm:
--> 828 result = self._call(*args, **kwds)
829 compiler = "xla" if self._experimental_compile else "nonXla"
830 new_tracing_count = self.experimental_get_tracing_count()
~\anaconda3\envs\supernova\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
853 # In this case we have created variables on the first call, so we run the
854 # defunned version which is guaranteed to never create variables.
--> 855 return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
856 elif self._stateful_fn is not None:
857 # Release the lock early so that multiple threads can perform the call
~\anaconda3\envs\supernova\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2941 filtered_flat_args) = self._maybe_define_function(args, kwargs)
2942 return graph_function._call_flat(
-> 2943 filtered_flat_args, captured_inputs=graph_function.captured_inputs) # pylint: disable=protected-access
2944
2945 #property
~\anaconda3\envs\supernova\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1917 # No tape is watching; skip to running the function.
1918 return self._build_call_outputs(self._inference_function.call(
-> 1919 ctx, args, cancellation_manager=cancellation_manager))
1920 forward_backward = self._select_forward_and_backward_functions(
1921 args,
~\anaconda3\envs\supernova\lib\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
558 inputs=args,
559 attrs=attrs,
--> 560 ctx=ctx)
561 else:
562 outputs = execute.execute_with_cancellation(
~\anaconda3\envs\supernova\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: Conv2DCustomBackpropFilterOp only supports NHWC.
[[node gradient_tape/sequential_3/prune_low_magnitude_conv2d_18/Conv2D/Conv2DBackpropFilter (defined at <ipython-input-24-fc85f8818d30>:1) ]] [Op:__inference_train_function_10084]
Errors may have originated from an input operation.
Input Source operations connected to node gradient_tape/sequential_3/prune_low_magnitude_conv2d_18/Conv2D/Conv2DBackpropFilter:
sequential_3/prune_low_magnitude_activation_18/Relu (defined at C:\Users\Pichau\anaconda3\envs\supernova\lib\site-packages\tensorflow_model_optimization\python\core\sparsity\keras\pruning_wrapper.py:270)
Function call stack:
train_function
My code:
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
arquivo = open('model.json', 'r')
estrutura_rede = arquivo.read()
arquivo.close()
model = model_from_json(estrutura_rede)
model.load_weights('model.h5')
gerador_treinamento = ImageDataGenerator(rescale=None)
base_treinamento = gerador_treinamento.flow_from_directory('data/train', target_size = (51,51), batch_size = 64, class_mode = 'binary')
gerador_teste = ImageDataGenerator(rescale=None)
base_teste = gerador_teste.flow_from_directory('data/test', target_size = (51,51), batch_size = 64, class_mode = 'binary')
import tempfile
import tensorflow as tf
import tensorflow_model_optimization as tfmot
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
batch_size = 128
epochs = 2
validation_split = 0.1
num_images = int(len(base_treinamento) * (1 - validation_split))
end_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs
model_for_pruning.compile(optimizer='adam',
                          loss='binary_crossentropy',
                          metrics=['accuracy'])
model_for_pruning.summary()
logdir = tempfile.mkdtemp()
callbacks = [
    tfmot.sparsity.keras.UpdatePruningStep(),
    tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
]
model_for_pruning.fit_generator(base_treinamento, steps_per_epoch = 6000 /64, epochs = 5, validation_data = base_teste, validation_steps = 30, callbacks=callbacks)
python: 3.6.12
tensorflow: 2.2.0
tensorflow-model-optimization: 0.5.0
Can someone help me?
Changing the runtime from CPU to GPU worked for me.
If you are running it on CPU, consider changing it to GPU.
This does not seem like a pruning issue, but instead a problem with the data format of the model. It looks like one Conv2D layer should be using NHWC format (data_format="channels_last").
Could you share the code for the model?
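If you want to confirm which device you are actually training on, a quick check (standard TensorFlow API, nothing specific to this setup):
import tensorflow as tf
# an empty list means training falls back to the CPU kernels,
# which only support the NHWC (channels_last) data format
print(tf.config.list_physical_devices('GPU'))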

UnimplementedError with Neural Network Using Linear Regression and Tensorflow2

I'm just working through my own sandbox project, wanting to try to implement NLP but with linear regression as the outcome. For reference, the data I am working with comes from the Kaggle wine-reviews dataset, which has wine reviews and a corresponding score from 1 to 100, hence why I'm using linear regression instead of classification.
But I am getting an error message, and I'm not sure whether it's the result of a data-type or a dimensionality problem, or why or how to resolve it.
I'll provide the code below, along with some intermediate outputs showing the dimensions of some of the objects, as I assume that might be useful in solving this.
df = pd.read_csv('winemag-data_first150k.csv', encoding='ISO-8859-1')
y = df['points'].astype(int)
X = df['description'].astype(str)
# split up the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
MAX_VOCAB_SIZE = 35000
tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE)
tokenizer.fit_on_texts(X_train)
sequences_train = tokenizer.texts_to_sequences(X_train)
sequences_test = tokenizer.texts_to_sequences(X_test)
word2idx = tokenizer.word_index
V = len(word2idx)
print('Found %s unique tokens.' % V)
Found 33012 unique tokens.
data_train = pad_sequences(sequences_train)
print('Shape of data train tensor:', data_train.shape)
# get sequence length
T = data_train.shape[1]
Shape of data train tensor: (101123, 136)
data_test = pad_sequences(sequences_test, maxlen=T)
print('Shape of data test tensor:', data_test.shape)
Shape of data test tensor: (49807, 136)
# Create the model
# We get to choose embedding dimensionality
D = 20
# Hidden state dimensionality
M = 15
i = Input(shape=(T,))
x = Embedding(V + 1, D)(i)
x = LSTM(M, return_sequences=True)(x)
x = GlobalMaxPooling1D()(x)
x = Dense(1)(x)
model = Model(i, x)
model.compile(optimizer='adam', loss='mse')
# learning rate scheduler
def schedule(epoch, lr):
    if epoch >= 50:
        return 0.0001
    return 0.001
scheduler = tf.keras.callbacks.LearningRateScheduler(schedule)
# Train the model
r = model.fit(X, y, epochs=200, callbacks=[scheduler])
And then I get the error message along with a warning about dimensionality:
Epoch 1/200
WARNING:tensorflow:Model was constructed with shape (None, 136) for input Tensor("input_10:0", shape=(None, 136), dtype=float32), but it was called on an input with incompatible shape (None, 1).
WARNING:tensorflow:Model was constructed with shape (None, 136) for input Tensor("input_10:0", shape=(None, 136), dtype=float32), but it was called on an input with incompatible shape (None, 1).
---------------------------------------------------------------------------
UnimplementedError Traceback (most recent call last)
<ipython-input-121-0f68916ec23b> in <module>
13
14 # Train the model
---> 15 r = model.fit(X, y, epochs=200, callbacks=[scheduler])
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
838 # Lifting succeeded, so variables are initialized and we can run the
839 # stateless function.
--> 840 return self._stateless_fn(*args, **kwds)
841 else:
842 canon_args, canon_kwds = \
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\function.py in __call__(self, *args, **kwargs)
2827 with self._lock:
2828 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 2829 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
2830
2831 #property
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\function.py in _filtered_call(self, args, kwargs, cancellation_manager)
1841 `args` and `kwargs`.
1842 """
-> 1843 return self._call_flat(
1844 [t for t in nest.flatten((args, kwargs), expand_composites=True)
1845 if isinstance(t, (ops.Tensor,
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1921 and executing_eagerly):
1922 # No tape is watching; skip to running the function.
-> 1923 return self._build_call_outputs(self._inference_function.call(
1924 ctx, args, cancellation_manager=cancellation_manager))
1925 forward_backward = self._select_forward_and_backward_functions(
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
543 with _InterpolateFunctionError(self):
544 if cancellation_manager is None:
--> 545 outputs = execute.execute(
546 str(self.signature.name),
547 num_outputs=self._num_outputs,
~\anaconda3\envs\newenvt\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 try:
58 ctx.ensure_initialized()
---> 59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
UnimplementedError: Cast string to float is not supported
[[node functional_11/Cast (defined at <ipython-input-121-0f68916ec23b>:15) ]] [Op:__inference_train_function_17206]
Function call stack:
train_function
I'm not entirely sure what needs to be changed, but any suggestions would be appreciated.
From the comments:
You are passing X to model.fit, which literally has string values; a neural network cannot take string values as input (paraphrased from Dr. Snoopy).
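In other words, fit on the tokenized, padded sequences, which match the Input shape (T,), not on the raw strings. A sketch using the variables already defined above:
# data_train was built from X_train, so pair it with y_train
r = model.fit(data_train, y_train, epochs=200, callbacks=[scheduler],
              validation_data=(data_test, y_test))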

CNN Model Training - Resource Exhaustion (Python & Tensorflow)

I am using Microsoft Azure to train a CNN (Convolutional Neural Network) to recognize 11 classes of food using 16k images. The Virtual Machine I'm using is a "STANDARD_NC24_PROMO" with the following specs:
24 vCPUs, 4 GPUs, 224 GB memory, 1440 GB storage.
The problem is that even a simple run of the program gives me the following error about resource exhaustion:
2-conv-256-nodes-0-dense-1576530179
Train on 10636 samples, validate on 2660 samples
Epoch 1/10
32/10636 [..............................] - ETA: 57:51
---------------------------------------------------------------------------
ResourceExhaustedError Traceback (most recent call last)
<ipython-input-10-ee913a07a18b> in <module>
86 model.compile(loss="sparse_categorical_crossentropy",optimizer="adam",metrics=["accuracy"])
87 ### TRAIN
---> 88 model.fit(train_images, train_labels,validation_split=0.20, epochs=10,use_multiprocessing=True)
89
90 loss, acc = model.evaluate(test_images, test_labels, verbose = 0)
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
726 max_queue_size=max_queue_size,
727 workers=workers,
--> 728 use_multiprocessing=use_multiprocessing)
729
730 def evaluate(self,
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
322 mode=ModeKeys.TRAIN,
323 training_context=training_context,
--> 324 total_epochs=epochs)
325 cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
326
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
121 step=step, mode=mode, size=current_batch_size) as batch_logs:
122 try:
--> 123 batch_outs = execution_function(iterator)
124 except (StopIteration, errors.OutOfRangeError):
125 # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in execution_function(input_fn)
84 # `numpy` translates Tensors to values in Eager mode.
85 return nest.map_structure(_non_none_constant_value,
---> 86 distributed_function(input_fn))
87
88 return execution_function
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
455
456 tracing_count = self._get_tracing_count()
--> 457 result = self._call(*args, **kwds)
458 if tracing_count == self._get_tracing_count():
459 self._call_counter.called_without_tracing()
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
518 # Lifting succeeded, so variables are initialized and we can run the
519 # stateless function.
--> 520 return self._stateless_fn(*args, **kwds)
521 else:
522 canon_args, canon_kwds = \
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in __call__(self, *args, **kwargs)
1821 """Calls a graph function specialized to the inputs."""
1822 graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
-> 1823 return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
1824
1825 #property
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _filtered_call(self, args, kwargs)
1139 if isinstance(t, (ops.Tensor,
1140 resource_variable_ops.BaseResourceVariable))),
-> 1141 self.captured_inputs)
1142
1143 def _call_flat(self, args, captured_inputs, cancellation_manager=None):
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1222 if executing_eagerly:
1223 flat_outputs = forward_function.call(
-> 1224 ctx, args, cancellation_manager=cancellation_manager)
1225 else:
1226 gradient_name = self._delayed_rewrite_functions.register()
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py in call(self, ctx, args, cancellation_manager)
509 inputs=args,
510 attrs=("executor_type", executor_type, "config_proto", config),
--> 511 ctx=ctx)
512 else:
513 outputs = execute.execute_with_cancellation(
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
65 else:
66 message = e.message
---> 67 six.raise_from(core._status_to_exception(e.code, message), None)
68 except TypeError as e:
69 keras_symbolic_tensors = [
/anaconda/envs/azureml_py36/lib/python3.6/site-packages/six.py in raise_from(value, from_value)
ResourceExhaustedError: OOM when allocating tensor with shape[32,256,98,98] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[node sequential_7/conv2d_14/Conv2D (defined at /anaconda/envs/azureml_py36/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py:1751) ]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
[Op:__inference_distributed_function_7727]
Function call stack:
distributed_function
I will attach below the bit of code that does the training:
for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
            print(NAME)
            model = Sequential()
            model.add(Conv2D(layer_size, (3,3), input_shape=(IMG_SIZE, IMG_SIZE, 1)))
            model.add(Activation("relu"))
            model.add(MaxPooling2D(pool_size=(2,2)))
            model.add(Dropout(0.5))
            for l in range(conv_layer-1):
                model.add(Conv2D(layer_size, (3,3)))
                model.add(Activation("relu"))
                model.add(MaxPooling2D(pool_size=(2,2)))
                model.add(Dropout(0.5))
            model.add(Flatten())
            for l in range(dense_layer):
                model.add(Dense(layer_size))
                model.add(Activation("relu"))
            # The output layer with 11 neurons
            model.add(Dense(11))
            model.add(Activation("softmax"))
            ### COMPILE MODEL
            model.compile(loss="sparse_categorical_crossentropy",
                          optimizer="adam",
                          metrics=["accuracy"])
            ### TRAIN
            model.fit(train_images, train_labels, validation_split=0.20, epochs=10)
            loss, acc = model.evaluate(test_images, test_labels, verbose=0)
            print(acc * 100)
            if maxacc < acc*100:
                maxacc = acc*100
                maxname = NAME
                maxdict[maxacc] = maxname
print("\n\n", maxacc, " ", maxname)
My laptop, which is nowhere near as good, has no problem executing this, yet running it on Azure gives me that error. The iteration variables don't matter, as I still get the error no matter what their values are.
Any help would be greatly appreciated, thank you for your time!
I would like to add that the program does not even work with this small number of layers:
dense_layers = [0]
layer_sizes = [32]
conv_layers = [1]
Unfortunately, I have never used Azure for training these kinds of networks, but I would try the following (see the sketch after this list):
simplify your network and setup, maybe using a powerful single GPU first. Also, figure out which hyperparameter has to change to make it fail once you have it working with the simpler approach.
reduce the batch size. Most GPU OOM exceptions are due to too much data being processed at once.
There is a lot of optimization happening that might make it work locally but behave slightly differently on multi-GPU machines.
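As a concrete starting point for the second suggestion, a sketch of the training call with a smaller batch size (the OOM tensor above has a leading dimension of 32, Keras's default batch size):
### TRAIN with a smaller batch so fewer activations are held on the GPU at once
model.fit(train_images, train_labels, validation_split=0.20, epochs=10, batch_size=16)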
