I am doing a personal project where I use computer vision and a backtracking algorithm to solve Sudoku puzzles. This error suddenly started popping up when I tried to set up the project on a new computer. This is the file where I train my model for the CV part.
import numpy
import cv2
import matplotlib.pyplot as plot
from keras.models import model_from_json

# Load the trained CNN: architecture from JSON, weights from HDF5.
json_file = open('model/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loadedModel = model_from_json(loaded_model_json)
loadedModel.load_weights('model/model.h5')
print("Loaded saved model from disk.")

def predictNumber(image):
    imageResize = cv2.resize(image, (28, 28))
    imageResizeCopy = imageResize.reshape(1, 1, 28, 28)
    # predict_classes was removed in recent Keras; use argmax over predict instead.
    #loadedModelPred = loadedModel.predict_classes(imageResizeCopy, verbose=0)
    loadedModelPred = numpy.argmax(loadedModel.predict(imageResizeCopy), axis=1)
    return loadedModelPred[0]

def extract(puzzle):
    puzzle = cv2.resize(puzzle, (450, 450))
    grid = numpy.zeros([9, 9])
    # Split the 450x450 board into 81 cells of 50x50 pixels each.
    for i in range(9):
        for j in range(9):
            image = puzzle[i*50:(i+1)*50, j*50:(j+1)*50]
            if image.sum() > 25000:  # heuristic: enough ink to contain a digit
                grid[i][j] = predictNumber(image)
            else:
                grid[i][j] = 0
    return grid.astype(int)
The code block above is the part that apparently triggers the following error.
2022-09-17 21:29:46.532 Uncaught app exception
Traceback (most recent call last):
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 556, in _run_script
exec(code, module.__dict__)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\app.py", line 27, in <module>
grid = numberExtract.extract(image)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\cv\numberExtract.py", line 31, in extract
grid[i][j] = predictNumber(image)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\cv\numberExtract.py", line 20, in predictNumber
loadedModelPred = numpy.argmax(loadedModel.predict(imageResizeCopy), axis=1)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\tensorflow\python\eager\execute.py", line 54, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Graph execution error:
Detected at node 'sequential_1/max_pooling2d_1/MaxPool' defined at (most recent call last):
File "C:\Program Files\Python310\lib\threading.py", line 973, in _bootstrap
self._bootstrap_inner()
File "C:\Program Files\Python310\lib\threading.py", line 1016, in _bootstrap_inner
self.run()
File "C:\Program Files\Python310\lib\threading.py", line 953, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 295, in _run_script_thread
self._run_script(request.rerun_data)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 556, in _run_script
exec(code, module.__dict__)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\app.py", line 27, in <module>
grid = numberExtract.extract(image)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\cv\numberExtract.py", line 31, in extract
grid[i][j] = predictNumber(image)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\cv\numberExtract.py", line 20, in predictNumber
loadedModelPred = numpy.argmax(loadedModel.predict(imageResizeCopy), axis=1)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\training.py", line 2344, in predict
tmp_batch_outputs = self.predict_function(iterator)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\training.py", line 2131, in predict_function
return step_function(self, iterator)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\training.py", line 2117, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\training.py", line 2105, in run_step
outputs = model.predict_step(data)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\sequential.py", line 412, in call
return super().call(inputs, training=training, mask=mask)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\functional.py", line 510, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\functional.py", line 667, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\sequential.py", line 412, in call
return super().call(inputs, training=training, mask=mask)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\functional.py", line 510, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\functional.py", line 667, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\base_layer.py", line 1107, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
return fn(*args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\layers\pooling\base_pooling2d.py", line 84, in call
outputs = self.pool_function(
Node: 'sequential_1/max_pooling2d_1/MaxPool'
Default MaxPoolingOp only supports NHWC on device type CPU
[[{{node sequential_1/max_pooling2d_1/MaxPool}}]] [Op:__inference_predict_function_290]
I'm a bit clueless about what's causing this error.
The problem is at the line:
imageResizeCopy = imageResize.reshape(1, 1, 28, 28)
NHWC stands for (n_samples, height, width, channels), but you are reshaping your image into the channels-first format, (n_samples, channels, height, width).
Channels-first is the usual PyTorch convention, by the way, while TensorFlow's default is channels-last. You only have to reshape your image accordingly.
You want to obtain a shape like this: (1, 28, 28, 1).
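The fix inside predictNumber is a one-line change (a minimal sketch, assuming the model was trained on single-channel 28x28 inputs, which the original reshape implies):
imageResize = cv2.resize(image, (28, 28))
# NHWC, channels last: (n_samples, height, width, channels)
imageResizeCopy = imageResize.reshape(1, 28, 28, 1)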
Related
I am trying to learn CNN models in deep learning, and I am using the Cats-vs-Dogs dataset to begin with. I am following a video tutorial and my steps are the same, although the dataset is different, and the other solutions I found have vastly different code that I am not able to understand. Can someone tell me where I am going wrong? Thanks
import numpy as np
import pandas as pd
import tensorflow as tf
import itertools
import os
import shutil
import random
import glob
import matplotlib.pyplot as plt
import warnings
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense,Activation,Flatten,BatchNormalization,Conv2D,MaxPool2D
from tensorflow.keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
train_batch=ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory='/content/Cat-vs-Dogs/train',target_size=(244,244),classes=['cats','dogs'],batch_size=10)
valid_batch=ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory='/content/Cat-vs-Dogs/valid',target_size=(244,244),classes=['cats','dogs'],batch_size=10)
test_batch=ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory='/content/Cat-vs-Dogs/test',target_size=(244,244),classes=['cats','dogs'],batch_size=10,shuffle=False)
model1=Sequential([
Conv2D(filters=32,kernel_size=(3,3),activation='relu', padding='same',input_shape=(224,224,3)),
MaxPool2D(pool_size=(2,2),strides=2),
Conv2D(filters=64,kernel_size=(3,3),activation='relu', padding='same'),
MaxPool2D(pool_size=(2,2),strides=2),
Flatten(),
Dense(units=2,activation='softmax')
])
model1.summary()
model1.compile(optimizer=Adam(learning_rate=0.0001),loss='categorical_crossentropy',metrics=['accuracy'])
model1.fit(x=train_batch,validation_data=valid_batch,epochs=10,verbose=2)
The error output appears below:
Epoch 1/10
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-87-3dd4821591bb> in <module>()
----> 1 model1.fit(x=train_batch,validation_data=valid_batch,epochs=10,verbose=2)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
53 ctx.ensure_initialized()
54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'sequential_4/flatten_4/Reshape' defined at (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
app.start()
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 499, in start
self.io_loop.start()
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
self._run_once()
File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
handle._run()
File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 577, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 606, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 556, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2828, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-76-3dd4821591bb>", line 1, in <module>
model1.fit(x=train_batch,validation_data=valid_batch,epochs=10,verbose=2)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1384, in fit
tmp_logs = self.train_function(iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 859, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py", line 1096, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/sequential.py", line 374, in call
return super(Sequential, self).call(inputs, training=training, mask=mask)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py", line 452, in call
inputs, training=training, mask=mask)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py", line 589, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py", line 1096, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/layers/core/flatten.py", line 96, in call
return tf.reshape(inputs, flattened_shape)
Node: 'sequential_4/flatten_4/Reshape'
Input to reshape is a tensor with 2381440 values, but the requested shape requires a multiple of 200704
[[{{node sequential_4/flatten_4/Reshape}}]] [Op:__inference_train_function_3624]
The error is because the target size is (244, 244) while the input shape given to the model is (224, 224, 3). Concretely, a 244x244 input passed through the two 2x2 poolings gives 61x61x64 = 238,144 values per image (2,381,440 for the batch of 10), while the graph built for 224x224 inputs expects Flatten to receive 56x56x64 = 200,704. You can either change the target size to (224, 224) or change the input shape to (244, 244, 3).
Change the input_shape to (244, 244, 3)
Conv2D(filters=32,kernel_size=(3,3),activation='relu', padding='same',input_shape=(244, 244, 3)),
OR
Change the target_size to (224, 224) in train_batch, valid_batch and test_batch
train_batch=ImageDataGenerator(preprocessing_function=tf.keras.applications.vgg16.preprocess_input) \
.flow_from_directory(directory='/content/Cat-vs-Dogs/train',target_size=(224, 224),classes=['cats','dogs'],batch_size=10)
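As a quick sanity check of the arithmetic above (a standalone sketch, not part of the original code; it just mimics the two 2x2 stride-2 poolings over 64 filters):
# Flattened feature size per image for each candidate input size
for size in (224, 244):
    side = size // 2 // 2          # two MaxPool2D layers halve each spatial dim
    print(size, side * side * 64)  # 224 -> 200704, 244 -> 238144 (x10 batch = 2381440)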
I am getting this error while running the training code of a model.
Traceback (most recent call last):
File "train.py", line 273, in <module>
train_loss[epoch - 1] = process_epoch(
File "train.py", line 240, in process_epoch
loss = loss_fn(model, batch)
File "train.py", line 221, in <lambda>
loss_fn = lambda model, batch: weak_loss(model, batch, normalization="softmax")
File "train.py", line 171, in weak_loss
corr4d = model(batch).to("cuda")
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/srtf/ncnet/lib/model.py", line 263, in forward
feature_A = self.FeatureExtraction(tnf_batch['source_image'])
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/srtf/ncnet/lib/model.py", line 84, in forward
features = self.model(image_batch)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/container.py", line 100, in forward
input = module(input)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 353, in forward
return self._conv_forward(input, self.weight)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 349, in _conv_forward
return F.conv2d(input, weight, self.bias, self.stride,
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same
CUDA is available on the system. Where do I need to make changes in the code?
Your input needs to be sent to the correct device:
>>> corr4d = model(batch.cuda())
This will copy the batch to the GPU device ('cuda:0' by default).
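A more general, device-agnostic version of the same fix (a sketch; model and batch are assumed to already exist, and since the traceback indexes tnf_batch['source_image'], the batch is assumed here to be a dict of tensors):
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# Move every tensor in the batch dict to the same device as the model
batch = {k: v.to(device) for k, v in batch.items()}
corr4d = model(batch)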
I am using TensorFlow 2.0 (import tensorflow as tf).
I saw that I can load a model from TensorFlow like this:
image_model = tf.keras.applications.MobileNet(include_top=True, weights='imagenet', pooling='avg')
Now I want to be able to load models from the local machine. My issue is that I cannot find a pretrained model that works like this:
image_model = tf.keras.models.load_model('inception_v4.h5')
(I used the first h5 model from here: https://github.com/titu1994/Inception-v4/releases?fbclid=IwAR0pK_CZaB9RwA92nvawNOha6DjY5xI0vtkc9Ff5HTATcFT9x5vGYBUXt5Q)
future: <Task finished coro=<server_task.<locals>.server_work() done, defined at ....\x.py:249> exception=ValueError('No model found in config file.')>
Traceback (most recent call last):
File "....\x.py", line 280, in server_work
image_model, layers_indices = init(model_choice, layers_to_see)
File "....\x.py", line 146, in init
image_model = options[choice]() #tf.keras.applications.MobileNetV2(include_top=True, weights='imagenet', pooling='avg')
File "....\x.py", line 119, in model_H5_model
image_model = tf.keras.models.load_model('..../inception_v4.h5')
File "...\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\save.py", line 146, in load_model
return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile)
File "...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\hdf5_format.py", line 165, in load_model_from_hdf5
raise ValueError('No model found in config file.')
ValueError: No model found in config file.
I also tried with a model like this
image_model = tf.keras.models.load_model('model.pb')
File "....\x.py", line 280, in server_work
image_model, layers_indices = init(model_choice, layers_to_see)
File "....\x.py", line 146, in init
image_model = options[choice]()
#tf.keras.applications.MobileNetV2(include_top=True, weights='imagenet',
pooling='avg')
File "....\x.py", line 119, in model_H5_model
image_model = tf.keras.models.load_model('.../model/inceptionv4.pb')
File "...\AppData\Local\Programs\Python\Python37\lib\site-
packages\tensorflow_core\python\keras\saving\save.py", line 149, in
load_model
loader_impl.parse_saved_model(filepath)
File "...\AppData\Local\Programs\Python\Python37\lib\site-
packages\tensorflow_core\python\saved_model\loader_impl.py", line 83, in
parse_saved_model
constants.SAVED_MODEL_FILENAME_PB))
OSError: SavedModel file does not exist at:
.../model/inceptionv4.pb/{saved_model.pbtxt|saved_model.pb}
I also tried something like this:
image_model = tf.keras.applications.MobileNet(include_top=True, weights='imagenet', pooling='avg')
image_model.save('test')
When trying to save, I receive this error:
File "\Python\Python37\lib\site-
packages\tensorflow_core\python\framework\func_graph.py", line 905, in
wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in converted code:
relative to ...\Programs\Python\Python37\lib\site-packages:
tensorflow_core\python\eager\def_function.py:606 initialize_variables *
for v, init in initializer_map.items():
tensorflow_core\python\autograph\impl\api.py:438 converted_call
if not options.user_requested and
conversion.is_whitelisted_for_graph(f):
m = tf_inspect.getmodule(o)
tensorflow_core\python\util\tf_inspect.py:337 getmodule
return _inspect.getmodule(object)
pycallgraph\tracer.py:372 wrapper
if rest not in cache:
TypeError: unhashable type: 'ObjectIdentityDictionary'
tf.keras.models.load_model('test_model')
I am wondering where I can find an h5 or pb file (a pretrained model) that actually works with tf.keras.models.load_model().
Based on the first comment:
future: <Task finished coro=<server_task.<locals>.server_work() done, defined at c:\Users\...\Desktop\PrivateStuff\...\...\xx.py:249> exception=TypeError("in converted code:\n relative to C:\\Users\\...\\AppData\\Local\\Programs\\Python\\Python37\\lib\\site-packages:\n\n tensorflow_core\\python\\eager\\def_function.py:606 initialize_variables *\n for v, init in initializer_map.items():\n tensorflow_core\\python\\autograph\\impl\\api.py:438 converted_call\n if not options.user_requested and conversion.is_whitelisted_for_graph(f):\n tensorflow_core\\python\\autograph\\impl\\conversion.py:352 is_whitelisted_for_graph\n m = tf_inspect.getmodule(o)\n tensorflow_core\\python\\util\\tf_inspect.py:337 getmodule\n return _inspect.getmodule(object)\n pycallgraph\\tracer.py:372 wrapper\n if rest not in cache:\n\n TypeError: unhashable type: 'ObjectIdentityDictionary'\n")>
Traceback (most recent call last):
File "c:\Users\...\Desktop\PrivateStuff\...\...\xx.py", line 280, in server_work
image_model, layers_indices = init(model_choice, layers_to_see)
File "c:\Users\...\Desktop\PrivateStuff\...\...\xx.py", line 146, in init
image_model = options[choice]() #tf.keras.applications.MobileNetV2(include_top=True, weights='imagenet', pooling='avg')
File "c:\Users\...\Desktop\PrivateStuff\...\...\xx.py", line 55, in model_VGG16
image_model.save(r'c:\test')
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\network.py", line 975, in save
signatures, options)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\save.py", line 115, in save_model
signatures, options)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\save.py", line 74, in save
save_lib.save(model, filepath, signatures, options)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\saved_model\save.py", line 870, in save
checkpoint_graph_view)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\saved_model\signature_serialization.py", line 64, in find_function_to_export
functions = saveable_view.list_functions(saveable_view.root)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\saved_model\save.py", line 141, in list_functions
self._serialization_cache)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py", line 2422, in _list_functions_for_serialization
.list_functions_for_serialization(serialization_cache))
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\base_serialization.py", line 91, in list_functions_for_serialization
fns = self.functions_to_serialize(serialization_cache)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\layer_serialization.py", line 79, in
functions_to_serialize
serialization_cache).functions_to_serialize)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\layer_serialization.py", line 94, in
_get_serialized_attributes
serialization_cache)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\model_serialization.py", line 47, in
_get_serialized_attributes_internal
default_signature = save_impl.default_save_signature(self.obj)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\save_impl.py", line 206, in default_save_signature
fn.get_concrete_function()
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 777, in get_concrete_function
self._initialize_uninitialized_variables(initializer_map)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 616, in _initialize_uninitialized_variables
return initialize_variables.get_concrete_function()()
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\function.py", line 1891, in get_concrete_function
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\function.py", line 2150, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\function.py", line 2041, in _create_graph_function
capture_by_value=self._capture_by_value),
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\func_graph.py", line 915, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\func_graph.py", line 905, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in converted code:
relative to C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages:
tensorflow_core\python\eager\def_function.py:606 initialize_variables *
for v, init in initializer_map.items():
tensorflow_core\python\autograph\impl\api.py:438 converted_call
if not options.user_requested and conversion.is_whitelisted_for_graph(f):
tensorflow_core\python\autograph\impl\conversion.py:352 is_whitelisted_for_graph
m = tf_inspect.getmodule(o)
tensorflow_core\python\util\tf_inspect.py:337 getmodule
return _inspect.getmodule(object)
pycallgraph\tracer.py:372 wrapper
if rest not in cache:
TypeError: unhashable type: 'ObjectIdentityDictionary'
I copied your code to load MobileNet. It works if you provide a full path when saving the model; see the code below. Note that when you load a model with weights='imagenet', the weights are those of the model trained on the ImageNet dataset, so you don't need to load any weights. If you instead want weights pre-trained on some other dataset, first instantiate the model as shown below, then load the specific weights using model.load_weights.
image_model = tf.keras.applications.MobileNet(include_top=True, weights='imagenet', pooling='avg')
image_model.save(r'c:\test')
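Reloading then works from the same full path (a sketch using the example path above):
import tensorflow as tf

image_model = tf.keras.applications.MobileNet(include_top=True, weights='imagenet', pooling='avg')
image_model.save(r'c:\test')                     # saves in the SavedModel format
loaded = tf.keras.models.load_model(r'c:\test')  # loads without a config-file error
loaded.summary()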
I want to run a Python program using PyTorch. How should I make each tensor in the batch equal in size? The following problem appears:
Traceback (most recent call last):
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 311, in <module>
fire.Fire(demo)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 138, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 468, in _Fire
target=component.__name__)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 672, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 289, in demo
n_epochs=n_epochs, batch_size=batch_size, seed=seed)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 168, in train
n_epochs=n_epochs,
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 42, in train_epoch
for batch_idx, (input, target) in enumerate(loader):
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 346, in __next__
data = self._next_data()
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 386, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\fetch.py", line 47, in fetch
return self.collate_fn(data)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in default_collate
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 72, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 63, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [650] at entry 0 and [108] at entry 1
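One common way to make the tensors in a batch the same size (a hedged sketch, not from the original code: dataset is assumed to yield (input, target) pairs with 1-D inputs of varying length, such as [650] and [108]) is a custom collate_fn that pads before stacking:
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

def pad_collate(batch):
    inputs, targets = zip(*batch)
    # pad_sequence right-pads every tensor to the longest length in the batch,
    # so torch.stack inside the loader no longer sees mismatched sizes
    padded = pad_sequence(inputs, batch_first=True)  # shape: (batch, max_len)
    return padded, torch.tensor(targets)

loader = DataLoader(dataset, batch_size=32, collate_fn=pad_collate)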
I compiled TensorFlow from source with MKL in order to accelerate my DNN training. I have a ResNet model copied from tensorflow/models, and the dataset is CIFAR-10. When I run the model with the channels-last format, everything is fine. But MKL is said to accelerate only the channels-first format, so I added some code to transpose the data into NCHW and ran it. Then I get:
Caused by op 'stage/residual_v1/conv2d/Conv2D', defined at:
File "main.py", line 182, in <module>
main(args)
File "main.py", line 83, in main
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
File "/home/holmescn/.pyenv/versions/anaconda35.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/training.py", line 447, in train_and_evaluate
return executor.run()
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/training.py", line 531, in run
return self.run_local()
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/training.py", line 669, in run_local
hooks=train_hooks)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 366, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 1119, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 1132, in _train_model_default
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 1107, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/utils.py", line 18, in _model_fn
logits = build_model(input_layer, mode == tf.estimator.ModeKeys.TRAIN, params=params, args=args)
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/estimators/resnet.py", line 175, in build_model
return resnet.build_model(input_layer, args.num_layers)
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/estimators/resnet.py", line 56, in build_model
x = res_func(x, 3, filters[i], filters[i + 1], strides[i])
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/estimators/resnet.py", line 79, in _residual_v1
x = self._conv(x, kernel_size, out_filter, stride)
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/estimators/base.py", line 59, in _conv
name=name)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/layers/convolutional.py", line 427, in conv2d
return layer.apply(inputs)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 759, in apply
return self.__call__(inputs, *args, **kwargs)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 329, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 688, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/keras/layers/convolutional.py", line 184, in call
outputs = self._convolution_op(inputs, self.kernel)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 868, in __call__
return self.conv_op(inp, filter)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 520, in __call__
return self.call(inp, filter)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 204, in __call__
name=self.name)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 956, in conv2d
data_format=data_format, dilations=dilations, name=name)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3414, in create_op
op_def=op_def)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1740, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): input and filter must have the same depth: 32 vs 16
[[Node: stage/residual_v1/conv2d/Conv2D = _MklConv2D[T=DT_FLOAT, _kernel="MklOp", data_format="NCHW", dilations=[1, 1, 1, 1], padding="SAME", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/device:CPU:0"](Relu, conv2d/kernel/read, Relu:1, DMT/_6)]]
The last traceback line says input and filter must have the same depth, which IMO means the depth dimension of the input tensor and of the filter should be the same. But how could I do that if I want to generate more feature maps? What should I do?
There is no need to transpose the data to change the data format.
You can pass the data format as channels_first or channels_last as an argument.
For example:
python cifar10_main.py --data-dir=${PWD}/cifar-10-data --data-format=channels_first --job-dir=/tmp/cifar10
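Inside the model code, the same choice shows up as the data_format argument of the convolution and pooling layers, so no manual transpose is needed (a sketch; the layer parameters are illustrative, not from the original model):
import tensorflow as tf

inputs = tf.keras.Input(shape=(3, 32, 32))  # NCHW: channels first
x = tf.keras.layers.Conv2D(filters=16, kernel_size=3, padding='same',
                           data_format='channels_first')(inputs)
x = tf.keras.layers.MaxPool2D(pool_size=2, data_format='channels_first')(x)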