Following http://answers.opencv.org/question/183507/opencv-dnn-import-error-for-keras-pretrained-vgg16-model/, I'm trying to get DenseNet to work with the OpenCV DNN module, but I'm getting:
"error: OpenCV(3.4.2)
/io/opencv/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp:712:
error: (-2:Unspecified error) Tensor's
data type is not supported in function
'getTensorContent'"
import numpy as np
from keras import applications
from keras import backend as K
import cv2 as cv
import tensorflow as tf
model = applications.densenet.DenseNet121(input_shape=(224, 224, 3), weights='imagenet', include_top=True)
sess = K.get_session()
print(model.input, model.outputs)
## Tensor("input_1:0", shape=(?, 224, 224, 3), dtype=float32) [<tf.Tensor 'fc1000/Softmax:0' shape=(?, 1000) dtype=float32>]
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
MODEL_PATH = 'out'
MODEL_NAME = 'test'
input_node_name = 'input_1'
output_node_name = 'fc1000/Softmax'
!rm -rf {MODEL_PATH}/
tf.train.write_graph(sess.graph_def, MODEL_PATH, f'{MODEL_NAME}_graph.pb', as_text=False)
tf.train.write_graph(sess.graph_def, MODEL_PATH, f'{MODEL_NAME}_graph.pbtxt')
tf.train.Saver().save(sess, f'{MODEL_PATH}/{MODEL_NAME}.chkp')
freeze_graph.freeze_graph(f'{MODEL_PATH}/{MODEL_NAME}_graph.pbtxt',
                          None, False,
                          f'{MODEL_PATH}/{MODEL_NAME}.chkp',
                          output_node_name,
                          "save/restore_all",
                          "save/Const:0",
                          f'{MODEL_PATH}/frozen_{MODEL_NAME}.pb',
                          True, "")
graph_def = tf.GraphDef()
with tf.gfile.Open(f'{MODEL_PATH}/frozen_{MODEL_NAME}.pb', "rb") as f:
    graph_def.ParseFromString(f.read())
output_graph_def = optimize_for_inference_lib.optimize_for_inference(
    graph_def, [input_node_name], [output_node_name], tf.float32.as_datatype_enum)
with tf.gfile.GFile(f'{MODEL_PATH}/opt_{MODEL_NAME}.pb', "wb") as f:
    f.write(output_graph_def.SerializeToString())
# Strip Const nodes.
for i in reversed(range(len(graph_def.node))):
    if graph_def.node[i].op == 'Const':
        del graph_def.node[i]
    # for attr in ['T', 'data_format', 'Tshape', 'N', 'Tidx', 'Tdim',
    #              'use_cudnn_on_gpu', 'Index', 'Tperm', 'is_training',
    #              'Tpaddings']:
    #     if attr in graph_def.node[i].attr:
    #         del graph_def.node[i].attr[attr]
# Save stripped model.
tf.train.write_graph(graph_def, "", f'{MODEL_PATH}/stripped_{MODEL_NAME}.pbtxt', as_text=True)
net = cv.dnn.readNetFromTensorflow(f'{MODEL_PATH}/opt_{MODEL_NAME}.pb', f'{MODEL_PATH}/stripped_{MODEL_NAME}.pbtxt')
## error: OpenCV(3.4.2) /io/opencv/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp:712: error: (-2:Unspecified error) Tensor's data type is not supported in function 'getTensorContent'
It's hard to tell from the error which node is causing this.
Any ideas, please?
Cheers
tensorflow 1.12.0
opencv 3.4.3
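Edit: to narrow it down, here is a small diagnostic sketch (using the opt_test.pb written above; the assumption is that the dtype enum on each Const node is what getTensorContent checks) that counts the dtypes of the Const nodes. Anything unusual in the output (e.g. bool or int64) is a likely culprit:
import collections
import tensorflow as tf
graph_def = tf.GraphDef()
with tf.gfile.Open(f'{MODEL_PATH}/opt_{MODEL_NAME}.pb', "rb") as f:
    graph_def.ParseFromString(f.read())
# Count the declared dtype of every Const node in the graph.
counts = collections.Counter(
    tf.as_dtype(n.attr['dtype'].type).name
    for n in graph_def.node if n.op == 'Const')
print(counts)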
Solved, thanks to the feedback from @dkurt and https://github.com/keras-team/keras/issues/6775, which explains the Keras learning_phase: you have to set the learning phase before loading the model! (I still wasn't able to get optimize_for_inference to work due to FusedBatchNorm, though; see the end of this answer.)
Load the model with set_learning_phase(0):
import numpy as np
from keras import applications
from keras import backend as K
import tensorflow as tf
K.set_learning_phase(0)  ## must come before loading the model
model = applications.densenet.DenseNet121(input_shape=(224, 224, 3), weights='imagenet', include_top=True)
sess = K.get_session()
print(model.input, model.outputs)
## Tensor("input_1:0", shape=(?, 224, 224, 3), dtype=float32) [<tf.Tensor 'fc1000/Softmax:0' shape=(?, 1000) dtype=float32>]
Freeze it:
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
MODEL_PATH = 'out'
MODEL_NAME = 'test'
input_node_name = 'input_1'
output_node_name = 'fc1000/Softmax'
!rm -rf {MODEL_PATH}/
tf.train.write_graph(sess.graph_def, MODEL_PATH, f'{MODEL_NAME}_graph.pb', as_text=False)
tf.train.write_graph(sess.graph_def, MODEL_PATH, f'{MODEL_NAME}_graph.pbtxt')
tf.train.Saver().save(sess, f'{MODEL_PATH}/{MODEL_NAME}.chkp')
freeze_graph.freeze_graph(f'{MODEL_PATH}/{MODEL_NAME}_graph.pbtxt',
                          None, False,
                          f'{MODEL_PATH}/{MODEL_NAME}.chkp',
                          output_node_name,
                          "save/restore_all",
                          "save/Const:0",
                          f'{MODEL_PATH}/frozen_{MODEL_NAME}.pb',
                          True, "")
Then load it with OpenCV DNN:
import cv2 as cv
net = cv.dnn.readNetFromTensorflow(f'{MODEL_PATH}/frozen_{MODEL_NAME}.pb')
# Smoke test
inp = np.ones([1, 3, 224, 224]).astype(np.float32)
net.setInput(inp)
dnn_out = net.forward()
print(dnn_out.shape, dnn_out[0,:5])
## (1, 1000) [2.0760612e-04 2.6876197e-04 5.9680151e-05 5.5908626e-05 1.4762023e-04]
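The all-ones input only proves that the graph runs; to sanity-check real preprocessing, here is a hedged sketch (image.jpg is a placeholder; Keras' DenseNet uses "torch"-style preprocessing, which blobFromImage alone cannot express, so the per-channel std division is applied manually on the blob):
img = cv.imread('image.jpg')
# blobFromImage computes (pixel - mean) * scalefactor, so this yields
# x/255 - [0.485, 0.456, 0.406] per channel.
blob = cv.dnn.blobFromImage(img, scalefactor=1.0 / 255, size=(224, 224),
                            mean=(123.675, 116.28, 103.53), swapRB=True, crop=False)
# Finish "torch" preprocessing: divide each channel by its std.
blob /= np.asarray([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 3, 1, 1)
net.setInput(blob)
print(net.forward().argmax())  # ImageNet class id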
As said, I wasn't able to get optimize_for_inference to work due to the FusedBatchNorm:
WARNING:tensorflow:Didn't find expected Conv2D input to 'conv2_block1_0_bn/FusedBatchNorm_1'
opencv-4.0.0/modules/dnn/src/tensorflow/tf_importer.cpp:497: error: (-2:Unspecified error) Input layer not found: conv2_block1_1_bn/FusedBatchNorm_1 in function 'connect'
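One avenue I haven't verified on DenseNet (so treat it as an assumption) is TF's graph_transforms tool in place of optimize_for_inference, since its passes sometimes cope with graphs whose batch-norm layout optimize_for_inference misparses:
from tensorflow.tools.graph_transforms import TransformGraph
graph_def = tf.GraphDef()
with tf.gfile.Open(f'{MODEL_PATH}/frozen_{MODEL_NAME}.pb', "rb") as f:
    graph_def.ParseFromString(f.read())
# Fold constants and drop Identity/unused nodes without touching batch norm.
transformed = TransformGraph(graph_def, [input_node_name], [output_node_name],
                             ['remove_nodes(op=Identity)',
                              'fold_constants(ignore_errors=true)',
                              'strip_unused_nodes'])
with tf.gfile.GFile(f'{MODEL_PATH}/transformed_{MODEL_NAME}.pb', "wb") as f:
    f.write(transformed.SerializeToString())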
So please let me know if you know a solution for that. Thanks
Related
I'm converting my MaskRCNN model to ONNX with the following code:
import torch
from torch import nn
import torchvision.models as models
import io
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
def create_model():
    num_classes = 2
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    # model.fc = nn.Linear(in_features=2048, out_features=3, bias=True)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer,
                                                       num_classes)
    return model
model=create_model()
weights = torch.load("/content/drive/MyDrive/model_best.pth",map_location='cpu')
model.load_state_dict(weights)
model.eval()
dummy_in = torch.randn(1, 3, 512, 512)
in_names = [ "actual_input_1" ] + [ "learned_%d" % i for i in range(2) ]
out_names = [ "output1" ]
import torch.onnx
torch.onnx.export(model, dummy_in, "maskrcnn_yeni2.onnx", input_names=in_names, output_names=out_names, opset_version=11, verbose=True)
After that I get a .onnx file, and everything seems to be okay.
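To double-check that the exported file itself is usable, a quick sanity run outside OpenCV (assuming onnxruntime is installed; "actual_input_1" is the input name given to the exporter above):
import numpy as np
import onnxruntime as ort
sess = ort.InferenceSession("maskrcnn_yeni2.onnx")
# Run one random 512x512 image through the exported graph.
outs = sess.run(None, {"actual_input_1": np.random.randn(1, 3, 512, 512).astype(np.float32)})
print([o.shape for o in outs])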
Then I use OpenCV to read it:
import cv2
model_onnx=cv2.dnn.readNetFromONNX("/content/maskrcnn_yeni2.onnx")
I get this error. I have searched and gone through many similar errors, but I couldn't find a solution:
error: OpenCV(4.1.2) /io/opencv/modules/dnn/src/onnx/onnx_importer.cpp:155: error: (-210:Unsupported format or combination of formats) Unsupported data type: INT32 in function 'getMatFromTensor'
Also, when I load and check the model with onnx itself, it works without a problem:
import onnx
model_onnx2 = onnx.load("/content/maskrcnn_yeni2.onnx")
try:
    onnx.checker.check_model(model_onnx2)
except onnx.checker.ValidationError as e:
    print('The model is invalid: %s' % e)
else:
    print('The model is valid!')
Output:
The model is valid!
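One workaround I have seen suggested for this error (an assumption, not verified on this model) is constant-folding the graph with onnx-simplifier (pip install onnx-simplifier), which often removes the INT32 helper tensors that getMatFromTensor rejects:
import onnx
from onnxsim import simplify
model = onnx.load("/content/maskrcnn_yeni2.onnx")
# Constant-fold shape computations and re-check the result.
model_sim, ok = simplify(model)
assert ok, "simplification failed"
onnx.save(model_sim, "/content/maskrcnn_yeni2_sim.onnx")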
I have trained a deep learning network that has a pretrained ELMo layer. I've saved the model and weights using the code below.
model.save("model.h5")
model.save_weights("weights.h5")
I now need to load the model, but I'm not sure of the right way. I've tried two techniques and both of them fail.
1: Tried just loading the model, but it fails with a 'No model found in config file' error:
import numpy as np
import io
import re
import tensorflow as tf
from tensorflow import keras

elmo_BiDirectional_model = keras.models.load_model("model.h5")

x_data = np.zeros((1, 1), dtype='object')
x_data[0] = "test token"

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    session.run(tf.tables_initializer())
    print(elmo_BiDirectional_model.predict(x_data))
File "C:\temp\Simon\perdict_elmo.py", line 36, in
elmo_BiDirectional_model = keras.models.load_model("model.h5")
File
"C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\keras\saving\save.py",
line 143, in load_model
return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile)
File
"C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\keras\saving\hdf5_format.py",
line 159, in load_model_from_hdf5
raise ValueError('No model found in config file.')
ValueError: No model found in config file.
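For reference, 'No model found in config file.' means the HDF5 file has no model_config attribute, i.e. it effectively stores weights only. A plain-h5py sketch to confirm what the file contains:
import h5py
with h5py.File('model.h5', 'r') as f:
    print(list(f.attrs))   # a full model save should include 'model_config'
    print(list(f.keys()))  # e.g. 'model_weights'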
2: Tried building the model and just setting the weights:
import tensorflow_hub as hub
import tensorflow as tf

elmo = hub.Module("https://tfhub.dev/google/elmo/3", trainable=False)

from tensorflow.keras.layers import Input, Lambda, Bidirectional, Dense, Dropout, Flatten, LSTM
from tensorflow.keras.models import Model

def ELMoEmbedding(input_text):
    return elmo(tf.reshape(tf.cast(input_text, tf.string), [-1]), signature="default", as_dict=True)["elmo"]

def build_model():
    input_layer = Input(shape=(1,), dtype="string", name="Input_layer")
    embedding_layer = Lambda(ELMoEmbedding, output_shape=(1024,), name="Elmo_Embedding")(input_layer)
    BiLSTM = Bidirectional(LSTM(128, return_sequences=False, recurrent_dropout=0.2, dropout=0.2), name="BiLSTM")(embedding_layer)
    Dense_layer_1 = Dense(64, activation='relu')(BiLSTM)
    Dropout_layer_1 = Dropout(0.5)(Dense_layer_1)
    Dense_layer_2 = Dense(32, activation='relu')(Dropout_layer_1)
    Dropout_layer_2 = Dropout(0.5)(Dense_layer_2)
    output_layer = Dense(3, activation='sigmoid')(Dropout_layer_2)
    model = Model(inputs=[input_layer], outputs=output_layer, name="BiLSTM with ELMo Embeddings")
    model.summary()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

elmo_BiDirectional_model = build_model()
elmo_BiDirectional_model.load_weights('weights.h5')

import numpy as np
import io
import re
from tensorflow import keras

x_data = np.zeros((1, 1), dtype='object')
x_data[0] = "test token"

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    session.run(tf.tables_initializer())
    print(elmo_BiDirectional_model.predict(x_data))
But this failed with the following error:
File "C:\temp\Simon\perdict_elmo.py", line 28, in
elmo_BiDirectional_model.load_weights('weights.h5')
File
"C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\training.py",
line 182, in load_weights
return super(Model, self).load_weights(filepath, by_name)
File
"C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\keras\engine\network.py",
line 1373, in load_weights
saving.load_weights_from_hdf5_group(f, self.layers)
File
"C:\ProgramData\Anaconda3\lib\site-packages\tensorflow_core\python\keras\saving\hdf5_format.py",
line 645, in load_weights_from_hdf5_group
original_keras_version = f.attrs['keras_version'].decode('utf8')
AttributeError: 'str' object has no attribute 'decode'
Versions:
keras.__version__: '2.2.4-tf'
tensorflow.__version__: '1.15.0'
Finally! I had to downgrade two dependencies, then I used strategy #2 to load the weights into the model.
pip install astroid==2.3.0 --force-reinstall --user
pip install h5py==2.10.0 --force-reinstall --user
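For anyone verifying the fix: the AttributeError comes from h5py >= 3.0 returning str instead of bytes for HDF5 attributes, which older Keras then tries to .decode(). A quick check after the downgrade (using the weights.h5 from above):
import h5py
print(h5py.__version__)  # expect 2.10.0 after the downgrade
with h5py.File('weights.h5', 'r') as f:
    # bytes under h5py 2.x (decodable), str under h5py 3.x (raises AttributeError)
    print(type(f.attrs['keras_version']))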
Loading EfficientNet as below:
import efficientnet.keras as efn
from tensorflow.keras import layers
from tensorflow.keras import Model
base_model = efn.EfficientNetB0(input_shape = (224, 224, 3), include_top = False)
and getting the error below:
AttributeError: module 'keras.utils' has no attribute 'get_file'
The above code is still not working, so I'm using a workaround for now:
import efficientnet.tfkeras as efn
base_model = efn.EfficientNetB6(input_shape = (224, 224, 3), include_top = False)
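Another workaround, assuming you are on TensorFlow 2.3 or newer: EfficientNet ships with tf.keras itself, so the standalone package can be skipped entirely.
import tensorflow as tf
# The built-in EfficientNet avoids the standalone package's keras.utils dependency.
base_model = tf.keras.applications.EfficientNetB0(input_shape=(224, 224, 3),
                                                  include_top=False)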
I'm trying to use a Lambda layer to wrap a function (get_reconstruction_loss) that combines two layers so that it computes the MSE of their outputs. Unfortunately, I can't instantiate the model due to the error shown below.
I'm thankful for any hints!
The code is based on the work of https://github.com/rajatkb/Deep-Super-Resolution-Research
Code:
import cv2
from keras import Model
from keras import backend as K
from keras.applications.vgg16 import VGG16
from keras.layers import Conv2D, Input, Lambda
import numpy as np

class MyClass:
    # Source: https://github.com/rajatkb/Deep-Super-Resolution-Research
    def __init__(self, img_size, channels, is_train):
        # Var definition
        self.lambda_content = 1
        loss_layer = 'block2_conv2'

        ### define Model here ###
        model_inp = Input(shape=(img_size, img_size, channels), name='input_layer')
        model_output = Conv2D(filters=64, kernel_size=(9, 9), padding='same', activation='relu', kernel_initializer='RandomNormal')(model_inp)
        model_output = Conv2D(filters=32, kernel_size=(1, 1), padding='same', activation='relu', kernel_initializer='RandomNormal')(model_output)
        model_output = Conv2D(filters=3, kernel_size=(5, 5), padding='same', activation='linear', kernel_initializer='RandomNormal', name='model_output')(model_output)
        self.inference_model = Model(inputs=model_inp, outputs=model_output)

        if is_train:
            vgg_inp = Input(shape=(img_size, img_size, channels), name='vgg_net_input')
            vgg = VGG16(input_tensor=vgg_inp, input_shape=(img_size, img_size, channels), weights='imagenet', include_top=False)
            for l in vgg.layers:
                l.trainable = False
            # Layer output
            loss_layer_output = [vgg.get_layer(loss_layer).output]
            # Define a model that calculates the feature representation
            vgg_reconstruction_model = Model(inputs=vgg_inp, outputs=loss_layer_output)
            vgg_reconstruction_model.summary()
            # Feature representation of the HR image and the prediction image
            hr_vgg = vgg_reconstruction_model(vgg_inp)
            pred_vgg = vgg_reconstruction_model(model_output)
            reconstruction_loss = Lambda(self.get_reconstruction_loss, output_shape=(1,), name='reconstruction_loss')([pred_vgg[0], hr_vgg[0]])
            self.loss_model = Model(inputs=[model_inp, vgg_inp], outputs=[model_output, reconstruction_loss], name='loss_model')

    def get_reconstruction_loss(self, args):
        new_activation, content_activation = args[0], args[1]
        return K.constant(self.lambda_content) * K.mean(K.square(new_activation - content_activation))

if __name__ == "__main__":
    net = MyClass(500, 3, True)
Error:
Exception has occurred: AttributeError
'NoneType' object has no attribute '_inbound_nodes'
File "/home/robousb2/gD_tools/playground/percep_loss_question.py", line 44, in __init__
self.loss_model = Model(inputs=[model_inp, vgg_inp] , outputs = [model_output, reconstruction_loss], name='loss_model')
File "/home/robousb2/gD_tools/playground/percep_loss_question.py", line 54, in <module>
net = MyClass(500,3,True)
The problem is here:
reconstruction_loss = Lambda(self.get_reconstruction_loss,output_shape=(1,), name='reconstruction_loss')([pred_vgg[0], hr_vgg[0]])
I am not sure why you are indexing with [0]. Indexing a Keras tensor like pred_vgg[0] inserts a raw TensorFlow slicing op that is not wrapped in a Keras layer, which is exactly what triggers the 'NoneType' object has no attribute '_inbound_nodes' error. If you remove the indexing, it works:
reconstruction_loss = Lambda(self.get_reconstruction_loss,output_shape=(1,), name='reconstruction_loss')([pred_vgg, hr_vgg])
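As an aside, if you ever genuinely need to slice a Keras tensor, wrap the slicing in its own Lambda so that it becomes a proper layer with _inbound_nodes (a minimal sketch, not needed for this model):
from keras.layers import Lambda
# The slice now happens inside a layer, so the graph metadata stays intact.
pred_first = Lambda(lambda t: t[0], name='take_first')(pred_vgg)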
I am observing inconsistent results when evaluating a saved TensorFlow graph created using Keras. Example code is below. Multiple invocations with the same input within a single session produce the same output, but the same input across multiple sessions (i.e., re-running the script) returns inconsistent results. Please advise.
import os
import shutil
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.saved_model import signature_constants
from tensorflow.saved_model import tag_constants
from tensorflow.saved_model.builder import SavedModelBuilder
from tensorflow.saved_model.signature_def_utils import predict_signature_def
export_dir = './export'
input_size = 64
def LoadInput():
    image = load_img('mug.jpg', target_size=(input_size, input_size))
    image = img_to_array(image)
    input_batch = image.reshape((1, image.shape[0], image.shape[1],
                                 image.shape[2]))
    input_batch = preprocess_input(input_batch)
    return input_batch
def main():
    vgg16 = VGG16(input_shape=(input_size, input_size, 3), include_top=False)
    output = Flatten()(vgg16.get_output_at(-1))
    model = Model(vgg16.input, output)
    # print(model.summary())
    shutil.rmtree(export_dir)
    builder = SavedModelBuilder(export_dir)
    signature = predict_signature_def(
        inputs={'input': model.inputs[0]},
        outputs={'output': model.outputs[0]})
    builder.add_meta_graph_and_variables(
        sess=K.get_session(),
        tags=[tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
        },
        main_op=tf.saved_model.main_op.main_op())
    builder.save()
    with tf.Session() as session:
        tf.saved_model.loader.load(session, ["serve"], export_dir)
        feed_dict = {'input_1:0': LoadInput()}
        output_dict = 'flatten/Reshape:0'
        print(session.run(output_dict, feed_dict))

if __name__ == '__main__':
    main()
This turned out to be caused by not freezing the graph properly; most likely main_op=tf.saved_model.main_op.main_op() re-runs the global variable initializer after the variables are restored, which re-randomizes the weights on every load. I've posted the working version below:
"""Experimenting with Keras VGG16."""
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from IPython import embed
export_dir = '/tmp/export'
input_size = 64
def LoadInput():
    image = load_img('mug.jpg', target_size=(input_size, input_size))
    image = img_to_array(image)
    input_batch = image.reshape(
        (1, image.shape[0], image.shape[1], image.shape[2]))
    input_batch = preprocess_input(input_batch)
    return input_batch
# stackoverflow.com/questions/45466020/how-to-export-keras-h5-to-tensorflow-pb
def FreezeSession(session,
                  keep_var_names=None,
                  output_names=None,
                  clear_devices=True):
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(
            set(v.op.name for v in tf.global_variables()).difference(
                keep_var_names or []))
        output_names = output_names or []
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ''
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph
# medium.com/@pipidog/how-to-convert-your-keras-models-to-tensorflow-e471400b886a
def RunModel():
    vgg16 = VGG16(input_shape=(input_size, input_size, 3), include_top=False)
    output = Flatten()(vgg16.get_output_at(-1))
    model = Model(vgg16.input, output)
    print(model.summary())
    output_batch = model.predict(LoadInput())
    print(output_batch)
    # print([v for v in output_batch[0]])
    frozen_graph = FreezeSession(
        K.get_session(), output_names=[out.op.name for out in model.outputs])
    tf.train.write_graph(frozen_graph, '/tmp', 'keras-vgg.pb', as_text=False)
    tf.reset_default_graph()
    with tf.Session() as session:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(open('/tmp/keras-vgg.pb', 'rb').read())
        session.graph.as_default()
        tf.import_graph_def(graph_def, name='')
        # for op in session.graph.get_operations():
        #     print(op.name)
        tensor_input = session.graph.get_tensor_by_name('input_1:0')
        tensor_output = session.graph.get_tensor_by_name('flatten/Reshape:0')
        output_batch = session.run(tensor_output, {tensor_input: LoadInput()})
        print(output_batch)
        # print([v for v in output_batch[0]])

def main():
    RunModel()

if __name__ == '__main__':
    main()
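For completeness, a hedged alternative that keeps the original SavedModel route: assuming the root cause is main_op re-running the global variable initializer over the restored weights, passing only the table initializer should avoid it (an untested sketch, reusing the builder and signature from the question):
builder.add_meta_graph_and_variables(
    sess=K.get_session(),
    tags=[tag_constants.SERVING],
    signature_def_map={
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
    },
    main_op=tf.tables_initializer())  # no global re-init on load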