I've fine-tuned a model (using TF 1.9) from the Object Detection Model Zoo, and now I am trying to freeze the graph for TensorFlowSharp, also using TF 1.9.
import tensorflow as tf
import os
from tensorflow.python.tools import freeze_graph
from tensorflow.core.protobuf import saver_pb2
#print("current tensorflow version: ", tf.version)
sess=tf.Session()
model_path = 'latest_cp/'
saver = tf.train.import_meta_graph('model.ckpt.meta')
saver.restore(sess,tf.train.latest_checkpoint('.')) #current dir of the checkpoint file
tf.train.write_graph(sess.graph_def, '.', 'test.pbtxt') #output in pbtxt format
freeze_graph.freeze_graph(input_graph = 'test.pbtxt',
                          input_binary = False,
                          input_checkpoint = model_path + 'model.ckpt',
                          output_node_names = "num_detections,detection_boxes,detection_scores,detection_classes",
                          output_graph = 'test.bytes',
                          clear_devices = True, initializer_nodes = "", input_saver = "",
                          restore_op_name = "save/restore_all", filename_tensor_name = "save/Const:0")
It worked, but after I imported the frozen graph into Unity it returned the following error:
TFException: Op type not registered 'NonMaxSuppressionV3' in binary running on AK38713. Make sure the Op and Kernel are registered in the binary running in this process.
I found out that TensorFlowSharp works with TensorFlow 1.4, but when I tried to freeze the graph with 1.4 it returned the same NonMaxSuppressionV3 error.
Do you know any way to solve this issue? Thank you so much for the support.
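For reference, a minimal diagnostic sketch (not a fix) that lists the op types contained in the frozen graph, assuming the test.bytes output produced by the script above; it makes it easy to see which ops, such as NonMaxSuppressionV3, the TF 1.4-based TensorFlowSharp runtime would need to have registered:
import tensorflow as tf

# Load the frozen graph produced above and print the distinct op types it
# contains; any op not registered by the runtime that loads the graph
# (e.g. NonMaxSuppressionV3) will trigger the "Op type not registered" error.
graph_def = tf.GraphDef()
with open('test.bytes', 'rb') as f:
    graph_def.ParseFromString(f.read())

print(sorted({node.op for node in graph_def.node}))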
Related
I'm currently trying to create a model using transfer learning, but I'm getting an error:
NameError: name 'scipy' is not defined
I'm following a video tutorial. I loaded some datasets onto my computer and I'm trying to convert them into '.json' and '.h5' files. I ran the code shown in the first part of the tutorial to create the model. There was supposed to be a download step like in the video, but instead I got this error and I can't solve it.
Here are my codes:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense
from keras.applications.vgg16 import VGG16
import matplotlib.pyplot as plt
from glob import glob
from keras.utils import img_to_array
from keras.utils import load_img
train_path = "/Users/atakansever/Desktop/CNNN/fruits-360_dataset/fruits-360/Training/"
test_path = "/Users/atakansever/Desktop/CNNN/fruits-360_dataset/fruits-360/Test/"
# img = load_img(train_path + "Tangelo/0_100.jpg")
# plt.imshow(img)
# plt.axes("off")
# plt.show()
numberOfClass = len(glob(train_path + "/*"))
# print(numberOfClass)
vgg = VGG16()
# print(vgg.summary())
vgg_layer_list = vgg.layers
# print(vgg_layer_list)
model = Sequential()
for i in range(len(vgg_layer_list)-1):
    model.add(vgg_layer_list[i])
# print(model.summary())
for layers in model.layers:
    layers.trainable = False
model.add(Dense(numberOfClass, activation="softmax"))
# print(model.summary())
model.compile(loss = "categorical_crossentropy",optimizer = "rmsprop",metrics = ["accuracy"])
#train
train_data = ImageDataGenerator().flow_from_directory(train_path, target_size=(224,224))
test_data = ImageDataGenerator().flow_from_directory(test_path, target_size=(224,224))
batch_size = 32
hist = model.fit_generator(train_data,
                           steps_per_epoch=1600//batch_size,
                           epochs=25,
                           validation_data=test_data,
                           validation_steps=800//batch_size)
and here is the error
pyenv shell 3.9.7
atakansever#atakan-Air CNNN % pyenv shell 3.9.7
pyenv: shell integration not enabled. Run `pyenv init' for instructions.
atakansever#atakan-Air CNNN % /Users/atakansever/.pyenv/versions/3.9.7/bin/python /Users/atakansever/Desktop/CNNN/fruits.py
Metal device set to: Apple M1
systemMemory: 8.00 GB
maxCacheSize: 2.67 GB
2022-07-10 11:17:50.428036: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:305] Could not identify NUMA node of platform GPU ID 0, defaulting to 0. Your kernel may not have been built with NUMA support.
2022-07-10 11:17:50.428259: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:271] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 0 MB memory) -> physical PluggableDevice (device: 0, name: METAL, pci bus id: <undefined>)
Found 67692 images belonging to 131 classes.
Found 22688 images belonging to 131 classes.
/Users/atakansever/Desktop/CNNN/fruits.py:53: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
  hist = model.fit_generator(train_data, steps_per_epoch=1600//batch_size,epochs=25,validation_data= test_data,validation_steps=800//batch_size)
Traceback (most recent call last):
  File "/Users/atakansever/Desktop/CNNN/fruits.py", line 53, in <module>
    hist = model.fit_generator(train_data, steps_per_epoch=1600//batch_size,epochs=25,validation_data= test_data,validation_steps=800//batch_size)
  File "/Users/atakansever/.pyenv/versions/3.9.7/lib/python3.9/site-packages/keras/engine/training.py", line 2260, in fit_generator
    return self.fit(
  File "/Users/atakansever/.pyenv/versions/3.9.7/lib/python3.9/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "/Users/atakansever/.pyenv/versions/3.9.7/lib/python3.9/site-packages/keras/preprocessing/image.py", line 2244, in apply_affine_transform
    if scipy is None:
NameError: name 'scipy' is not defined
Running pip install scipy or pip3 install scipy should solve the problem.
First, install the scipy package if it isn't already installed:
pip install scipy
and then add scipy to your imports:
import scipy # This is new!
from keras.preprocessing.image import ImageDataGenerator
# ... all your imports
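If you want to confirm the install is visible to the same interpreter before rerunning the script, a quick optional check:
import scipy
print(scipy.__version__)  # if this prints a version, the NameError should be gone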
I clicked on the error message and it directed me to the source code.
Comment out those two lines and save the Python script.
# if scipy is None:
#     raise ImportError('Image transformations require SciPy. '
#                       'Install SciPy.')
Then it will work perfectly.
You have to:
1. Install scipy: pip install scipy
2. Restart VS Code (or your IDE), or restart the Python kernel, and rerun the code.
I'm trying to load a pre-trained Torch model (U2Net, to be precise), saved as ONNX, with cv.dnn.readNetFromONNX.
But I'm receiving the error:
error: OpenCV(4.1.2) /io/opencv/modules/dnn/include/opencv2/dnn/dnn.inl.hpp:349:
error (-204:Requested object was not found) Required argument "starts" not found
into dictionary in function 'get'
This is the code to reproduce the error with Google Colab:
### get U2Net implementation ###
%cd /content
!git clone https://github.com/shreyas-bk/U-2-Net
### download pre-trained model ###
!gdown --id 1ao1ovG1Qtx4b7EoskHXmi2E9rp5CHLcZ -O /content/U-2-Net/u2net.pth
###
%cd /content/U-2-Net
### imports ###
from google.colab import files
from model import U2NET
import torch
import os
### create U2Net model from state dict ###
model_dir = '/content/U-2-Net/u2net.pth'
net = U2NET(3, 1)
net.load_state_dict(torch.load(model_dir, map_location='cpu'))
net.eval()
### pass to it a dummy input and save to onnx ###
img = torch.randn(1, 3, 320, 320, requires_grad=False)
img = img.to(torch.device('cpu'))
output_dir = os.path.join('/content/u2net.onnx')
torch.onnx.export(net, img, output_dir, opset_version=11, verbose=True)
### load the model in OpenCV ###
import cv2 as cv
net = cv.dnn.readNetFromONNX('/content/u2net.onnx')
[ OpenCV => 4.1.2, Platform => Google Colab, Torch => 1.11.0+cu113]
As berak suggested, the issue was related to the OpenCV version (4.1.2). Updating to 4.5.5 solved the issue.
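For completeness, a minimal sketch of the fix in a Colab cell (assuming the opencv-python wheel is used; the runtime may need a restart so the upgraded version is picked up):
# Upgrade OpenCV in the Colab environment to a 4.5.x build, then reload the
# exported ONNX model with the newer DNN importer.
!pip install -U "opencv-python>=4.5.5"

import cv2 as cv
print(cv.__version__)  # expect 4.5.5 or newer after restarting the runtime

net = cv.dnn.readNetFromONNX('/content/u2net.onnx')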
I used the TensorFlow 2 Object Detection API. I received a saved_model.pb, which is a TensorFlow graph and not a tf.keras model, so it can be loaded with tf.saved_model.load() but not with tf.keras.models.load_model(). The model is saved via tf.saved_model.save() in export_lib_v2.py of the Object Detection API, at line 271.
I tried to build the model from the config file and load the checkpoints, to then save it as a tf.keras model:
import tensorflow as tf
from object_detection.utils import config_util
from object_detection.builders import model_builder
import os
def save_in_tfkeras(save_filepath, label_map_path, config_file_path, checkpoint_path):
    configs = config_util.get_configs_from_pipeline_file(config_file_path)
    model_config = configs['model']
    detection_model = model_builder.build(model_config=model_config, is_training=False)

    # Restore checkpoint
    ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
    ckpt.restore(checkpoint_path).expect_partial()

    detection_model.built(input_shape=(320,320))

    tf.keras.models.save_model(detection_model, save_filepath)
    print('modelsaved as tf.keras in ' + save_filepath)

if __name__ == "__main__":
    PATH_TO_LABELMAP = './models/face_model/face_label.pbtxt'
    PATH_TO_CONFIG = './models/face_model/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/pipeline.config'
    PATH_TO_CHECKPOINT = './models/face_model/v2_model_50k/ckpt-51'

    save_filepath = './Kmodels/mobileNet_V2'
    if not os.path.exists(save_filepath):
        os.makedirs(save_filepath)

    save_in_tfkeras(save_filepath, PATH_TO_LABELMAP, PATH_TO_CONFIG, PATH_TO_CHECKPOINT)
However, this does not seem to work. There are errors which, in my opinion, originate from mixing the tf and tf.keras model formats. The last error message:
ValueError: Weights for model
ssd_mobile_net_v2fpn_keras_feature_extractor_1 have not yet been
created. Weights are created when the Model is first called on inputs
or build() is called with an input_shape.
The model was saved with TensorFlow loaded as tensorflow.compat.v2
Question: Is there a way to build the model, load the checkpoint weights and then save as tf.keras model?
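Following the hint in the error message, one step the snippet above never performs is actually calling the model on an input so that its weights get created before saving. A minimal, untested sketch of that step, using the preprocess/predict interface of the TF2 Object Detection API's DetectionModel (whether tf.keras.models.save_model then accepts this model subclass is a separate question):
import tensorflow as tf

# Run a dummy image through the detection model so Keras actually creates
# the weights before any attempt to serialize the model.
dummy_image = tf.zeros([1, 320, 320, 3], dtype=tf.float32)
preprocessed, shapes = detection_model.preprocess(dummy_image)
prediction_dict = detection_model.predict(preprocessed, shapes)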
I am getting the error module 'tensorflow' has no attribute 'get_default_graph'. Please help me solve this.
tensorflow: 2.4.0
VS Code
Python 3.6.9
import tensorflow as tf
import numpy as np
n1 =tf.constant(1)
n2 = tf.constant(2)
n3 = n1+n2
with tf.compat.v1.Session() as sess:
    result1 = sess.run(n3)
    print(result1)
    print(tf.get_default_graph())
g = tf.Graph()
print(g)
To run this kind of op with a session in TF 2.x, you can execute the code below using compat.v1.Session.
import tensorflow as tf
mlt= 2*tf.Variable(4.0)
with tf.compat.v1.Session() as sess:
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    print(sess.run(mlt))
In TF 2.x, eager execution is enabled by default, and the working code is shown below:
import tensorflow as tf
gt = 2*tf.Variable(4.0)
print(gt)
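As for get_default_graph itself, the TF 1.x graph utilities are available under the compat.v1 namespace in TF 2.x; a minimal sketch of that part of your script:
import tensorflow as tf

# TF 1.x graph APIs are reachable through tf.compat.v1 in TF 2.x
print(tf.compat.v1.get_default_graph())

g = tf.Graph()
print(g)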
Problem
Can't use eager execution in TensorFlow version 1.5.
Code
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.python.client import timeline
tf.enable_eager_execution()
x = tf.random_normal([0,10000])
y= tf.random_normal([10000,1000])
res = tf.matmul(x, y)
# Run the graph with full trace option
with tf.Session() as sess:
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    sess.run(res, options=run_options, run_metadata=run_metadata)

    # Create the Timeline object, and write it to a json
    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timeline.json', 'w') as f:
        f.write(ctf)
Stack Trace
C:\ProgramData\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from float to np.floating is deprecated. In future, it will be treated as np.float64 == np.dtype(float).type.
  from ._conv import register_converters as _register_converters
Traceback (most recent call last):
  File "D:/Users/hello/PycharmProjects/crimeBuster/main.py", line 6, in <module>
    tf.enable_eager_execution()
AttributeError: module 'tensorflow' has no attribute 'enable_eager_execution'
Version
import tensorflow as tf
print(tf.__version__)
# 1.5.0
Back in version 1.5, eager execution was still in the contributed packages, so you need to import it explicitly; the correct usage is:
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
Also, just keep in mind that:
For eager execution, we recommend using TensorFlow version 1.8 or newer.
(from the Github page)
Version 1.7 was the first where the command tf.enable_eager_execution() was made available, i.e. eager execution was moved out of contrib (see v1.7 changes).
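As a usage note, once eager execution is enabled the Session/timeline block in the question no longer applies, because ops run immediately; a rough sketch for the 1.5-era contrib API, using a hypothetical non-empty shape in place of [0, 10000]:
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()

x = tf.random_normal([1000, 10000])  # hypothetical shape; the original [0, 10000] yields an empty tensor
y = tf.random_normal([10000, 1000])

res = tf.matmul(x, y)  # executes immediately under eager execution, no Session needed
print(res.shape)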