Traffic sign recognition iOS CoreML - label not showing class - python

I am following this tutorial to build a simple deep learning app for traffic sign recognition. link
I made my own model, and I also tried it with the model in this repository: link
When I run the app from Xcode on my iPhone, I can see the camera feed, but the text always says "Label", no matter what is on the screen.
The only thing I modified from the tutorial is that I hardcoded the classes before converting to mlmodel:
# import necessary packages
from keras.models import load_model
import coremltools
import argparse
import pickle
# construct the argument parser and parse the arguments
# load the class labels
print("[INFO] loading class labels from label binarizer")
# lb = pickle.loads(open(args["labelbin"], "rb").read())
# class_labels = lb.classes_.tolist()
class_labels = list(range(1, 43))
print("[INFO] class labels: {}".format(class_labels))
# load the trained convolutional neural network
print("[INFO] loading model...")
model = load_model('my_model.h5')
# convert the model to coreml format
print("[INFO] converting model")
coreml_model = coremltools.converters.keras.convert(model,
input_names="image",
image_input_names="image",
image_scale=1/255.0,
class_labels=class_labels,
is_bgr=True)
# save the model to disk
output = "mymodel.mlmodel"
print("[INFO] saving model as {}".format(output))
coreml_model.save(output)
So instead of using the label binarizer, I told the converter that there are 43 classes in my model.
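For reference, here is a minimal sketch of the label-binarizer path that the hardcoded list replaces; it assumes the binarizer was pickled to a file called "labelbin", as in the training script later in this post:
import pickle
# Load the pickled LabelBinarizer and pull the class names out of it.
with open("labelbin", "rb") as f:
    lb = pickle.load(f)
class_labels = lb.classes_.tolist()  # e.g. [0, 1, ..., 42], matching the training labels
print("[INFO] class labels:", class_labels)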
Here is my AppDelegate.swift:
//
// AppDelegate.swift
// trafficsign
//
// Created by administrator on 2020. 11. 11..
// Copyright © 2020. administrator. All rights reserved.
//
import UIKit
@UIApplicationMain
class AppDelegate: UIResponder, UIApplicationDelegate {
var window: UIWindow?
func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool {
// Override point for customization after application launch.
window = UIWindow()
window?.makeKeyAndVisible()
let vc = ViewController()
window?.rootViewController = vc
return true
}
}
My SceneDelegate.swift:
//
// SceneDelegate.swift
// trafficsign
//
// Created by administrator on 2020. 11. 11..
// Copyright © 2020. administrator. All rights reserved.
//
import UIKit
class SceneDelegate: UIResponder, UIWindowSceneDelegate {
var window: UIWindow?
func scene(_ scene: UIScene, willConnectTo session: UISceneSession, options connectionOptions: UIScene.ConnectionOptions) {
// Use this method to optionally configure and attach the UIWindow `window` to the provided UIWindowScene `scene`.
// If using a storyboard, the `window` property will automatically be initialized and attached to the scene.
// This delegate does not imply the connecting scene or session are new (see `application:configurationForConnectingSceneSession` instead).
guard let windowScene = (scene as? UIWindowScene) else { return }
window = UIWindow(windowScene: windowScene)
window?.rootViewController = ViewController()
window?.makeKeyAndVisible()
}
func sceneDidDisconnect(_ scene: UIScene) {
// Called as the scene is being released by the system.
// This occurs shortly after the scene enters the background, or when its session is discarded.
// Release any resources associated with this scene that can be re-created the next time the scene connects.
// The scene may re-connect later, as its session was not neccessarily discarded (see `application:didDiscardSceneSessions` instead).
}
func sceneDidBecomeActive(_ scene: UIScene) {
// Called when the scene has moved from an inactive state to an active state.
// Use this method to restart any tasks that were paused (or not yet started) when the scene was inactive.
}
func sceneWillResignActive(_ scene: UIScene) {
// Called when the scene will move from an active state to an inactive state.
// This may occur due to temporary interruptions (ex. an incoming phone call).
}
func sceneWillEnterForeground(_ scene: UIScene) {
// Called as the scene transitions from the background to the foreground.
// Use this method to undo the changes made on entering the background.
}
func sceneDidEnterBackground(_ scene: UIScene) {
// Called as the scene transitions from the foreground to the background.
// Use this method to save data, release shared resources, and store enough scene-specific state information
// to restore the scene back to its current state.
}
}
And most importantly my ViewController.swift:
//
// ViewController.swift
// trafficsign
//
// Created by administrator on 2020. 11. 11..
// Copyright © 2020. administrator. All rights reserved.
//
import UIKit
import AVFoundation
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
let label: UILabel = {
let label = UILabel()
label.textColor = .white
label.translatesAutoresizingMaskIntoConstraints = false
label.text = "Label"
label.font = label.font.withSize(30)
return label
}()
override func viewDidLoad() {
super.viewDidLoad()
setupCaptureSession()
view.addSubview(label)
setupLabel()
}
override func didReceiveMemoryWarning() {
// call the parent function
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func setupCaptureSession() {
// create a new capture session
let captureSession = AVCaptureSession()
// find the available cameras
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices
do {
// select a camera
if let captureDevice = availableDevices.first {
captureSession.addInput(try AVCaptureDeviceInput(device: captureDevice))
}
} catch {
// print an error if the camera is not available
print(error.localizedDescription)
}
// setup the video output to the screen and add output to our capture session
let captureOutput = AVCaptureVideoDataOutput()
captureSession.addOutput(captureOutput)
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.frame = view.frame
view.layer.addSublayer(previewLayer)
// buffer the video and start the capture session
captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
captureSession.startRunning()
// // creates a new capture session
// let captureSession = AVCaptureSession()
//
// // search for available capture devices
// let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices
//
// // get capture device, add device input to capture session
// do {
// if let captureDevice = availableDevices.first {
// captureSession.addInput(try AVCaptureDeviceInput(device: captureDevice))
// }
// } catch {
// print(error.localizedDescription)
// }
//
// // setup output, add output to capture session
// let captureOutput = AVCaptureVideoDataOutput()
// captureSession.addOutput(captureOutput)
//
// captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
//
// let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
// previewLayer.frame = view.frame
// previewLayer.videoGravity = .resizeAspectFill
// view.layer.addSublayer(previewLayer)
//
// captureSession.startRunning()
}
// called every time a frame is captured
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// load our CoreML Pokedex model
guard let model = try? VNCoreMLModel(for: model_squeezeNet_TSR().model) else { return }
// run an inference with CoreML
let request = VNCoreMLRequest(model: model) { (finishedRequest, error) in
// grab the inference results
guard let results = finishedRequest.results as? [VNClassificationObservation] else { return }
// grab the highest confidence result
guard let Observation = results.first else { return }
// create the label text components
let predclass = "\(Observation.identifier)"
let predconfidence = String(format: "%.02f%%", Observation.confidence * 100)
// set the label text
DispatchQueue.main.async(execute: {
self.label.text = "\(predclass) \(predconfidence)"
})
}
// create a Core Video pixel buffer which is an image buffer that holds pixels in main memory
// Applications generating frames, compressing or decompressing video, or using Core Image
// can all make use of Core Video pixel buffers
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// execute the request
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
// guard let model = try? VNCoreMLModel(for: model_squeezeNet_TSR().model) else { return }
// let request = VNCoreMLRequest(model: model) { (finishedRequest, error) in
// guard let results = finishedRequest.results as? [VNClassificationObservation] else { return }
// guard let Observation = results.first else { return }
//
// DispatchQueue.main.async(execute: {
// self.label.text = "\(Observation.identifier)"
// print(Observation.confidence)
// })
// }
// guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// // executes request
// try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
func setupLabel() {
label.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
label.bottomAnchor.constraint(equalTo: view.bottomAnchor, constant: -50).isActive = true
}
}

I don't know if this fixes it, but in your conversion script try the following:
class_labels = list(range(1, 43))
class_labels = [str(x) for x in class_labels] # add this line
Currently your class labels are integers. It's possible this confuses Core ML or Vision at some point.
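One way to see what actually ended up inside the converted model is to inspect its spec with coremltools. This is a hedged sketch that assumes the converted model is a neural-network classifier; the file name simply matches the conversion script above:
import coremltools
# A Keras-converted classifier stores its labels either as int64ClassLabels
# or as stringClassLabels; only one of the two will be populated.
spec = coremltools.utils.load_spec("mymodel.mlmodel")
clf = spec.neuralNetworkClassifier
print("int labels:   ", list(clf.int64ClassLabels.vector))
print("string labels:", list(clf.stringClassLabels.vector))
print("predicted-class output:", spec.description.predictedFeatureName)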

So I added a label binarizer to my model generator script. I used the original coremlconverter script, which gets the labels from the label binarizer. When I run it, it also lists the 42 labels, so it picks them up.
But when I run the app, it still behaves inconsistently: not the "Label" placeholder anymore, but random predictions, no matter what is in front of the camera.
here is a video: link
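One thing worth checking at this point is whether the converted model still agrees with the Keras model on a single image. The sketch below is only a suggestion: it assumes the file names from the scripts in this post and a placeholder image path, coremltools can only run predictions on macOS, and note that the training script below feeds raw 0-255 pixel arrays to Keras while the conversion above used image_scale=1/255.0 and is_bgr=True:
import numpy as np
import coremltools
from keras.models import load_model
from PIL import Image
img = Image.open("some_training_sign.png").resize((30, 30))  # placeholder path
# Keras side: same preprocessing as the training script (no scaling).
keras_model = load_model("my_model.h5")
keras_pred = np.argmax(keras_model.predict(np.array(img)[None, ...])[0])
print("Keras predicts class index:", keras_pred)
# Core ML side: the model applies its own scaling/BGR handling to the PIL image.
coreml_model = coremltools.models.MLModel("mymodel.mlmodel")
print("Core ML predicts:", coreml_model.predict({"image": img}))  # macOS only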
Here is my model-creating Python script:
#!/usr/bin/env python
# coding: utf-8
# In[1]:
pip install tensorflow==1.15.0 keras==2.2.4 scikit-learn==0.19.2 matplotlib pandas pillow opencv-python
# In[9]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import pickle
#import tensorflow as tf
from PIL import Image
import os
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from sklearn.preprocessing import LabelBinarizer
data = []
labels = []
classes = 43
cur_path = os.getcwd()
# Load the images and labels
for i in range(classes):
path = os.path.join(cur_path,'train',str(i))
images = os.listdir(path)
for a in images:
try:
image = Image.open(path + '\\'+ a)
image = image.resize((30,30))
image = np.array(image)
#sim = Image.fromarray(image)
data.append(image)
labels.append(i)
except:
print("Error loading image")
# Convert the lists to numpy arrays
data = np.array(data)
labels = np.array(labels)
# these two lines are commented out when the script is used without the label binarizer
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
# In[10]:
print(data.shape, labels.shape)
# Split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# Convert the labels to one-hot encoding
# the next two lines are uncommented when the script is used without the label binarizer
#y_train = to_categorical(y_train, 43)
#y_test = to_categorical(y_test, 43)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# In[11]:
# Build the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=X_train.shape[1:]))
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation='softmax'))
# Compile the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[12]:
epochs = 15
history = model.fit(X_train, y_train, batch_size=32, epochs=epochs, validation_data=(X_test, y_test))
model.save("my_model.h5")
# In[5]:
# save the label binarizer to disk
import pickle
print("[INFO] serializing label binarizer...")
f = open("labelbin", "wb")
f.write(pickle.dumps(lb))
f.close()
# In[13]:
# Plot accuracy on a graph
plt.figure(0)
plt.plot(history.history['acc'], label='training accuracy')
plt.plot(history.history['val_acc'], label='val accuracy')
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
plt.figure(1)
plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()
# In[8]:
# Test accuracy on the actual test dataset
from sklearn.metrics import accuracy_score
print(os.path.abspath(path))
y_test = pd.read_csv('Test.csv')
labels = y_test["ClassId"].values
imgs = y_test["Path"].values
data=[]
for img in imgs:
image = Image.open(img)
image = image.resize((30,30))
data.append(np.array(image))
X_test=np.array(data)
pred = model.predict_classes(X_test)
# Accuracy on the test data
from sklearn.metrics import accuracy_score
print(accuracy_score(labels, pred))
# In[13]:
import os
import time
from sklearn.metrics import accuracy_score
cur_path = os.getcwd()
path = os.path.join(cur_path)
print(os.path.abspath(path))
y_test = pd.read_csv('Test.csv')
labels = y_test["ClassId"].values
imgs = y_test["Path"].values
data=[]
icount=0
timesum=0
timecur=0
# Compute the average runtime
for img in imgs:
start_time = time.time()
image = Image.open(img)
image = image.resize((30,30))
image = np.expand_dims(image, axis=0)
image = np.array(image)
pred = model.predict_classes([image])[0]
timecur = (time.time() - start_time)
print("--- %s seconds ---" % (timecur))
timesum = timesum + timecur
icount = icount+1
for img in imgs:
image = Image.open(img)
image = image.resize((30,30))
data.append(np.array(image))
X_test=np.array(data)
print("Átlag: ", (timesum/icount))
pred = model.predict_classes(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(labels, pred))
# In[7]:
import tkinter as tk
import time
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
import numpy
# Load the trained model
from keras.models import load_model
model = load_model('my_model.h5')
# List of class names for display
classes = { 1:'Speed limit (20km/h)',
2:'Speed limit (30km/h)',
3:'Speed limit (50km/h)',
4:'Speed limit (60km/h)',
5:'Speed limit (70km/h)',
6:'Speed limit (80km/h)',
7:'End of speed limit (80km/h)',
8:'Speed limit (100km/h)',
9:'Speed limit (120km/h)',
10:'No passing',
11:'No passing veh over 3.5 tons',
12:'Right-of-way at intersection',
13:'Priority road',
14:'Yield',
15:'Stop',
16:'No vehicles',
17:'Veh > 3.5 tons prohibited',
18:'No entry',
19:'General caution',
20:'Dangerous curve left',
21:'Dangerous curve right',
22:'Double curve',
23:'Bumpy road',
24:'Slippery road',
25:'Road narrows on the right',
26:'Road work',
27:'Traffic signals',
28:'Pedestrians',
29:'Children crossing',
30:'Bicycles crossing',
31:'Beware of ice/snow',
32:'Wild animals crossing',
33:'End speed + passing limits',
34:'Turn right ahead',
35:'Turn left ahead',
36:'Ahead only',
37:'Go straight or right',
38:'Go straight or left',
39:'Keep right',
40:'Keep left',
41:'Roundabout mandatory',
42:'End of no passing',
43:'End no passing veh > 3.5 tons' }
# Load the GUI
top=tk.Tk()
top.geometry('800x600')
top.title('Traffic sign classification')
top.configure(background='#CDCDCD')
label=Label(top,background='#CDCDCD', font=('arial',15,'bold'))
sign_image = Label(top)
def classify(file_path):
start_time = time.time()
global label_packed
image = Image.open(file_path)
image = image.resize((30,30))
image = numpy.expand_dims(image, axis=0)
image = numpy.array(image)
pred = model.predict_classes([image])[0]
sign = classes[pred+1]
print("--- %s seconds ---" % (time.time() - start_time))
print(sign)
label.configure(foreground='#011638', text=sign)
def show_classify_button(file_path):
classify_b=Button(top,text="Classify Image",command=lambda: classify(file_path),padx=10,pady=5)
classify_b.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
classify_b.place(relx=0.79,rely=0.46)
def upload_image():
try:
file_path=filedialog.askopenfilename()
uploaded=Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()/2.25),(top.winfo_height()/2.25)))
im=ImageTk.PhotoImage(uploaded)
sign_image.configure(image=im)
sign_image.image=im
label.configure(text='')
show_classify_button(file_path)
except:
pass
upload=Button(top,text="Upload an image",command=upload_image,padx=10,pady=5)
upload.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
upload.pack(side=BOTTOM,pady=50)
sign_image.pack(side=BOTTOM,expand=True)
label.pack(side=BOTTOM,expand=True)
heading = Label(top, text="Know Your Traffic Sign",pady=20, font=('arial',20,'bold'))
heading.configure(background='#CDCDCD',foreground='#364156')
heading.pack()
top.mainloop()
# In[ ]:

Related

How to load a custom YOLOv7 trained model

How do I load a custom YOLOv7 model?
This is how I know to load a YOLOv5 model:
model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolov5/runs/train/exp15/weights/last.pt', force_reload=True)
I saw videos online and they suggested using this:
!python detect.py --weights runs/train/yolov7x-custom/weights/best.pt --conf 0.5 --img-size 640 --source final_test_v1.mp4
But I want it to be loaded like a normal model and give me the bounding box coordinates of wherever it found the objects.
This is how I did it in YOLOv5:
from models.experimental import attempt_load
yolov5_weight_file = r'weights/rider_helmet_number_medium.pt' # ... may need full path
model = attempt_load(yolov5_weight_file, map_location=device)
def object_detection(frame):
img = torch.from_numpy(frame)
img = img.permute(2, 0, 1).float().to(device) #convert to required shape based on index
img /= 255.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
pred = model(img, augment=False)[0]
pred = non_max_suppression(pred, conf_set, 0.20) # prediction, conf, iou
# print(pred)
detection_result = []
for i, det in enumerate(pred):
if len(det):
for d in det: # d = (x1, y1, x2, y2, conf, cls)
x1 = int(d[0].item())
y1 = int(d[1].item())
x2 = int(d[2].item())
y2 = int(d[3].item())
conf = round(d[4].item(), 2)
c = int(d[5].item())
detected_name = names[c]
# print(f'Detected: {detected_name} conf: {conf} bbox: x1:{x1} y1:{y1} x2:{x2} y2:{y2}')
detection_result.append([x1, y1, x2, y2, conf, c])
frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (255,0,0), 1) # box
if c!=1: # if it is not head bbox, then write use putText
frame = cv2.putText(frame, f'{names[c]} {str(conf)}', (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)
return (frame, detection_result)
Make a prediction with YOLOv7 using torch.hub
# Download YOLOv7 code
!git clone https://github.com/WongKinYiu/yolov7
%cd yolov7
from pathlib import Path
import torch
from models.yolo import Model
from utils.general import check_requirements, set_logging
from utils.google_utils import attempt_download
from utils.torch_utils import select_device
dependencies = ['torch', 'yaml']
check_requirements(Path("/content/yolov7/").parent / 'requirements.txt', exclude=('pycocotools', 'thop'))
set_logging()
def custom(path_or_model='path/to/model.pt', autoshape=True):
"""custom mode
Arguments (3 options):
path_or_model (str): 'path/to/model.pt'
path_or_model (dict): torch.load('path/to/model.pt')
path_or_model (nn.Module): torch.load('path/to/model.pt')['model']
Returns:
pytorch model
"""
model = torch.load(path_or_model, map_location=torch.device('cpu')) if isinstance(path_or_model, str) else path_or_model # load checkpoint
if isinstance(model, dict):
model = model['ema' if model.get('ema') else 'model'] # load model
hub_model = Model(model.yaml).to(next(model.parameters()).device) # create
hub_model.load_state_dict(model.float().state_dict()) # load state_dict
hub_model.names = model.names # class names
if autoshape:
hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available
return hub_model.to(device)
model = custom(path_or_model='yolov7.pt') # custom example
# model = create(name='yolov7', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example
# Verify inference
import numpy as np
from PIL import Image
imgs = [np.zeros((640, 480, 3))]
results = model(imgs) # batched inference
results.print()
results.save()
df_prediction = results.pandas().xyxy
df_prediction
Link to the Colab notebook: https://colab.research.google.com/drive/1nKoC-_areXmc_20Xn7z6kcqHEKU7SJsX#scrollTo=yyB_MQW1OWhZ
You can do that with:
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
path = '/path/to/your/file.pt'
model = torch.hub.load("WongKinYiu/yolov7","custom",f"{path}",trust_repo=True)
To get results you can run
results = model("/path/to/your/photo")
To get bbox you can use:
results.pandas().xyxy
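To pull the actual coordinates out of that DataFrame, here is a short, hedged usage sketch (column names follow the YOLOv5/YOLOv7 torch.hub results API):
df = results.pandas().xyxy[0]  # detections for the first (or only) image
for _, row in df.iterrows():
    x1, y1, x2, y2 = row["xmin"], row["ymin"], row["xmax"], row["ymax"]
    print(row["name"], row["confidence"], (x1, y1, x2, y2))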
EDIT
I created a repository with a Python package in order to do this easily
https://github.com/Tlaloc-Es/aipose
You cannot use attempt_load from the YOLOv5 repo, as that method points to the ultralytics release files. You need to use attempt_load from the YOLOv7 repo, as that one points to the right files.
# yolov7
def attempt_download(file, repo='WongKinYiu/yolov7'):
# Attempt file download if does not exist
file = Path(str(file).strip().replace("'", '').lower())
...
# yolov5
def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'):
# Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc.
from utils.general import LOGGER
def github_assets(repository, version='latest'):
...
Then you can load it like this:
# load yolov7 method
from models.experimental import attempt_load
model = attempt_load('yolov7.pt', map_location='cuda:0') # load FP32 model
import torch as th
def loadModel(path:str):
    model = th.hub.load("WongKinYiu/yolov7", "custom", f"{path}", trust_repo=True)
    return model
This will work. trust_repo=True means it will not ask you to confirm with y or n.
For path you can just pass your custom trained model, e.g. ./best.pt
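A hedged usage example of the loadModel helper above; the weight path and image name are placeholders:
model = loadModel("./best.pt")
results = model("test_image.jpg")
print(results.pandas().xyxy[0])  # bounding boxes as a pandas DataFrame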

How to load a TF Lite Model into Python from a file

I've followed the End-to-End image classification tutorial for tensorflow lite and have created and saved my model as '/path/to/model.tflite'.
What I haven't been able to figure out is how to load it.
I'm looking for some kind of syntax that is similar to this:
from tflite_model_maker import image_classifier
from tflite_model_maker.image_classifier import DataLoader
model = image_classifier.Load('/path/to/model.tflite')
I'm sure I'm missing something obvious here, and this is definitely not the first place I've looked. This seems to be the best place to find what I need, but the syntax used confuses me.
What do I want to be able to do with the model?
test = DataLoader.from_folder('/path/to/testImages')
loss, accuracy = model.evaluate(test)
# A helper function that returns 'red'/'black' depending on if its two input
# parameter matches or not.
def get_label_color(val1, val2):
if val1 == val2:
return 'black'
else:
return 'red'
# Then plot 100 test images and their predicted labels.
# If a prediction result is different from the label provided label in "test"
# dataset, we will highlight it in red color.
test_data = data
plt.figure(figsize=(20, 20))
predicts = model.predict_top_k(test_data)
for i, (image, label) in enumerate(test_data.gen_dataset().unbatch().take(100)):
ax = plt.subplot(10, 10, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image.numpy(), cmap=plt.cm.gray)
predict_label = predicts[i][0][0]
color = get_label_color(predict_label,
test_data.index_to_label[label.numpy()])
ax.xaxis.label.set_color(color)
plt.xlabel('Predicted: %s' % predict_label)
plt.show()
From the syntax above it seems the model isn't just a file but a type/class/method, whatever name is most suitable in Python.
Feels like this should only take one line of code but I haven't been able to find it anywhere.
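For the plain loading part, here is a minimal hedged sketch using the generic TensorFlow Lite interpreter (not tflite_model_maker); the model path is a placeholder:
import numpy as np
import tensorflow as tf
interpreter = tf.lite.Interpreter(model_path="/path/to/model.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Feed a dummy input of the right shape/dtype and read the output back.
dummy = np.zeros(input_details[0]["shape"], dtype=input_details[0]["dtype"])
interpreter.set_tensor(input_details[0]["index"], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]["index"]))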
Managed to do a simple version of it. The images coming up as a stream doesn't work for me using cv2 on Windows as it does for the Pi, so instead I created a webpage in the same directory as this script. This generates an image with the bounding box, using a specified tflite model. It is in no way ideal.
It uses a webcam to get the image and saves the image to the directory the script is run in. It then renames the file so it can be viewed by the webpage I set up to view it.
The majority of this code comes from the TFLite Object Detection Raspberry Pi sample.
import time, os
from PIL import Image
from tflite_support import metadata
import platform
from typing import List, NamedTuple
import json
import cv2 as cv2
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
Interpreter = tf.lite.Interpreter
load_delegate = tf.lite.experimental.load_delegate
class ObjectDetectorOptions(NamedTuple):
"""A config to initialize an object detector."""
enable_edgetpu: bool = False
"""Enable the model to run on EdgeTPU."""
label_allow_list: List[str] = None
"""The optional allow list of labels."""
label_deny_list: List[str] = None
"""The optional deny list of labels."""
max_results: int = -1
"""The maximum number of top-scored detection results to return."""
num_threads: int = 1
"""The number of CPU threads to be used."""
score_threshold: float = 0.0
"""The score threshold of detection results to return."""
class Rect(NamedTuple):
"""A rectangle in 2D space."""
left: float
top: float
right: float
bottom: float
class Category(NamedTuple):
"""A result of a classification task."""
label: str
score: float
index: int
class Detection(NamedTuple):
"""A detected object as the result of an ObjectDetector."""
bounding_box: Rect
categories: List[Category]
def edgetpu_lib_name():
"""Returns the library name of EdgeTPU in the current platform."""
return {
'Darwin': 'libedgetpu.1.dylib',
'Linux': 'libedgetpu.so.1',
'Windows': 'edgetpu.dll',
}.get(platform.system(), None)
class ObjectDetector:
"""A wrapper class for a TFLite object detection model."""
_OUTPUT_LOCATION_NAME = 'location'
_OUTPUT_CATEGORY_NAME = 'category'
_OUTPUT_SCORE_NAME = 'score'
_OUTPUT_NUMBER_NAME = 'number of detections'
def __init__(
self,
model_path: str,
options: ObjectDetectorOptions = ObjectDetectorOptions()
) -> None:
"""Initialize a TFLite object detection model.
Args:
model_path: Path to the TFLite model.
options: The config to initialize an object detector. (Optional)
Raises:
ValueError: If the TFLite model is invalid.
OSError: If the current OS isn't supported by EdgeTPU.
"""
# Load metadata from model.
displayer = metadata.MetadataDisplayer.with_model_file(model_path)
# Save model metadata for preprocessing later.
model_metadata = json.loads(displayer.get_metadata_json())
process_units = model_metadata['subgraph_metadata'][0]['input_tensor_metadata'][0]['process_units']
mean = 0.0
std = 1.0
for option in process_units:
if option['options_type'] == 'NormalizationOptions':
mean = option['options']['mean'][0]
std = option['options']['std'][0]
self._mean = mean
self._std = std
# Load label list from metadata.
file_name = displayer.get_packed_associated_file_list()[0]
label_map_file = displayer.get_associated_file_buffer(file_name).decode()
label_list = list(filter(lambda x: len(x) > 0, label_map_file.splitlines()))
self._label_list = label_list
# Initialize TFLite model.
if options.enable_edgetpu:
if edgetpu_lib_name() is None:
raise OSError("The current OS isn't supported by Coral EdgeTPU.")
interpreter = Interpreter(
model_path=model_path,
experimental_delegates=[load_delegate(edgetpu_lib_name())],
num_threads=options.num_threads)
else:
interpreter = Interpreter(
model_path=model_path, num_threads=options.num_threads)
interpreter.allocate_tensors()
input_detail = interpreter.get_input_details()[0]
# From TensorFlow 2.6, the order of the outputs become undefined.
# Therefore we need to sort the tensor indices of TFLite outputs and to know
# exactly the meaning of each output tensor. For example, if
# output indices are [601, 599, 598, 600], tensor names and indices aligned
# are:
# - location: 598
# - category: 599
# - score: 600
# - detection_count: 601
# because of the op's ports of TFLITE_DETECTION_POST_PROCESS
# (https://github.com/tensorflow/tensorflow/blob/a4fe268ea084e7d323133ed7b986e0ae259a2bc7/tensorflow/lite/kernels/detection_postprocess.cc#L47-L50).
sorted_output_indices = sorted(
[output['index'] for output in interpreter.get_output_details()])
self._output_indices = {
self._OUTPUT_LOCATION_NAME: sorted_output_indices[0],
self._OUTPUT_CATEGORY_NAME: sorted_output_indices[1],
self._OUTPUT_SCORE_NAME: sorted_output_indices[2],
self._OUTPUT_NUMBER_NAME: sorted_output_indices[3],
}
self._input_size = input_detail['shape'][2], input_detail['shape'][1]
self._is_quantized_input = input_detail['dtype'] == np.uint8
self._interpreter = interpreter
self._options = options
def detect(self, input_image: np.ndarray) -> List[Detection]:
"""Run detection on an input image.
Args:
input_image: A [height, width, 3] RGB image. Note that height and width
can be anything since the image will be immediately resized according
to the needs of the model within this function.
Returns:
A Person instance.
"""
image_height, image_width, _ = input_image.shape
input_tensor = self._preprocess(input_image)
self._set_input_tensor(input_tensor)
self._interpreter.invoke()
# Get all output details
boxes = self._get_output_tensor(self._OUTPUT_LOCATION_NAME)
classes = self._get_output_tensor(self._OUTPUT_CATEGORY_NAME)
scores = self._get_output_tensor(self._OUTPUT_SCORE_NAME)
count = int(self._get_output_tensor(self._OUTPUT_NUMBER_NAME))
return self._postprocess(boxes, classes, scores, count, image_width,
image_height)
def _preprocess(self, input_image: np.ndarray) -> np.ndarray:
"""Preprocess the input image as required by the TFLite model."""
# Resize the input
input_tensor = cv2.resize(input_image, self._input_size)
# Normalize the input if it's a float model (aka. not quantized)
if not self._is_quantized_input:
input_tensor = (np.float32(input_tensor) - self._mean) / self._std
# Add batch dimension
input_tensor = np.expand_dims(input_tensor, axis=0)
return input_tensor
def _set_input_tensor(self, image):
"""Sets the input tensor."""
tensor_index = self._interpreter.get_input_details()[0]['index']
input_tensor = self._interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
def _get_output_tensor(self, name):
"""Returns the output tensor at the given index."""
output_index = self._output_indices[name]
tensor = np.squeeze(self._interpreter.get_tensor(output_index))
return tensor
def _postprocess(self, boxes: np.ndarray, classes: np.ndarray,
scores: np.ndarray, count: int, image_width: int,
image_height: int) -> List[Detection]:
"""Post-process the output of TFLite model into a list of Detection objects.
Args:
boxes: Bounding boxes of detected objects from the TFLite model.
classes: Class index of the detected objects from the TFLite model.
scores: Confidence scores of the detected objects from the TFLite model.
count: Number of detected objects from the TFLite model.
image_width: Width of the input image.
image_height: Height of the input image.
Returns:
A list of Detection objects detected by the TFLite model.
"""
results = []
# Parse the model output into a list of Detection entities.
for i in range(count):
if scores[i] >= self._options.score_threshold:
y_min, x_min, y_max, x_max = boxes[i]
bounding_box = Rect(
top=int(y_min * image_height),
left=int(x_min * image_width),
bottom=int(y_max * image_height),
right=int(x_max * image_width))
class_id = int(classes[i])
category = Category(
score=scores[i],
label=self._label_list[class_id], # 0 is reserved for background
index=class_id)
result = Detection(bounding_box=bounding_box, categories=[category])
results.append(result)
# Sort detection results by score ascending
sorted_results = sorted(
results,
key=lambda detection: detection.categories[0].score,
reverse=True)
# Filter out detections in deny list
filtered_results = sorted_results
if self._options.label_deny_list is not None:
filtered_results = list(
filter(
lambda detection: detection.categories[0].label not in self.
_options.label_deny_list, filtered_results))
# Keep only detections in allow list
if self._options.label_allow_list is not None:
filtered_results = list(
filter(
lambda detection: detection.categories[0].label in self._options.
label_allow_list, filtered_results))
# Only return maximum of max_results detection.
if self._options.max_results > 0:
result_count = min(len(filtered_results), self._options.max_results)
filtered_results = filtered_results[:result_count]
return filtered_results
_MARGIN = 10 # pixels
_ROW_SIZE = 10 # pixels
_FONT_SIZE = 1
_FONT_THICKNESS = 1
_TEXT_COLOR = (0, 0, 255) # red
def visualize(
image: np.ndarray,
detections: List[Detection],
) -> np.ndarray:
"""Draws bounding boxes on the input image and return it.
Args:
image: The input RGB image.
detections: The list of all "Detection" entities to be visualize.
Returns:
Image with bounding boxes.
"""
for detection in detections:
# Draw bounding_box
start_point = detection.bounding_box.left, detection.bounding_box.top
end_point = detection.bounding_box.right, detection.bounding_box.bottom
cv2.rectangle(image, start_point, end_point, _TEXT_COLOR, 3)
# Draw label and score
category = detection.categories[0]
class_name = category.label
probability = round(category.score, 2)
result_text = class_name + ' (' + str(probability) + ')'
text_location = (_MARGIN + detection.bounding_box.left,
_MARGIN + _ROW_SIZE + detection.bounding_box.top)
cv2.putText(image, result_text, text_location, cv2.FONT_HERSHEY_PLAIN,
_FONT_SIZE, _TEXT_COLOR, _FONT_THICKNESS)
return image
# ---------------------------------- #
# This is where the custom code starts
# ---------------------------------- #
# Load the TFLite model
TFLITE_MODEL_PATH='object.tflite'
DETECTION_THRESHOLD = 0.5 # 50% threshold required before identifying
options = ObjectDetectorOptions(
num_threads=4,
score_threshold=DETECTION_THRESHOLD,
)
# Close camera if already open
try:
cap.release()
except:
print("",end="") # do nothing
detector = ObjectDetector(model_path=TFLITE_MODEL_PATH, options=options)
cap = cv2.VideoCapture(0) #webcam
counter = 0 # Store many times model has run
while cap.isOpened():
success, image = cap.read()
if not success:
sys.exit(
'ERROR: Unable to read from webcam. Please verify your webcam settings.'
)
image = cv2.flip(image, 1)
# Convert the image from BGR to RGB as required by the TFLite model.
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#image.thumbnail((512, 512), Image.ANTIALIAS)
image_np = np.asarray(image)
# Run object detection estimation using the model.
detections = detector.detect(image_np)
# Draw keypoints and edges on input image
image_np = visualize(image_np, detections)
if counter == 10: # <- Change this to decide how many iterations
cap.release()
break
image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
plt.imsave('tmp.jpg',image_np) # Saves the image
os.replace("tmp.jpg", "web.jpg",) # Renames it for the webpage
counter += 1
print(counter)
cap.release()
Here's the HTML for the document placed in the same directory as the python file, I saved it as index.html and opened in the browser while running the python script above.
<!DOCTYPE html>
<html>
<head>
<title>Object Detection</title>
</head>
<body>
<h1>Object Detection</h1>
<p>This displays images saved during detection process</p>
<canvas id="x" width="700px" height="500px"></canvas>
<script>
var newImage = new Image();
newImage.src = "web.jpg";
var canvas = document.getElementById("x");
var context = canvas.getContext("2d");
newImage.onload = function() {
context.drawImage(newImage, 0, 0);
console.log("trigger")
setTimeout(timedRefresh, 1000);
};
function timedRefresh() {
// just change src attribute, will always trigger the onload callback
try {
newImage.src = ("web.jpg#" + new Date().getTime());
}catch(e){
console.log(e);
}
}
setTimeout(timedRefresh, 100);
</script>
</body>
</html>
It's incredibly slow, not ideal in many ways, and probably breaks many good coding conventions. It was only used locally; I would definitely not use this in a production environment nor recommend it. I just needed a quick proof of concept, and this worked for that.

Pytorch model weights change when put on GPU

I noticed a very strange behaviour regarding the 3D ResNet by facebookresearch. Using their sample code from the website, I receive different results when putting the model on the GPU. While on the CPU the correct class (archery) is predicted, the model fails to predict it on the GPU. Can anyone replicate this and confirm that this is indeed the case? Does anyone know why this is happening and how to prevent it? Below you will find some code to quickly test it out:
import torch
import json
import urllib
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision.transforms import Compose, Lambda
from torchvision.transforms._transforms_video import (
CenterCropVideo,
NormalizeVideo,
)
from pytorchvideo.transforms import (
ApplyTransformToKey,
ShortSideScale,
UniformTemporalSubsample
)
def predict_archery(model, device):
json_url = "https://dl.fbaipublicfiles.com/pyslowfast/dataset/class_names/kinetics_classnames.json"
json_filename = "kinetics_classnames.json"
try:
urllib.URLopener().retrieve(json_url, json_filename)
except:
urllib.request.urlretrieve(json_url, json_filename)
with open(json_filename, "r") as f:
kinetics_classnames = json.load(f)
# Create an id to label name mapping
kinetics_id_to_classname = {}
for k, v in kinetics_classnames.items():
kinetics_id_to_classname[v] = str(k).replace('"', "")
side_size = 256
mean = [0.45, 0.45, 0.45]
std = [0.225, 0.225, 0.225]
crop_size = 256
num_frames = 8
sampling_rate = 8
frames_per_second = 30
# Note that this transform is specific to the slow_R50 model.
transform = ApplyTransformToKey(
key="video",
transform=Compose(
[
UniformTemporalSubsample(num_frames),
Lambda(lambda x: x / 255.0),
NormalizeVideo(mean, std),
ShortSideScale(
size=side_size
),
CenterCropVideo(crop_size=(crop_size, crop_size))
]
),
)
# The duration of the input clip is also specific to the model.
clip_duration = (num_frames * sampling_rate) / frames_per_second
url_link = "https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4"
video_path = 'archery.mp4'
try:
urllib.URLopener().retrieve(url_link, video_path)
except:
urllib.request.urlretrieve(url_link, video_path)
# Select the duration of the clip to load by specifying the start and end duration
# The start_sec should correspond to where the action occurs in the video
start_sec = 0
end_sec = start_sec + clip_duration
# Initialize an EncodedVideo helper class and load the video
video = EncodedVideo.from_path(video_path)
# Load the desired clip
video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)
# Apply a transform to normalize the video input
video_data = transform(video_data)
# Move the inputs to the desired device
inputs = video_data["video"]
inputs = inputs.to(device)
# Pass the input clip through the model
preds = model(inputs[None, ...])
# Get the predicted classes
post_act = torch.nn.Softmax(dim=1)
preds = post_act(preds)
pred_classes = preds.topk(k=5).indices[0]
# Map the predicted classes to the label names
pred_class_names = [kinetics_id_to_classname[int(i)] for i in pred_classes]
print("Top 5 predicted labels: %s" % ", ".join(pred_class_names))
if __name__ == '__main__':
# Choose device
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
device = torch.device("cpu")
# Choose the `slow_r50` model
model = torch.hub.load('facebookresearch/pytorchvideo', 'slow_r50', pretrained=True).to(device)
model = model.eval()
predict_archery(model, device)
Results on cpu:
Top 5 predicted labels: archery, throwing axe, playing paintball,
stretching arm, riding or walking with horse
Results on GPU:
Top 5 predicted labels: flying kite, air drumming, beatboxing,
smoking, reading book
Edit:
Apparently, this issue cannot be reproduced on Google Colab. I therefore assume that it is related to the specific hardware / CUDA version. I am using an NVIDIA TITAN Xp and CUDA version 11.4.
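To narrow down whether the weights themselves change (as the title suggests) or only the computation differs, here is a hedged sketch that compares the parameters before and after moving the model to the GPU:
import torch
model = torch.hub.load('facebookresearch/pytorchvideo', 'slow_r50', pretrained=True).eval()
cpu_state = {k: v.clone() for k, v in model.state_dict().items()}
model = model.to('cuda:0')
# Any nonzero difference here would mean the parameters really did change on transfer.
for name, gpu_param in model.state_dict().items():
    max_diff = (cpu_state[name] - gpu_param.cpu()).abs().max().item()
    if max_diff != 0:
        print(name, max_diff)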

How to make sure my code runs on GPU and not CPU?

I am new to deep learning and TensorFlow. I have the following code. Whenever I run this code, my system administrator notifies me that my code is running on the CPU and not the GPU, even though we have a GPU in the system and I have only installed tensorflow-gpu. What changes should I make to my code so that it runs on the GPU and not the CPU?
import math
import tempfile
import numpy as np
from tensorflow.python.keras.layers import BatchNormalization, Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.python.keras.models import Sequential
import fastestimator as fe
from fastestimator.dataset.data import cifair10
from fastestimator.architecture.tensorflow import WideResidualNetwork
from fastestimator.op.numpyop.meta import Sometimes
from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop
from fastestimator.op.numpyop.univariate import CoarseDropout, Normalize
from fastestimator.op.tensorop.loss import CrossEntropy, SuperLoss
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.io import BestModelSaver
from fastestimator.trace.metric import MCC, Accuracy
from fastestimator.trace.xai import LabelTracker
#training parameters
epochs = 100
batch_size = 128
max_train_steps_per_epoch = None
max_eval_steps_per_epoch = None
save_dir = tempfile.mkdtemp()
train_data, eval_data = cifair10.load_data()
test_data = eval_data.split(0.5)
def corrupt_dataset(dataset, n_classes=10, corruption_fraction=0.4):
# Keep track of which samples were corrupted for visualization later
corrupted = [0 for _ in range(len(dataset))]
# Perform the actual label corruption
n_samples_per_class = len(dataset) // n_classes # dataset size 50000
# n_classes - 100
# n_samples_per_class - 500
n_to_corrupt_per_class = math.floor(corruption_fraction * n_samples_per_class) # 200
n_corrupted = [0] * n_classes
i = 0
while any([elem < n_to_corrupt_per_class for elem in n_corrupted]): # while any class is left to be corrupted
current_class = dataset[i]['y'].item()
if n_corrupted[current_class] < n_to_corrupt_per_class: #check the number of corrupted data of a particular class has reached 200 or not
dataset[i]['y'] = (dataset[i]['y'] + np.random.randint(1, n_classes)) % n_classes # change the y value of a dataset
n_corrupted[current_class] += 1
corrupted[i] = 1
i += 1
# Put the corruption labels into the dataset for visualization
dataset['data_labels'] = np.array(corrupted, dtype=np.int).reshape((len(dataset), 1))
corrupt_dataset(train_data)
def get_wrn():
return WideResidualNetwork((32, 32, 3))
def build_estimator(loss_op):
pipeline = fe.Pipeline(train_data=train_data,
eval_data=eval_data,
test_data=test_data,
batch_size=batch_size,
ops=[Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
CoarseDropout(inputs="x", outputs="x", max_holes=1, mode="train"),
])
model = fe.build(model_fn=get_wrn, optimizer_fn='adam')
network = fe.Network(ops=[
ModelOp(model=model, inputs="x", outputs="y_pred"),
loss_op, # <<<----------------------------- This is where the secret sauce will go
UpdateOp(model=model, loss_name="ce")
])
traces = [
Accuracy(true_key="y", pred_key="y_pred"),
MCC(true_key="y", pred_key="y_pred"),
BestModelSaver(model=model, save_dir=save_dir, metric="mcc", save_best_mode="max", load_best_final=True),
# We will also visualize the difference between the normal and corrupted image confidence scores. You could follow this with an
# ImageViewer trace, but we will get the data out of the system summary instead later for viewing.
LabelTracker(metric="confidence", label="data_labels", label_mapping={"Normal": 0, "Corrupted": 1}, mode="train", outputs="label_confidence"),
]
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=epochs,
traces=traces,
train_steps_per_epoch=max_train_steps_per_epoch,
eval_steps_per_epoch=max_eval_steps_per_epoch,
log_steps=300)
return estimator
loss = SuperLoss(CrossEntropy(inputs=("y_pred", "y"), outputs="ce"), output_confidence="confidence") # The output_confidence arg is only needed if you want to visualize
estimator_super = build_estimator(loss)
superL = estimator_super.fit("SuperLoss")
print("before test")
summary = estimator_super.test()
print("after test")
print(summary.history["test"])

Sequential model always predicts 5

So I've got a sequential model from mostly following along with this video. I've also got a drawing app done in JS and am sending the contents of that canvas to a Flask server using jQuery. All of that seems to be working fine, but when I give my model the image from the canvas and call predict, it always gives me back the same exact response. The values at each index are always the same.
Am I missing something simple? Am I calling predict wrong in the server, or is it something to do with the model itself? It gets the predictions correct in the notebook.
Thanks for your time!
Flask server:
model = load_model('MyModel.h5')
imageWidth = 28
imageHeight = 28
dim = (imageWidth, imageHeight)
# Create the flask web application
app = fl.Flask(__name__)
#app.route('/uploadimage', methods = ['GET', 'POST'])
def uploadimage():
# Get the image from the request
theImage = fl.request.values.get("theImage", "")
#Decode the string to an image
decodedimg = base64.b64decode(theImage[22:])
# Save the image
with open ("theImage.png", "wb") as f:
f.write(decodedimg)
# Open the image from the request as originalImage
originalImage = Image.open("theImage.png")
# Resize it
resizedImage = ImageOps.fit(originalImage, dim, Image.ANTIALIAS)
# Confirm the dimensions of the resized image
w1, h1 = resizedImage.size
print(w1, h1)
# Save it locally
resizedImage.save("resizedImage.png", quality=100, optimize=True)
# Convert to grayscale and then convert that to an array
grayscaleImage = ImageOps.grayscale(resizedImage)
grayscaleArray = np.array(grayscaleImage)
grayscaleArray.shape # Gives (20, 20)
grayscaleArray = grayscaleArray.reshape(1, 28, 28)
setPrediction = model.predict(grayscaleArray)
print(setPrediction) # Always gives back same values
getPrediction = np.array(setPrediction[0])
predictedNumber = str(np.argmax(getPrediction))
print(predictedNumber) # Always '5'
return predictedNumber
Model:
model = kr.models.Sequential() # Create a new sequential neural network
model.add(kr.layers.Flatten()) # Input layer
model.add(kr.layers.Dense(128, activation="relu")) # 128 neurons and the 'basic' activation function.
model.add(kr.layers.Dense(128, activation="relu"))
model.add(kr.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) # Played around with 'sgd' and 'rmsporp' optimizer also.
model.fit(X_train, y_train, epochs=3)
val_loss, val_acc = model.evaluate(X_test, y_test)
print(val_loss, val_acc)
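For comparison, here is a hedged sketch of the preprocessing many MNIST-style setups expect at inference time; whether the inversion and scaling are actually needed depends on how X_train was prepared, which is not shown above:
import numpy as np
from PIL import Image
img = Image.open("theImage.png").convert("L").resize((28, 28))
arr = np.array(img).astype("float32")
arr = 255.0 - arr        # canvas drawings are often black-on-white; MNIST digits are white-on-black
arr = arr / 255.0        # scale to [0, 1] if X_train was scaled the same way
arr = arr.reshape(1, 28, 28)
prediction = model.predict(arr)  # `model` as loaded in the Flask server above
print(np.argmax(prediction[0]))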
