I'm using the "TensorFlow for Poets" tutorial to get started with TensorFlow.
After training the model using the retrain.py script, I want to use the
retrained_graph.pb in order to classify a video and see the results live while the video is running.
What I do now is use OpenCV to read the video I want to classify frame by frame: read a frame, save it to disk, open it again, classify it, and show it on screen together with the classification result using cv2.imshow().
It works, but reading and writing every frame from/to disk makes the resulting video laggy.
Can I use the graph obtained from the training process to classify the video without saving each frame to disk and reading it back?
This is the code I'm using -
with tf.Session(graph=graph) as sess:
    video_capture = cv2.VideoCapture(video_path)
    i = 0
    while True:
        frame = video_capture.read()[1]  # get current frame
        frameId = video_capture.get(1)   # current frame number
        i = i + 1
        cv2.imwrite(filename="C:\\video_images\\" + str(i) + ".jpg", img=frame)  # write frame image to file
        image_data = "C:\\video_images\\" + str(i) + ".jpg"
        t = read_tensor_from_image_file(image_data,
                                        input_height=input_height,
                                        input_width=input_width,
                                        input_mean=input_mean,
                                        input_std=input_std)
        predictions = sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
        scores = []
        for node_id in top_k:
            human_string = label_lines[node_id]
            score = predictions[0][node_id]
            scores.append([score, human_string])
            #print('%s (score = %.5f)' % (human_string, score))
            #print("\n\n")
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, scores[0][1] + " - " + repr(round(scores[0][0], 2)), (10, 50), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.putText(frame, scores[1][1] + " - " + repr(round(scores[1][0], 2)), (10, 100), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.imshow("image", frame)
        cv2.waitKey(1)
        os.remove("C:\\video_images\\" + str(i) + ".jpg")
    video_capture.release()
    cv2.destroyAllWindows()
Thanks.
frame = video_capture.read()[1]  # get current frame
float_caster = frame.astype(np.float32)
resized = cv2.resize(float_caster, (int(input_width), int(input_height)))  # resize the HxWx3 frame (cv2.resize cannot handle a 4-D array)
normalized = (resized - input_mean) / input_std
dims_expander = np.expand_dims(normalized, axis=0)  # add the batch dimension the graph expects
predictions = sess.run(output_operation.outputs[0], {input_operation.outputs[0]: dims_expander})
Instead of calling imwrite just so you can call read_tensor_from_image_file, use the frame itself: resize and normalize it, then pass the normalized array into the session. That way you get rid of the unnecessary disk write/read operations.
Managed to solve it.
I edited read_tensor_from_image_file to the following and simply fed it the frame instead of image_data.
def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
    input_name = "file_reader"
    output_name = "normalized"
    if type(file_name) is str:
        # original path: decode an image file with TensorFlow ops
        file_reader = tf.read_file(file_name, input_name)
        if file_name.endswith(".png"):
            image_reader = tf.image.decode_png(file_reader, channels=3,
                                               name='png_reader')
        elif file_name.endswith(".gif"):
            image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
                                                          name='gif_reader'))
        elif file_name.endswith(".bmp"):
            image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
        else:
            image_reader = tf.image.decode_jpeg(file_reader, channels=3,
                                                name='jpeg_reader')
        float_caster = tf.cast(image_reader, tf.float32)
        dims_expander = tf.expand_dims(float_caster, 0)
        resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
        normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
        sess = tf.Session()
        result = sess.run(normalized)
    elif type(file_name) is np.ndarray:
        # new path: the argument is already a decoded frame, so resize and normalize it with OpenCV/NumPy
        resized = cv2.resize(file_name, (input_width, input_height),
                             interpolation=cv2.INTER_LINEAR)
        normalized = (resized - input_mean) / input_std
        result = normalized
    result = np.array(result).reshape(1, input_height, input_width, 3)
    return result
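For completeness, here is a minimal sketch of how the capture loop can feed the frame straight into the edited function, with no imwrite/os.remove round trip. It reuses the variable names from the question (graph, video_path, label_lines, input/output operations) and only keeps the top prediction for brevity. Note that OpenCV delivers frames in BGR order, so depending on how the model was trained you may want cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) before classifying:

with tf.Session(graph=graph) as sess:
    video_capture = cv2.VideoCapture(video_path)
    while True:
        ret, frame = video_capture.read()
        if not ret:
            break  # end of the video file
        # feed the NumPy frame straight into the edited helper (ndarray branch)
        t = read_tensor_from_image_file(frame,
                                        input_height=input_height,
                                        input_width=input_width,
                                        input_mean=input_mean,
                                        input_std=input_std)
        predictions = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: t})
        best = predictions[0].argsort()[::-1][0]
        cv2.putText(frame, "%s - %.2f" % (label_lines[best], predictions[0][best]),
                    (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
        cv2.imshow("image", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()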
Related
I am working with YOLOv4 to process video frames for object detection of a single class, Human. Every time a human is detected in a frame, the script prints a line in the terminal, "Number of humans detected:", with the count for that particular frame. I want the code to keep running as it is, but instead of printing that output for every frame, it should print it for the frame being processed at the 1-minute mark of the video and then at every 3-minute mark after that, until the video is fully processed. So for a 5-minute video I would want the statement printed at the video timestamps 1:00 and 4:00; for an 8-minute video it would be 1:00, 4:00, 7:00, and so on. I tried the schedule module, but it seems to just schedule the entire code to run after 1 minute.
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import datetime
import schedule
import time
from time import sleep
file = "test2"
input = "C:/Users/asmita.nandi/Downloads/" + file + ".mp4"
output = "C:/Users/asmita.nandi/Downloads/" + file + ".avi"
net = cv2.dnn.readNet("C:/Users/asmita.nandi/Downloads/custom-yolov4-tiny_human-608 (1).cfg",
                      "C:/Users/asmita.nandi/Downloads/custom-yolov4-tiny-detector_human.weights")
labelsPath = "C:/Users/asmita.nandi/Downloads/human_label.txt"
def event(input,output,net,labelsPath):
    LABELS = open(labelsPath).read().strip().split("\n")
    np.random.seed(1)
    cmap = plt.get_cmap('tab20b')
    colors = [cmap(i)[:3] for i in np.linspace(0, 1, 6)]
    CONF_THRESH, NMS_THRESH = 0.25, 0.25
    vs = cv2.VideoCapture(input)
    fp = vs.get(cv2.CAP_PROP_FPS)
    writer = None
    W = None
    H = None
    totalFrames = 0
    TotalHuman = 0
    while True:
        frame = vs.read()
        frame = frame[1] if input else frame
        if input is not None and frame is None:
            break
        (H, W) = frame.shape[:2]
        print(H, W)
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        if output is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(output, fourcc, fp, (W, H), True)
        ln = net.getLayerNames()
        ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (608, 608), swapRB=True, crop=False)
        net.setInput(blob)
        start = time.time()
        layerOutputs = net.forward(ln)
        print(layerOutputs)
        end = time.time()
        boxes = []
        confidences = []
        classIDs = []
        (H, W) = frame.shape[:2]
        # loop over each of the layer outputs
        for output in layerOutputs:
            # loop over each of the detections
            for detection in output:
                scores = detection[5:]
                #print(detection)
                classID = np.argmax(scores)
                confidence = scores[classID]
                if confidence > CONF_THRESH:
                    #print("Box")
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONF_THRESH, NMS_THRESH)
        ObjectCount = {}
        if len(idxs) > 0:
            for i in idxs.flatten():
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                color = colors[classIDs[i]]
                color = [i * 255 for i in color]
                class_name = LABELS[classIDs[i]]
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                # if class_name == "Human":
                cv2.putText(frame, class_name, (x, y - 10), 0, 0.5, color, 2)
                obj, conf = LABELS[classIDs[i]], confidences[i]
                if obj not in ObjectCount.keys():
                    ObjectCount[obj] = 1
                else:
                    ObjectCount[obj] += 1
                allvalues = []
                allvalues.append(ObjectCount[obj])
            print("Number of Humans detected ", max(allvalues))
        if writer is not None:
            writer.write(frame)
        # show the output frame
        #cv2_imshow(frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()
    # stop the timer and display FPS information
    #for (objectID, centroid) in objects.items():
    #print(objectID, centroids)
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print(totalFrames)
    #print(info)
    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()
    # if we are not using a video file, stop the camera video stream
    if not input:
        vs.stop()
    # otherwise, release the video file pointer
    else:
        vs.release()
    # close any open windows
    cv2.destroyAllWindows()

schedule.every(1).minutes.do(event(input,output,net,labelsPath))

while 1:
    schedule.run_pending()
    time.sleep(1)
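One way to get output at the video timestamps 1:00, 4:00, 7:00, ... is to derive the elapsed video time from the frame counter and the file's FPS rather than from wall-clock scheduling. The following is only a rough sketch of that idea, reusing the input variable and an ObjectCount dictionary as in the code above; the detection body is elided:

import cv2

vs = cv2.VideoCapture(input)              # same video file as in the question
fp = vs.get(cv2.CAP_PROP_FPS) or 30.0     # frames per second of the file (fallback if unknown)

total_frames = 0
next_mark = 60                            # first report at the 1:00 mark (seconds of video time)

while True:
    grabbed, frame = vs.read()
    if not grabbed:
        break

    # ... run the YOLO forward pass here and fill ObjectCount for this frame, as in the question ...

    total_frames += 1
    video_seconds = total_frames / fp     # elapsed *video* time, independent of processing speed
    if video_seconds >= next_mark:
        print("Number of Humans detected at {}:{:02d} ->".format(
            int(next_mark // 60), int(next_mark % 60)), ObjectCount.get("Human", 0))
        next_mark += 180                  # then 4:00, 7:00, ...

vs.release()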
I wanted to find out how the video frame length is calculated in the code below.
[UPD] I initially thought this was done by YOLO, but I later realized it is OpenCV that deals with the number of frames in a video file.
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model
class YOLO(object):
    _defaults = {
        "model_path": 'model_data/yolo.h5',
        "anchors_path": 'model_data/yolo_anchors.txt',
        "classes_path": 'model_data/coco_classes.txt',
        "score" : 0.3,
        "iou" : 0.45,
        "model_image_size" : (416, 416),
        "gpu_num" : 1,
    }

    @classmethod
    def get_defaults(cls, n):
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    def __init__(self, **kwargs):
        self.__dict__.update(self._defaults) # set up default values
        self.__dict__.update(kwargs) # and update with user overrides
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.sess = K.get_session()
        self.boxes, self.scores, self.classes = self.generate()

    def _get_class(self):
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _get_anchors(self):
        anchors_path = os.path.expanduser(self.anchors_path)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)

    def generate(self):
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'weights must be a .h5 file.'

        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        is_tiny_version = num_anchors==6 # default setting
        try:
            self.yolo_model = load_model(model_path, compile=False)
        except:
            self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
                if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
            self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
        else:
            assert self.yolo_model.layers[-1].output_shape[-1] == \
                num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
                'Mismatch between model and given anchor and class sizes'

        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.

        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2, ))
        if self.gpu_num>=2:
            self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
        boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                           len(self.class_names), self.input_image_shape,
                                           score_threshold=self.score, iou_threshold=self.iou)
        return boxes, scores, classes
    def detect_image(self, image):
        start = timer()

        if self.model_image_size != (None, None):
            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for i in range(thickness):
                draw.rectangle(
                    [left + i, top + i, right - i, bottom - i],
                    outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        end = timer()
        print(end - start)
        return image

    def close_session(self):
        self.sess.close()
def detect_video(yolo, video_path, output_path=""):
    import cv2
    video_path = './input.mp4'
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time == 10: mouseBrush(image)
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Actually, this code is just one part of the whole YOLOv3 model, but I think the part that deals with the number of video frames is included here.
If you mean the current FPS, this is the part that builds the current-FPS string:
while True:
    return_value, frame = vid.read()
    image = Image.fromarray(frame)
    image = yolo.detect_image(image)
    result = np.asarray(image)
    curr_time = timer()
    exec_time = curr_time - prev_time
    prev_time = curr_time
    accum_time = accum_time + exec_time
    curr_fps = curr_fps + 1
    if accum_time > 1:
        accum_time = accum_time - 1
        fps = "FPS: " + str(curr_fps)
        curr_fps = 0
    cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.50, color=(255, 0, 0), thickness=2)
    cv2.namedWindow("result", cv2.WINDOW_NORMAL)
    cv2.imshow("result", result)
    if curr_fps == 10:      # Stops at 10th frame.
        time.sleep(60)      # Delay for 1 minute (60 seconds).
    if isOutput:
        out.write(result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
I needed the frame number to control every 10th frame in the video file, and thanks to the comments above I figured out that the line I was looking for is:
curr_fps = curr_fps + 1
UPD: The following line calculates the number of frames in a video file:
NumberOfFrame = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
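For reference, a minimal sketch of how the current frame index (rather than the FPS counter) could be used to act on every 10th frame; CAP_PROP_POS_FRAMES and CAP_PROP_FRAME_COUNT are standard OpenCV properties, the rest is purely illustrative:

import cv2

vid = cv2.VideoCapture('./input.mp4')
total_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))   # number of frames in the file

while True:
    ret, frame = vid.read()
    if not ret:
        break
    frame_idx = int(vid.get(cv2.CAP_PROP_POS_FRAMES))    # index of the frame just read (1-based)
    if frame_idx % 10 == 0:
        print("Processing frame {} of {}".format(frame_idx, total_frames))

vid.release()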
I have a script, developed in Python + TensorFlow, that is able to detect people, as shown below:
import numpy as np
import tensorflow as tf
import cv2
import time
class DetectorAPI:
    def __init__(self, path_to_ckpt):
        self.path_to_ckpt = path_to_ckpt

        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.default_graph = self.detection_graph.as_default()
        self.sess = tf.Session(graph=self.detection_graph)

        # Definite input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

    def processFrame(self, image):
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image, axis=0)
        # Actual detection.
        start_time = time.time()
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_np_expanded})
        end_time = time.time()

        print("Elapsed Time:", end_time-start_time)

        im_height, im_width, _ = image.shape
        boxes_list = [None for i in range(boxes.shape[1])]
        for i in range(boxes.shape[1]):
            boxes_list[i] = (int(boxes[0,i,0] * im_height),
                             int(boxes[0,i,1] * im_width),
                             int(boxes[0,i,2] * im_height),
                             int(boxes[0,i,3] * im_width))

        return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

    def close(self):
        self.sess.close()
        self.default_graph.close()


if __name__ == "__main__":
    model_path = 'modelo/frozen_inference_graph.pb'
    odapi = DetectorAPI(path_to_ckpt=model_path)
    threshold = 0.7
    cap = cv2.VideoCapture('http://81.198.213.128:82/mjpg/video.mjpg')

    while True:
        r, img = cap.read()
        img = cv2.resize(img, (1280, 720))

        boxes, scores, classes, num = odapi.processFrame(img)

        for i in range(len(boxes)):
            if classes[i] == 1 and scores[i] > threshold:
                box = boxes[i]
                cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)

        cv2.imshow("Preview", img)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
But this code processes the entire captured frame, and I need the detection to occur only in a specific position.
Example:
I need the detection to occur only within the red frame!
How can I do this?
Just pass only the red region of the frame into your model.
Assuming you have the top, bot, left, right coordinates of the region in the frame:
while True:
    r, img = cap.read()
    img = img[top:bot, left:right]
    img = cv2.resize(img, (1280, 720))

    boxes, scores, classes, num = odapi.processFrame(img)
    ...
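If you also want to draw the detections on the full frame rather than on the crop, the returned box coordinates need to be shifted back by the crop offset. A rough sketch under the same assumed top/bot/left/right values; the cv2.resize step is dropped here so the coordinates map back directly (if you keep it, you would also have to rescale them):

while True:
    r, img = cap.read()
    roi = img[top:bot, left:right]                 # crop only the red region

    boxes, scores, classes, num = odapi.processFrame(roi)

    for i in range(len(boxes)):
        if classes[i] == 1 and scores[i] > threshold:
            y1, x1, y2, x2 = boxes[i]
            # shift the box from ROI coordinates back to full-frame coordinates
            cv2.rectangle(img, (x1 + left, y1 + top), (x2 + left, y2 + top), (255, 0, 0), 2)

    cv2.imshow("Preview", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break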
I have a small script which checks whether my object is present in an image or not; if it is present, the script writes the image to a folder. There are multiple sub-folders inside the input folder. Writing the results for the 1st sub-folder works fine, but when the results for the 2nd sub-folder are written, the images of the 1st sub-folder are appended along with those of the 2nd.
The code works perfectly fine apart from this problem: while writing the data to disk, the data of the 1st sub-folder is also appended to the data of the 2nd sub-folder. Below is my code:
def target_non_target(input_frames_folder,model_file,output):
    if not os.path.exists(output):
        os.makedirs(output,exist_ok=True)
    count=0
    folders = glob(input_frames_folder)
    img_list = []
    for folder in folders:
        folder_name=os.path.basename(folder)
        #print(folder_name)
        out_path = output +"\\" + folder_name
        print(out_path)
        os.makedirs(out_path,exist_ok=True)
        for f in glob(folder+"/*.jpg"):
            img_list.append(f)
        for i in range(len(img_list)):
            v1=os.path.basename(img_list[i])
            img_name = os.path.splitext(v1)[0]
            image = cv2.imread(img_list[i])
            orig = image.copy()
            image = cv2.resize(image, (28, 28))
            image = image.astype("float") / 255.0
            image = img_to_array(image)
            image = np.expand_dims(image, axis=0)
            print("[INFO] loading network...")
            model = load_model(model_file)
            (non_target, target) = model.predict(image)[0]
            if target > non_target:
                label = "Target"
            else:
                label = "Non Target"
            probab = target if target > non_target else non_target
            label = "{}: {:.2f}%".format(label, probab * 100)
            op = imutils.resize(orig, width=400)
            cv2.putText(op, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            if target > non_target:
                cv2.imwrite(out_path+"/"+"\\{}.jpg".format(img_name),orig)
            cv2.waitKey(0)
    #return target_op

frames_folder = ("C:\\Python36\\videos\\videos_new\\*")
model = ("C:\\Python35\\target_non_target\\target_non_target.model")
output_folder = ("C:\\Python35\\target_non_target\\Target_images_new\\")

target_check = target_non_target(frames_folder,model,output_folder)
Suppose there are 2 sub-folders, A and B, inside a main folder X (there will be many more sub-folders). While writing the output to disk, the data of A is written perfectly, but while writing the data for B, the images of both folder A and folder B end up in the B folder. I want the data to stay in their respective folders. Any idea what changes could be made to my script to get the desired output?
You are using img_list = [] to initialize the list, but you need to reset it back to empty at the end of each folder loop. Right now you keep all of your results, so when you move on to the next folder you carry the previous results along and add to them.
The very end of your for folder in folders loop needs to have img_list = [].
Updated full code:
def target_non_target(input_frames_folder,model_file,output):
    if not os.path.exists(output):
        os.makedirs(output,exist_ok=True)
    count=0
    folders = glob(input_frames_folder)
    img_list = []
    for folder in folders:
        folder_name=os.path.basename(folder)
        #print(folder_name)
        out_path = output +"\\" + folder_name
        print(out_path)
        os.makedirs(out_path,exist_ok=True)
        for f in glob(folder+"/*.jpg"):
            img_list.append(f)
        for i in range(len(img_list)):
            v1=os.path.basename(img_list[i])
            img_name = os.path.splitext(v1)[0]
            image = cv2.imread(img_list[i])
            orig = image.copy()
            image = cv2.resize(image, (28, 28))
            image = image.astype("float") / 255.0
            image = img_to_array(image)
            image = np.expand_dims(image, axis=0)
            print("[INFO] loading network...")
            model = load_model(model_file)
            (non_target, target) = model.predict(image)[0]
            if target > non_target:
                label = "Target"
            else:
                label = "Non Target"
            probab = target if target > non_target else non_target
            label = "{}: {:.2f}%".format(label, probab * 100)
            op = imutils.resize(orig, width=400)
            cv2.putText(op, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            if target > non_target:
                cv2.imwrite(out_path+"/"+"\\{}.jpg".format(img_name),orig)
            cv2.waitKey(0)
        img_list = []  # this is the end of for folder in folders, reset list
    #return target_op

frames_folder = ("C:\\Python36\\videos\\videos_new\\*")
model = ("C:\\Python35\\target_non_target\\target_non_target.model")
output_folder = ("C:\\Python35\\target_non_target\\Target_images_new\\")

target_check = target_non_target(frames_folder,model,output_folder)
I wanted to see that the images I used in my network were OK, so I saved a bunch of them using the following code:
train_set = dset.MNIST(root=root, train=True, transform=transforms.ToTensor(), download=download)

for it, (img, target) in enumerate(train_loader):
    X = Variable(img)
    tar = Variable(target)
    X = X.view(batch_size, -1)
    cur_img_batch = X.data.numpy()
    cur_tar_batch = tar.data.numpy()
    for i in range(batch_size):
        cur_img = cur_img_batch[i]
        im = Image.fromarray(cur_img.reshape((28, 28)).astype('uint8') * 255)
        if cur_tar_batch[i] == 8:
            im.save(test_img_dir + 'iter_' + str(it) + '_sample_' + str(i) + '.png')
This isn't the cleanest code, but it just saves a bunch of images that are all labeled as '8'. Upon opening them, I see that most of them look like this, even though a small minority of them are perfectly fine.
Am I doing something wrong?
From the comments:
The issue was in the line cur_img.reshape((28, 28)).astype('uint8') * 255: the normalized image is cast to integers before being multiplied by 255, so every pixel ends up as either 0 or 255.
The updated code:
train_set = dset.MNIST(root=root, train=True, transform=transforms.ToTensor(), download=download)

for it, (img, target) in enumerate(train_loader):
    X = Variable(img)
    tar = Variable(target)
    X = X.view(batch_size, -1)
    cur_img_batch = X.data.numpy()
    cur_tar_batch = tar.data.numpy()
    for i in range(batch_size):
        cur_img = cur_img_batch[i]
        im = Image.fromarray((cur_img.reshape((28, 28)) * 255).astype('uint8'))
        if cur_tar_batch[i] == 8:
            im.save(test_img_dir + 'iter_' + str(it) + '_sample_' + str(i) + '.png')
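A tiny, self-contained illustration of the difference between the two expressions (not part of the original code), using a few made-up normalized pixel values:

import numpy as np

# Casting a normalized float image to uint8 truncates everything below 1.0 to 0,
# so multiplying afterwards yields only 0 or 255. Multiplying first keeps the gradations.
pixels = np.array([0.0, 0.25, 0.5, 0.99, 1.0], dtype=np.float32)

wrong = pixels.astype('uint8') * 255      # -> [  0   0   0   0 255]
right = (pixels * 255).astype('uint8')    # -> [  0  63 127 252 255]

print(wrong)
print(right)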