Make putText label appear all the time - Python

I'm using the following code to detect an object and classify it.
for i in range(len(detections)):
    face_i = detections[i]
    x, y, w, h = face_i
    xw1 = max(int(x - 0.4 * w), 0)
    yw1 = max(int(y - 0.4 * h), 0)
    xw2 = min(int(x + w + 0.7 * w), img_w - 1)
    yw2 = min(int(y + h + 0.4 * h), img_h - 1)
    roi = frame[yw1:yw2 + 1, xw1:xw2 + 1, :]
    roi = cv2.resize(roi, (299, 299), interpolation=cv2.INTER_CUBIC)
    numpy_frame = np.asarray(roi)
    numpy_frame = cv2.normalize(numpy_frame.astype('float'), None, -0.5, .5, cv2.NORM_MINMAX)
    numpy_final = np.expand_dims(numpy_frame, axis=0)
    start_time = timeit.default_timer()
    # Do the prediction only on every fifth frame
    if constance.x % 5 == 0:
        predictions = sess.run(detection_graph, {'Mul:0': numpy_final})
        animal_score = predictions[0][1]
        human_score = predictions[0][0]
        if animal_score > human_score:
            human_string = "Animal"
        else:
            human_string = "Human"
        cv2.putText(image_np, str(human_string), (x, y - 10), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.rectangle(image_np, (xw1, yw1), (xw2, yw2), (0, 222, 0), 1)
return image_np
Since the prediction takes a lot of time, I skip it on most frames. But because I draw the text only on the frames where a prediction runs, the label flickers. How can I make the label appear all the time?

You have to call putText on every single frame. If it's a frame where you aren't performing the prediction, draw the results of the most recent prediction.
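As an illustration (a minimal sketch reusing the question's own variables such as constance.x, sess and numpy_final; last_label is a new variable introduced here), cache the result of the last prediction in a variable that outlives the if block and draw it unconditionally:
last_label = ""  # new: initialize once, before the frame loop starts

# ... inside the per-frame loop, for each detected box:
if constance.x % 5 == 0:
    # run the expensive prediction only on every fifth frame
    predictions = sess.run(detection_graph, {'Mul:0': numpy_final})
    animal_score = predictions[0][1]
    human_score = predictions[0][0]
    last_label = "Animal" if animal_score > human_score else "Human"

# draw on every frame, using whatever the most recent prediction was
if last_label:
    cv2.putText(image_np, last_label, (x, y - 10), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
cv2.rectangle(image_np, (xw1, yw1), (xw2, yw2), (0, 222, 0), 1)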

How do I clock the user action?

I'm following a simple head pose estimation tutorial in Python. I've tried to make some modifications to the code, but I've been stuck for days now. I just want to know how long the user has been looking to the left or right. If the user is detected looking left or right for a long time, say 2-3 minutes, the program should give a warning or print a simple message saying how long he/she has been looking that way. How do I achieve this? Any ideas?
Sorry for my bad English.
Any help will be appreciated :)
Here's my code:
import cv2
import mediapipe as mp
import numpy as np
import time

mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)

cap = cv2.VideoCapture(0)

while cap.isOpened():
    success, image = cap.read()
    image = cv2.resize(image, (780, 350))
    image = cv2.flip(image, 1)
    # start = time.time()

    # Flip the image horizontally for a later selfie-view display
    # Also convert the color space from BGR to RGB
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)

    # To improve performance
    image.flags.writeable = False

    # Get the result
    results = face_mesh.process(image)

    # To improve performance
    image.flags.writeable = True

    # Convert the color space from RGB to BGR
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    img_h, img_w, img_c = image.shape
    face_3d = []
    face_2d = []

    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            for idx, lm in enumerate(face_landmarks.landmark):
                if idx == 33 or idx == 263 or idx == 1 or idx == 61 or idx == 291 or idx == 199:
                    if idx == 1:
                        nose_2d = (lm.x * img_w, lm.y * img_h)
                        nose_3d = (lm.x * img_w, lm.y * img_h, lm.z * 3000)
                    x, y = int(lm.x * img_w), int(lm.y * img_h)

                    # Get the 2D coordinates
                    face_2d.append([x, y])

                    # Get the 3D coordinates
                    face_3d.append([x, y, lm.z])

            # Convert them to NumPy arrays
            face_2d = np.array(face_2d, dtype=np.float64)
            face_3d = np.array(face_3d, dtype=np.float64)

            # The camera matrix
            focal_length = 1 * img_w
            cam_matrix = np.array([[focal_length, 0, img_h / 2],
                                   [0, focal_length, img_w / 2],
                                   [0, 0, 1]])

            # The distortion parameters
            dist_matrix = np.zeros((4, 1), dtype=np.float64)

            # Solve PnP
            success, rot_vec, trans_vec = cv2.solvePnP(face_3d, face_2d, cam_matrix, dist_matrix)

            # Get the rotational matrix
            rmat, jac = cv2.Rodrigues(rot_vec)

            # Get the angles
            angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)

            # Get the rotation degrees
            x = angles[0] * 360
            y = angles[1] * 360
            z = angles[2] * 360

            # See where the user's head is tilting
            if y < -10:
                # I want to know how long this guy has been looking to his left or right or up or down
                text = "Looking Left"
            elif y > 10:
                text = "Looking Right"
            elif x < -10:
                text = "Looking Down"
            elif x > 10:
                text = "Looking Up"
            else:
                text = "Forward"

            # Display the nose direction
            nose_3d_projection, jacobian = cv2.projectPoints(nose_3d, rot_vec, trans_vec, cam_matrix, dist_matrix)
            p1 = (int(nose_2d[0]), int(nose_2d[1]))
            p2 = (int(nose_2d[0] + y * 10), int(nose_2d[1] - x * 10))
            cv2.line(image, p1, p2, (255, 0, 0), 3)

            # Add the text to the image
            cv2.putText(image, text, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            cv2.putText(image, "x: " + str(np.round(x, 2)), (500, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image, "y: " + str(np.round(y, 2)), (500, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(image, "z: " + str(np.round(z, 2)), (500, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            # need some fix here
            # end = time.time()
            # totalTime = end - start
            # fps = 1 / totalTime
            # print("FPS: ", fps)
            # cv2.putText(image, f'FPS: {int(fps)}', (20, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 255, 0), 2)

            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_CONTOURS,
                landmark_drawing_spec=drawing_spec,
                connection_drawing_spec=drawing_spec)

    cv2.imshow('Head Pose Estimation', image)

    if cv2.waitKey(5) & 0xFF == 27:
        break

cap.release()
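One way to get the duration (a minimal sketch, not a tested solution, building on the text variable set in the loop above; WARN_AFTER is an illustrative threshold): remember the direction from the previous frame together with a timestamp of when it started, reset the timestamp whenever the direction changes, and otherwise compare the elapsed time against the threshold.
# initialize these once, before the while loop:
last_direction = "Forward"     # direction seen on the previous frame
direction_start = time.time()  # when the current direction began
WARN_AFTER = 120               # seconds; warn after 2 minutes (adjust to taste)

# inside the while loop, right after `text` is assigned:
if text != last_direction:
    # the direction changed, so restart the timer
    last_direction = text
    direction_start = time.time()
elif text in ("Looking Left", "Looking Right"):
    elapsed = time.time() - direction_start
    if elapsed > WARN_AFTER:
        print("Warning: been " + text.lower() + " for " + str(int(elapsed)) + " seconds")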

Running task / function in the background

I wrote a program to capture the position of a license plate from my webcam feed using YOLOv4. The detection result is then passed to easyOCR for character recognition. Right now I'm calling the OCR function inside the while loop every time a detection occurs. Is there a way to call the OCR function outside the loop without stopping the webcam feed? Some people suggested using a queue or a subprocess, but I'm not quite familiar with those concepts. Any help would be very appreciated.
# detection
while 1:
    # _, pre_img = cap.read()
    # pre_img = cv2.resize(pre_img, (640, 480))
    _, img = cap.read()
    # img = cv2.flip(pre_img, 1)
    hight, width, _ = img.shape
    blob = cv2.dnn.blobFromImage(img, 1 / 255, (416, 416), (0, 0, 0), swapRB=True, crop=False)
    net.setInput(blob)
    output_layers_name = net.getUnconnectedOutLayersNames()
    layerOutputs = net.forward(output_layers_name)

    boxes = []
    confidences = []
    class_ids = []
    for output in layerOutputs:
        for detection in output:
            score = detection[5:]
            class_id = np.argmax(score)
            confidence = score[class_id]
            if confidence > 0.7:
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * hight)
                w = int(detection[2] * width)
                h = int(detection[3] * hight)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, .5, .4)

    boxes = []
    confidences = []
    class_ids = []
    for output in layerOutputs:
        for detection in output:
            score = detection[5:]
            class_id = np.argmax(score)
            confidence = score[class_id]
            if confidence > 0.5:
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * hight)
                w = int(detection[2] * width)
                h = int(detection[3] * hight)
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, .8, .4)

    font = cv2.FONT_HERSHEY_PLAIN
    colors = np.random.uniform(0, 255, size=(len(boxes), 3))
    if len(indexes) > 0:
        for i in indexes.flatten():
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            confidence = str(round(confidences[i], 2))
            color = colors[i]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            detected_image = img[y:y + h, x:x + w]
            cv2.putText(img, label + " " + confidence, (x, y + 400), font, 2, color, 2)
            # print(detected_image)
            cv2.imshow('detection', detected_image)
            result = OCR(detected_image)
            print(result)
Function for OCR
def OCR(cropped_image):
    result = reader.readtext(cropped_image)
    text = ''
    for result in result:
        text += result[1] + ' '
    spliced = (remove(text)).upper()
    return spliced
You could run the OCR function on another thread with the _thread library, like so:
import time  # not necessary, only to simulate work time
import _thread as thread  # in Python 3 the module is named _thread

def OCR(cropped_image):
    result = reader.readtext(cropped_image)
    text = ''
    for result in result:
        text += result[1] + ' '
    spliced = (remove(text)).upper()
    print(spliced)  # print the result inside OCR, since you can't easily return a value from a thread

while 1:
    time.sleep(5)  # simulating some work time
    print("main")
    detected_image = 1
    thread.start_new_thread(OCR, (detected_image,))  # run the OCR function on a new thread
I hope it will help you...
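Since the question also mentions queues, here is a slightly more structured variant (again only a sketch, assuming the OCR function from the question): a single worker thread fed through a queue.Queue, so the main detection loop only enqueues crops and never blocks on the OCR itself.
import queue
import threading

ocr_queue = queue.Queue()

def ocr_worker():
    while True:
        cropped_image = ocr_queue.get()  # blocks until the main loop enqueues a crop
        if cropped_image is None:        # sentinel value used to stop the worker
            break
        print(OCR(cropped_image))        # OCR() as defined in the question
        ocr_queue.task_done()

threading.Thread(target=ocr_worker, daemon=True).start()

# in the detection loop, replace `result = OCR(detected_image)` with:
ocr_queue.put(detected_image.copy())  # copy, so the frame buffer can be reused safely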

How to create a meter based on model's probability in Python OpenCV

I have this simple Python code that makes predictions on the emotions of a face (refer to here in case you need to run it): whether the person is happy, sad, etc. It uses cv2 and Keras. Now I would like to visualize and place a meter on the frame based on the probability of each frame (the prob value below, which is a percentage). How can I do that?
Something like this. Don't worry about the colors for now.
cap = cv2.VideoCapture(1)
canvasImage = cv2.imread("fg2.png")
x0, x1 = 330, 1290
y0, y1 = 155, 700
prediction_history = []
LOOKBACK = 5  # how far you want to look back
counter = 0

while True:
    # Find haar cascade to draw bounding box around face
    ret, frame = cap.read()
    frame = cv2.flip(frame, 3)
    if not ret:
        break
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        prediction = model.predict(cropped_img)
        maxindex = int(np.argmax(prediction))
        text = emotion_dict[maxindex]
        prob = round(prediction[0][3] * 100, 2)
        prediction_history.append(maxindex)
        most_common_index = max(set(prediction_history[-LOOKBACK:][::-1]), key=prediction_history.count)
        text = emotion_dict[most_common_index]
        # if ("Sad" in text) or ("Angry" in text) or ("Disgusted" in text):
        #     text = "Sad"
        if ("Happy" in text) or ("Sad" in text):
            cv2.putText(frame, text + ": " + str(prob), (x + 20, y - 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    dim = (800, 480)
    frame_shrunk = cv2.resize(frame, (x1 - x0, y1 - y0))
    canvasImage[y0:y1, x0:x1] = frame_shrunk
    # cv2.imshow('Video', cv2.resize(frame, dim, interpolation=cv2.INTER_CUBIC))
    cv2.imshow('Demo', canvasImage)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
There is no built-in function in OpenCV for drawing meters; here is a helper function that you can use to draw a meter over an image:
def draw_indicator(img, percentage):

    def percentage_to_color(p):
        return 0, 255 * p, 255 - (255 * p)

    # config
    levels = 10
    indicator_width = 80
    indicator_height = 220
    level_width = indicator_width - 20
    level_height = int((indicator_height - 20) / levels - 5)

    # draw
    img_levels = int(percentage * levels)
    cv2.rectangle(img, (10, img.shape[0] - (indicator_height + 10)), (10 + indicator_width, img.shape[0] - 10), (0, 0, 0), cv2.FILLED)
    for i in range(img_levels):
        level_y_b = int(img.shape[0] - (20 + i * (level_height + 5)))
        cv2.rectangle(img, (20, level_y_b - level_height), (20 + level_width, level_y_b), percentage_to_color(i / levels), cv2.FILLED)

# test code
img = cv2.imread('a.jpg')
draw_indicator(img, 0.7)
cv2.imshow("test", img)
cv2.waitKey(10000)
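Note that prob in the question's loop is a percentage (0-100) while draw_indicator expects a fraction (0-1), so inside the loop you would call it as something like:
draw_indicator(frame, prob / 100)  # scale the percentage down to the 0-1 range the helper expects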

opencv show rendered image with bounding boxes and labels

The code below is able to detect objects without issue; however, towards the end there is the line cv2.imshow("demo", img).
I would expect this window to show the image with the generated bounding boxes and labels, but all I get is a blank window. I originally got this code from some examples on the internet, so I'm a bit lost as to how to position that line, or why it's not generating the image.
import cv2
import numpy as np

def take_pic(output_filename):
    import os
    capture_img = "ffmpeg -y -rtsp_transport udp -i rtsp://mycamera:apassword#172.16.66.106/live -vframes 1 " + output_filename

net = cv2.dnn.readNet("yolov3.weights", "./darknet/cfg/yolov3.cfg")
classes = []
with open("./darknet/data/coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))

output_filename = "/tmp/camera.jpeg"
cap = cv2.imread(output_filename)
j = 0

if j == 0:
    cv2.namedWindow("demo", cv2.WINDOW_AUTOSIZE)
    while True:
        take_pic(output_filename)
        cap = cv2.imread(source)
        j = j + 1
        print("j= " + str(j))
        img = cap
        img = cv2.resize(img, None, fx=0.4, fy=0.4)
        height, width, channels = img.shape
        blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
        net.setInput(blob)
        outs = net.forward(output_layers)
        class_ids = []
        confidences = []
        boxes = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    # Object detected
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    print(str(center_x) + " " + str(center_y))
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    # Rectangle coordinates
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
        font = cv2.FONT_HERSHEY_PLAIN
        for i in range(len(boxes)):
            if i in indexes:
                x, y, w, h = boxes[i]
                label = str(classes[class_ids[i]])
                print("label :" + str(label) + "x: " + str(x) + " y: " + str(y))
                color = colors[i]
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                cv2.putText(img, label, (x, y + 30), font, 3, color, 3)
        cv2.imshow("demo", img)
else:
    print("camera open failed")

cv2.destroyAllWindows()
With OpenCV, an imshow call must be accompanied by a waitKey call in order to actually display an image.
Paste something similar to this towards the end of your loop, after you call cv2.imshow:
if cv2.waitKey(0) == ord('q'):
    print('exiting loop')
    break
If the image still shows up blank in the imshow window, you might need to multiply the pixel values by 255. In Matlab, for instance, images are normalized between 0 and 1.
Try:
cv2.imshow("demo", img * 255)
cv2.waitKey(0)

OpenCV DNN Face detector

def detect_video(image):
    gray = image
    blob = cv2.dnn.blobFromImage(gray, 1.0, (300, 300), [104, 117, 123], False, False)
    net.setInput(blob)
    detections = net.forward()
    bboxes = []
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    frameWidth = image.shape[1]
    frameHeight = image.shape[0]
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.7:
            x1 = int(detections[0, 0, i, 3] * frameWidth)
            y1 = int(detections[0, 0, i, 4] * frameHeight)
            x2 = int(detections[0, 0, i, 5] * frameWidth)
            y2 = int(detections[0, 0, i, 6] * frameHeight)
            cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0), 3)
            try:
                image1 = gray[y1:(y2), x1:(x2)]
                img = cv2.resize(image1, (48, 48), interpolation=cv2.INTER_CUBIC) / 255.
                prediction = model1.predict_proba(img.reshape(1, 48, 48, 1))
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, str(emotions[prediction[0].argmax()]), (x1, y1 + 10), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                result = prediction
                if result is not None:
                    if result[0][6] < 0.6:
                        result[0][6] = result[0][6] - 0.12
                        result[0][:3] += 0.01
                        result[0][4:5] += 0.04
                    # write the different emotions and have a bar to indicate probabilities for each class
                    for index, emot in enumerate(emotion):
                        cv2.putText(image, emot, (10, index * 20 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                        cv2.rectangle(image, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4), (255, 0, 0), -1)
                    emt = [prediction[0][0], prediction[0][1], prediction[0][2], prediction[0][3], prediction[0][4], prediction[0][5], prediction[0][6]]
                    indx = np.arange(len(emotion))
                    plt.bar(indx, emt, color='blue')
                    plt.xticks(indx, emotion)
                    plt.savefig("ab.png")
                    cv2.imshow("graph", cv2.imread("ab.png"))
                    plt.clf()
                    # cv2.waitKey(5)
                    # plt.show()
                    # return indx, emt
            except:
                # print("----->Problem during resize. Probably can't detect any face")
                continue
    return image
I have made my own model and trained it on the KDEF dataset. Now when I give a video as input, it detects the face in the video but draws two bounding boxes. Can anyone help me find the mistake in the code? It runs successfully, it just creates two bounding boxes. The input the neural network accepts is 48x48.
First select the detection with the highest confidence, then draw only that one on the image:
detection_index = 0
max_confidence = 0
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if max_confidence < confidence:
        max_confidence = confidence
        detection_index = i

i = detection_index
x1 = int(detections[0, 0, i, 3] * frameWidth)
y1 = int(detections[0, 0, i, 4] * frameHeight)
x2 = int(detections[0, 0, i, 5] * frameWidth)
y2 = int(detections[0, 0, i, 6] * frameHeight)
cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0), 3)
try:
    image1 = gray[y1:(y2), x1:(x2)]
    img = cv2.resize(image1, (48, 48), interpolation=cv2.INTER_CUBIC) / 255.
    prediction = model1.predict_proba(img.reshape(1, 48, 48, 1))
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(image, str(emotions[prediction[0].argmax()]), (x1, y1 + 10), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    result = prediction
    if result is not None:
        if result[0][6] < 0.6:
            result[0][6] = result[0][6] - 0.12
            result[0][:3] += 0.01
            result[0][4:5] += 0.04
        # write the different emotions and have a bar to indicate probabilities for each class
        for index, emot in enumerate(emotion):
            cv2.putText(image, emot, (10, index * 20 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            cv2.rectangle(image, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4), (255, 0, 0), -1)
        emt = [prediction[0][0], prediction[0][1], prediction[0][2], prediction[0][3], prediction[0][4], prediction[0][5], prediction[0][6]]
        indx = np.arange(len(emotion))
        plt.bar(indx, emt, color='blue')
        plt.xticks(indx, emotion)
        plt.savefig("ab.png")
        cv2.imshow("graph", cv2.imread("ab.png"))
        plt.clf()
        # cv2.waitKey(5)
        # plt.show()
        # return indx, emt
except:
    # print("----->Problem during resize. Probably can't detect any face")
    pass  # `continue` is not valid here, since this code is no longer inside a loop
return image
