How do I set a time interval when detecting emotion? - python

I'm currently detecting emotion and I want to implement a 20-second interval before the program can recognize another emotion. I've tried timer threading, but the program still detects emotions continuously.
Here's the portion of my code where it detects emotion and where I implemented the timer threading:
def load_video(self, *args):
    ret, frame = self.capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = self.faceCascade.detectMultiScale(gray,
                                              scaleFactor=1.2,
                                              minNeighbors=10,
                                              minSize=(64, 64),
                                              flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        faceROI = gray[y:y+h, x:x+w]
        resized_img = resize(faceROI, (128, 64))
        fd, hog_image = hog(resized_img, orientations=9, pixels_per_cell=(8, 8),
                            cells_per_block=(2, 2), visualize=True, multichannel=False)
        hist = self.desc.describe(resized_img)
        feat = np.hstack([fd, hist])
        self.l = [feat]
        self.recognition()
    buffer = cv2.flip(frame, 0).tostring()
    texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
    texture.blit_buffer(buffer, colorfmt='bgr', bufferfmt='ubyte')
    self.image.texture = texture

def recognition(self):
    threading.Timer(20.0, self.recognition).start()  # 20-second interval
    print("The predicted image is : " + self.Categories[self.model.predict(self.l)[0]])

Related

How to record audio while OpenCV has the camera enabled

I want to record and store audio while analyzing emotion using OpenCV. Unfortunately, the recorded audio is very short and the voice is not clear. I also want to analyze emotion in the voice, which is why I need the audio. Could somebody help me resolve this?
def main():
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    p = pyaudio.PyAudio()
    stream = p.open(format=audio_format, channels=channels, rate=16000,
                    input=True, frames_per_buffer=1024)
    start_time = time.time()
    aud = True
    while aud:
        ret, frame = cap.read()
        data = stream.read(chunk, exception_on_overflow=False)
        frames.append(data)
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5,
                                              flags=cv2.CASCADE_SCALE_IMAGE)
        result = DeepFace.analyze(img_path=frame, actions=["emotion"],
                                  enforce_detection=False, detector_backend='ssd')
        for (x, y, w, h) in faces:
            if w > 130:  # trick: ignore small faces
                draw_border(frame, (x, y), (x + w, y + h), (255, 0, 105), 4, 15, 10)  # draw rectangle around face
                detected_face = frame[int(y):int(y + h), int(x):int(x + w)]  # crop detected face
                detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)  # transform to grayscale
                detected_face = cv2.resize(detected_face, (48, 48))  # resize to 48x48
                img_pixels = img_to_array(detected_face)
                img_pixels = np.expand_dims(img_pixels, axis=0)
                img_pixels /= 255  # pixels are in [0, 255]; normalize to [0, 1]
                emotion = result["dominant_emotion"]
                txt = str(emotion)
                cv2.putText(frame, txt, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        cv2.imshow(file, frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            aud = False
            break
    stream.stop_stream()
    stream.close()
    p.terminate()
    wf = wave.open('/Users/xyz/Documents/Audio/wv.wav', 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(p.get_sample_size(audio_format))
    wf.setframerate(sample_rate)
    wf.writeframes(b''.join(frames))
    wf.close()
    cap.release()
    cv2.destroyAllWindows()
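The audio is probably short because stream.read() runs only once per video frame, and DeepFace.analyze() can take far longer than one audio buffer to return, so most of the incoming samples are never read. A common remedy (a sketch under that assumption, not code from the thread) is to capture audio on its own thread so it is never stalled by the video loop:

import threading
import pyaudio

def record_audio(stop_event, frames, audio_format=pyaudio.paInt16,
                 channels=1, rate=16000, chunk=1024):
    # Runs on its own thread so audio capture is never stalled by the
    # slow per-frame video analysis in the main loop.
    p = pyaudio.PyAudio()
    stream = p.open(format=audio_format, channels=channels, rate=rate,
                    input=True, frames_per_buffer=chunk)
    while not stop_event.is_set():
        frames.append(stream.read(chunk, exception_on_overflow=False))
    stream.stop_stream()
    stream.close()
    p.terminate()

# In main(), start the recorder before the video loop:
#     frames, stop_event = [], threading.Event()
#     t = threading.Thread(target=record_audio, args=(stop_event, frames), daemon=True)
#     t.start()
# and after the loop ends:
#     stop_event.set(); t.join()
# then write frames to the WAV file exactly as before.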

How to set a time interval when producing the output in image classification?

I am trying to set a time interval for predicting the face detected in a live camera feed; however, the camera feed is not showing up in the interface.
Here's my current code as of the moment:
def load_video(self, *args):
    ret, frame = self.capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    while True:
        faces = self.faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                                  minSize=(60, 60), flags=cv2.CASCADE_SCALE_IMAGE)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            faceROI = gray[y:y+h, x:x+w]
            resized_img = resize(faceROI, (128, 64))
            fd, hog_image = hog(resized_img, orientations=9, pixels_per_cell=(8, 8),
                                cells_per_block=(2, 2), visualize=True, multichannel=False)
            hist = self.desc.describe(resized_img)
            feat = np.hstack([fd, hist])
            l = [feat]
            print("The predicted image is : " + self.Categories[self.model.predict(l)[0]])
            time.sleep(20)
    buffer = cv2.flip(frame, 0).tostring()
    texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
    texture.blit_buffer(buffer, colorfmt='bgr', bufferfmt='ubyte')
    self.image.texture = texture
I tried moving the last four lines before the while loop, but the feed still does not show up. The same happens when I put them at the end of the while loop (inside it). Any suggestions?
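Both the while True loop and time.sleep(20) run on Kivy's main thread, so the UI never gets a chance to draw the texture, which is why the camera view stays blank. A non-blocking alternative (a sketch assuming load_video is driven by Clock.schedule_interval, with a hypothetical last_pred attribute for the cooldown) drops the inner loop and gates the prediction with a timestamp:

import time

def load_video(self, *args):
    ret, frame = self.capture.read()
    if not ret:
        return
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = self.faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                              minSize=(60, 60), flags=cv2.CASCADE_SCALE_IMAGE)
    now = time.monotonic()
    # 'last_pred' is a hypothetical attribute used only for the cooldown.
    if len(faces) > 0 and now - getattr(self, 'last_pred', 0.0) >= 20.0:
        self.last_pred = now
        x, y, w, h = faces[0]
        resized_img = resize(gray[y:y+h, x:x+w], (128, 64))
        fd, _ = hog(resized_img, orientations=9, pixels_per_cell=(8, 8),
                    cells_per_block=(2, 2), visualize=True, multichannel=False)
        feat = np.hstack([fd, self.desc.describe(resized_img)])
        print("The predicted image is : " + self.Categories[self.model.predict([feat])[0]])
    # The texture update now runs on every call, so the feed stays live.
    buffer = cv2.flip(frame, 0).tostring()
    texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
    texture.blit_buffer(buffer, colorfmt='bgr', bufferfmt='ubyte')
    self.image.texture = texture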

Image from webcam in OpenCV

The program is written for an application and displays the live video feed from the camera; however, the output comes out in some unspecified encoding (format).
class Worker1(QThread):
    ImageUpdate = pyqtSignal(QImage)

    def run(self):
        self.ThreadActive = True
        Capture = cv2.VideoCapture(0)
        while self.ThreadActive:
            face_cascade = cv2.CascadeClassifier('cascade/haarcascade_russian_plate_number.xml')
            ret, frame = Capture.read()
            if ret:
                image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                with_cascade = face_cascade.detectMultiScale(image, 1.3, 7)
                for i, (x, y, w, h) in enumerate(with_cascade):
                    roi_color = frame[y:y + h, x:x + w]
                    r = 300.0 / roi_color.shape[1]
                    dim = (400, int(roi_color.shape[0] * r))
                    resized = cv2.resize(roi_color, dim, interpolation=cv2.INTER_AREA)
                    w_resized = resized.shape[0]
                    h_resized = resized.shape[1]
                    frame[380:380 + w_resized, 235:235 + h_resized] = resized
                convert_to_qt_format = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
                self.ImageUpdate.emit(convert_to_qt_format)

# Slot on the main-window class that owns self.ui:
def image_update_slot(self, Image):
    self.ui.Cam.setPixmap(QPixmap.fromImage(Image))
    self.ui.Cam.setScaledContents(True)
How can I fix this problem? Maybe there is a problem with the image format, but if I change it, the program closes.
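The distorted output is most likely because frame holds BGR pixels while QImage.Format_RGB888 expects RGB byte order, and the QImage is also built without a row stride. A sketch of the usual fix (assuming PyQt5): convert just before building the QImage and pass bytesPerLine explicitly:

# Inside run(), after drawing on frame:
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)        # Format_RGB888 expects RGB byte order
h, w, ch = rgb.shape
convert_to_qt_format = QImage(rgb.data, w, h, ch * w, QImage.Format_RGB888)  # ch * w = bytes per line
self.ImageUpdate.emit(convert_to_qt_format.copy())  # copy() so the pixels outlive the numpy buffer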

Python CV2 video writer doesn't save video

I have a simple Python script using OpenCV and Keras that performs some detections on frames (a follow-up from my previous question here). But when I record and save the frames as a video using video_writer, the generated video is empty.
What is wrong with the video_writer?
#........some code
# start the webcam feed
cap = cv2.VideoCapture(1)
canvasImageOriginal = cv2.imread("fg2.png")
canvasImage = cv2.imread("fg2.png")
canvasHappy = cv2.imread("fg2happy.png")
canvasSad = cv2.imread("fg2sad.png")
x0, x1 = 330, 1290
y0, y1 = 155, 700
#=========
w = 960  # int(cap.get(cv2.CV_CAP_PROP_FRAME_WIDTH))
h = 540  # int(cap.get(cv2.CV_CAP_PROP_FRAME_HEIGHT))
# video recorder
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter('output.avi', fourcc, 25.0, (w, h))
#=========
prediction_history = []
LOOKBACK = 5  # how far you want to look back
counter = 0
while True:
    # Find haar cascade to draw bounding box around face
    ret, frame = cap.read()
    frame = cv2.flip(frame, 3)
    if not ret:
        break
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
        prediction = model.predict(cropped_img)
        maxindex = int(np.argmax(prediction))
        text = emotion_dict[maxindex]
        print(prediction[0][3])
        prediction_history.append(maxindex)
        most_common_index = max(set(prediction_history[-LOOKBACK:][::-1]), key=prediction_history.count)
        text = emotion_dict[most_common_index]
        # if ("Sad" in text) or ("Angry" in text) or ("Disgusted" in text):
        #     text = "Sad"
        if ("Happy" in text) or ("Sad" in text):
            cv2.putText(frame, text, (x + 20, y - 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        if "Happy" in text:
            counter = counter + 1
            if counter == 10:
                # print("Happy!")
                canvasImage = canvasHappy
        else:
            counter = 0
            canvasImage = canvasImageOriginal
    dim = (800, 480)
    frame_shrunk = cv2.resize(frame, (x1 - x0, y1 - y0))
    canvasImage[y0:y1, x0:x1] = frame_shrunk
    # cv2.imshow('Video', cv2.resize(frame, dim, interpolation=cv2.INTER_CUBIC))
    cv2.imshow('Demo', canvasImage)
    video_writer.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
video_writer.release()
cv2.destroyAllWindows()
As mentioned above, check print(frame.shape). When I did, I saw (300, 450, 3), so I changed the VideoWriter resolution to (450, 300) and it worked for me. In short, frame.shape is (height, width, channels), while the VideoWriter resolution is (width, height); if the two don't match, OpenCV silently drops the frames and the output file stays empty.
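A sketch of that fix: read one frame to learn the true size instead of hard-coding w and h, then build the VideoWriter from it:

cap = cv2.VideoCapture(1)
ret, frame = cap.read()                    # grab one frame to learn the real size
h, w = frame.shape[:2]                     # frame.shape is (height, width, channels)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter('output.avi', fourcc, 25.0, (w, h))  # VideoWriter wants (width, height)
# Every frame passed to video_writer.write() must then be exactly w x h pixels.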

python opencv camera won't turn on

I ran the following code with Python, but no video display window appears. The camera light turns on, yet the video screen is not visible.
Operating system: Windows 10 x64
python: 3.9.1
source
https://github.com/GangYuanFan/Closed-Eye-Detection-with-opencv/blob/master/cv_close_eye_detect.py
import cv2

eye_cascPath = 'haarcascade_eye_tree_eyeglasses.xml'
face_cascPath = 'haarcascade_frontalface_alt.xml'
faceCascade = cv2.CascadeClassifier(face_cascPath)
eyeCascade = cv2.CascadeClassifier(eye_cascPath)
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
while 1:
    ret, img = cap.read()
    if ret:
        frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect faces in the image
        faces = faceCascade.detectMultiScale(
            frame,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            # flags = cv2.CV_HAAR_SCALE_IMAGE
        )
        # print("Found {0} faces!".format(len(faces)))
        if len(faces) > 0:
            # Draw a rectangle around the faces
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            frame_tmp = img[faces[0][1]:faces[0][1] + faces[0][3], faces[0][0]:faces[0][0] + faces[0][2]:1, :]
            frame = frame[faces[0][1]:faces[0][1] + faces[0][3], faces[0][0]:faces[0][0] + faces[0][2]:1]
            eyes = eyeCascade.detectMultiScale(
                frame,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                # flags = cv2.CV_HAAR_SCALE_IMAGE
            )
            if len(eyes) == 0:
                print('no eyes!!!')
            else:
                print('eyes!!!')
            frame_tmp = cv2.resize(frame_tmp, (400, 400), interpolation=cv2.INTER_LINEAR)
            cv2.imshow('Face Recognition', frame_tmp)
        waitkey = cv2.waitKey(1)
        if waitkey == ord('q') or waitkey == ord('Q'):
            cv2.destroyAllWindows()
            break
It looks like your only cv2.imshow() call is inside the if len(faces) > 0: condition; try placing one in the if ret: block instead, and you should at least see the OpenCV camera window pop up.
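A minimal sketch of that suggestion, showing the live feed unconditionally and the cropped face only when one is detected (variable names taken from the question):

while 1:
    ret, img = cap.read()
    if ret:
        cv2.imshow('Camera', img)          # always show the live feed
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1,
                                             minNeighbors=5, minSize=(30, 30))
        if len(faces) > 0:
            x, y, w, h = faces[0]
            face_tmp = cv2.resize(img[y:y + h, x:x + w], (400, 400))
            cv2.imshow('Face Recognition', face_tmp)  # cropped face, only when detected
        waitkey = cv2.waitKey(1)
        if waitkey == ord('q') or waitkey == ord('Q'):
            cv2.destroyAllWindows()
            break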
