How to capture photos with cv2.imwrite()? - python

cv2.imwrite() is not working. I am trying to take 100 photos when a face is detected.
Here is the code:
import cv2
import datetime

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

while True:
    _, frame = cap.read()
    greyImg = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    face = face_cascade.detectMultiScale(greyImg, 1.3, 5)
    for x, y, w, h in face:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 5)
        time_stamp = datetime.datetime.now().strftime("%D-%m-%Y")
        file_name = f"{time_stamp}-face.jpg"
        for i in range(100):
            cv2.imwrite(file_name, greyImg)
    cv2.imshow("Face recogniton", cv2.flip(frame, 1))
    if cv2.waitKey(1) == ord("q"):
        break

The main bug in your code is that you loop 100 times and write the same frame to the same file name, so you only ever end up with one image.
This code should solve your issue:
import cv2
from datetime import datetime
import numpy as np

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

faces_counter: int = 0

while True:
    _, frame = cap.read()
    # Convert to grayscale for the detector (COLOR_RGB2BGR only swaps channels)
    greyImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face = face_cascade.detectMultiScale(greyImg, 1.3, 5)
    if np.any(face):
        # Count one detection per frame and stop after 100 saved images
        faces_counter += 1
        if faces_counter > 100:
            break
        for x, y, w, h in face:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 5)
            now = datetime.now()
            current_time = now.strftime("%H_%M_%S")
            # Unique file name per save: timestamp plus running counter
            file_name = f"Img_{current_time}_{faces_counter}-face.png"
            cv2.imwrite(file_name, frame)
    cv2.imshow("Face recogniton", cv2.flip(frame, 1))
    if cv2.waitKey(1) == ord("q"):
        break

Related

How do I add an image overlay to my live video using cv2?

This is my code; I've looked at some tutorials but can't find what I'm looking for.
I want to overlay the Frame.png image on my webcam feed. I tried adding the image directly, but that didn't work either. If possible, is there a way to keep the image at a certain coordinate in the live webcam window rather than overlaying the whole frame?
import cv2
import numpy as np
import time

def detect_and_save():
    alpha = 0.2
    beta = 1 - alpha
    cap = cv2.VideoCapture(0)
    sciframe = cv2.imread('Frame.png')
    classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    while True:
        ret, frame = cap.read()
        overlay = frame.copy()
        output = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = classifier.detectMultiScale(gray, 1.5, 5)
        cv2.putText(output, "HUD Test", (175, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 50, 50), 3)
        for face in faces:
            x, y, w, h = face
            cv2.rectangle(overlay, (x, y), (x+w, y+h), (255, 200, 0), -1)
            cv2.rectangle(overlay, (x, y), (x+w, y+h), (255, 0, 0), 1)
            cv2.rectangle(overlay, (x, y-20), (x+w, y), (25, 20, 0), -1)
            cv2.addWeighted(overlay, alpha, output, beta, 0, output)
            cv2.putText(output, "Human", (x+10, y-10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)
        if not ret:
            continue
        cv2.imshow("HUD", output)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            cv2.imwrite('./images/CID_{}.png'.format(time.strftime('%d%m%y_%H_%M_%S')), output)
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    detect_and_save()
You can place one image on top of another at any coordinate in OpenCV by assigning it into a region of the frame:
cap = cv2.VideoCapture(0)

im_height = 50  # define your top image size here
im_width = 50
im = cv2.resize(cv2.imread("Frame.png"), (im_width, im_height))

while True:
    ret, frame = cap.read()
    # Rows are the height, columns the width; this is the top-left corner.
    # Pick another region like frame[200:250, 200:250] to move it elsewhere.
    frame[0:im_height, 0:im_width] = im
    cv2.imshow("live camera", frame)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
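If you want a see-through overlay rather than a hard paste, you can blend the image into that same region with cv2.addWeighted. A minimal sketch, assuming a 50x50 Frame.png and equal blend weights:

import cv2

cap = cv2.VideoCapture(0)
overlay_img = cv2.resize(cv2.imread("Frame.png"), (50, 50))

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Blend the overlay with the top-left 50x50 region of the live frame
    roi = frame[0:50, 0:50]
    frame[0:50, 0:50] = cv2.addWeighted(overlay_img, 0.5, roi, 0.5, 0)
    cv2.imshow("live camera", frame)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()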

How can I get the landmarks and pixel points of the nose?

I am running this code to detect the nose and get its position in real time. I was able to detect the face and draw the landmark points. However, I couldn't figure out how to get the position of a specific landmark in terms of x and y. Can anyone please help me?
Here is my code:
import cv2 as cv
import mediapipe as mp
import time

cap = cv.VideoCapture(0)
if not cap.isOpened():
    print("camera-failed")
    exit()

pTime = 0
mpDraw = mp.solutions.drawing_utils
mpFaceMesh = mp.solutions.face_mesh
faceMesh = mpFaceMesh.FaceMesh(max_num_faces=1)
drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=1)

while True:
    ret, img = cap.read()
    if not ret:
        print("Error exiting...")
        break
    imgRGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)  # MediaPipe expects RGB input
    results = faceMesh.process(imgRGB)
    if results.multi_face_landmarks:
        for faceLms in results.multi_face_landmarks:
            mpDraw.draw_landmarks(img, faceLms, mpFaceMesh.FACEMESH_CONTOURS,
                                  drawSpec, drawSpec)
            for id, lm in enumerate(faceLms.landmark):
                print(lm)
                # Landmarks are normalized; scale by the image size to get pixels
                ih, iw, ic = img.shape
                x, y, z = int(lm.x*iw), int(lm.y*ih), int(lm.z*ic)
                print(id, x, y, z)
    cTime = time.time()
    fps = 1/(cTime-pTime)
    pTime = cTime
    cv.putText(img, f'FPS: {int(fps)}', (20, 70), cv.FONT_HERSHEY_PLAIN,
               3, (0, 255, 0), 3)
    cv.imshow("image", img)
    if cv.waitKey(1) == ord('q'):
        break

cap.release()
cv.destroyAllWindows()
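To read out a single landmark such as the nose tip, index into faceLms.landmark directly and scale the normalized coordinates by the image size. A minimal sketch, assuming the Face Mesh nose-tip index is 1 (verify this against the MediaPipe landmark map):

import cv2 as cv
import mediapipe as mp

NOSE_TIP = 1  # assumed Face Mesh index for the nose tip; check the landmark map

cap = cv.VideoCapture(0)
faceMesh = mp.solutions.face_mesh.FaceMesh(max_num_faces=1)

while True:
    ret, img = cap.read()
    if not ret:
        break
    results = faceMesh.process(cv.cvtColor(img, cv.COLOR_BGR2RGB))
    if results.multi_face_landmarks:
        lm = results.multi_face_landmarks[0].landmark[NOSE_TIP]
        ih, iw, _ = img.shape
        x, y = int(lm.x * iw), int(lm.y * ih)  # normalized coords -> pixels
        cv.circle(img, (x, y), 4, (0, 0, 255), -1)
        print("nose:", x, y)
    cv.imshow("nose", img)
    if cv.waitKey(1) == ord('q'):
        break

cap.release()
cv.destroyAllWindows()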

cv2 issue for face-detection algorithm

I have a face detection program.
I tried to run the code but it's not working.
import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer/trainer.yml")
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
cam = cv2.VideoCapture(0)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)

while True:
    ret, im = cam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x+w, y+h), (225, 0, 0), 2)
        Id, conf = recognizer.predict(gray[y:y+h, x:x+w])
        if conf < 50:
            if Id == 1:
                Id = "chandra"
            elif Id == 2:
                Id = "vamsi"
        else:
            Id = "Unknown"
        cv2.putText(im, str(Id), (x, y+h), fontFace, 255)
    cv2.imshow('im', im)
    if cv2.waitKey(10) == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
I got this error:
(-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
I'm using OpenCV (cv2) and Python 3.7.
Maybe this will help:
try:
    ret, im = cam.read()
except:
    continue
Please use the code below to check whether any images are coming from your camera:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
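Since the assertion says the classifier is empty, it is also worth verifying that the cascade XML actually loads. A small sketch using the cascade files bundled with the opencv-python package:

import cv2

# Load the cascade from the path shipped with opencv-python and check that it loaded
cascade_path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascade_path)

if faceCascade.empty():
    raise IOError(f"Could not load cascade file: {cascade_path}")
print("Cascade loaded successfully")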

OpenCV - Coloring a specific area during video capture?

Here is my code:
import cv2
import numpy as np

cap = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
framesize = (640, 480)
out = cv2.VideoWriter('dump.avi', fourcc, 60.0, framesize)
font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', frame)
    cv2.rectangle(gray, (0, 0), (640, 480), (255, 255, 255), 3)
    cv2.putText(gray, "gray", (0, 130), font, 5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('fr', gray)

    # Here I am trying to color a specific square area on the live image feed
    #gray[100:105,110:115] = [255,255,255]
    io = gray[37:111, 107:194]

    # Here I am cloning an area into another
    gray[200:200, 270:283] = io

    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
How can I color a specific area? My attempt is not working.
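Two things stand out in the code above: gray is single-channel, so a pixel there is one value rather than [255, 255, 255], and gray[200:200, 270:283] is an empty slice whose shape does not match io. A minimal sketch of both operations done on the color frame, assuming camera index 0:

import cv2

cap = cv2.VideoCapture(0)  # camera index assumed here

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Color a square region white on the color frame (rows 100:150, cols 110:160)
    frame[100:150, 110:160] = (255, 255, 255)

    # Clone one region into another: source and destination shapes must match
    patch = frame[37:111, 107:194]       # 74 rows x 87 columns
    frame[200:274, 270:357] = patch      # also 74 rows x 87 columns

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()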

Object tracking with Python and OpenCV

My code is:
import numpy as np
import cv2

cap = cv2.VideoCapture("slow.flv")
ret, frame = cap.read()

r, h, c, w = 250, 90, 400, 125  # simply hardcoded the values
track_window = (c, r, w, h)

roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                   np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x, y, w, h = track_window
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow("img2", img2)

        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break
        else:
            cv2.imwrite(chr(k) + ".jpg", img2)
    else:
        break

cv2.destroyAllWindows()
cap.release()
The error is:
File "objecttracking.py", line 10, in <module>
    roi = frame[r:r+h, c:c+w]
TypeError: 'NoneType' object has no attribute '__getitem__'
It would appear that cap.read() is returning None for the value you're storing in frame. So when you try to index into frame with frame[r:r+h, c:c+w], you get this error.
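A minimal sketch of the kind of guard that avoids this, checking both that the video opened and that a frame was actually returned (using the file name from the question):

import cv2

cap = cv2.VideoCapture("slow.flv")
if not cap.isOpened():
    raise IOError("Could not open 'slow.flv' -- check the path and codecs")

ret, frame = cap.read()
if not ret or frame is None:
    raise IOError("Could not read the first frame from 'slow.flv'")

# Safe to slice now that frame is a valid image
r, h, c, w = 250, 90, 400, 125
roi = frame[r:r+h, c:c+w]
print("ROI shape:", roi.shape)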
