Set background color outside ROI in OpenCV - python

I successfully displayed the video feed and am trying to change the background color of the area outside the ROI from black to blue, but the screen still shows a black background. Please help me solve this problem. Any help would be greatly appreciated.
Original code:
import numpy as np
from cv2 import cv2
'''
ML object detection algo (haarcascade) used to identify objects.
The XML file consists of trained Haar Cascade models.
'''
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# initialize video from the webcam
video = cv2.VideoCapture(1)

while True:
    # ret tells if the camera works properly; frame is an actual frame from the video feed
    ret, frame = video.read()
    # print(cv2.VideoCapture(0).isOpened())
    # make sure the port is working and read the image
    if frame is not None and video.isOpened():
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces within the subregions of the image in scales
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)
        # Draw the circular mask around each face
        for (x, y, w, h) in faces:
            # Use the coordinates to find the center of the face, and from that point draw a circle of radius w/2 or h/2
            center_coordinates = x + w // 2, y + h // 2
            radius = w // 2  # or h // 2, or anything based on your requirements
            # background color (black)
            mask = np.zeros(frame.shape[:2], dtype="uint8")
            # Draw the desired region to keep in white
            cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)
            masked = cv2.bitwise_and(frame, frame, mask=mask)
            cv2.imshow('mask applied', masked)
    if cv2.waitKey(30) & 0xff == 27:
        break

video.release()
cv2.destroyAllWindows()
The above code detects the face and displays it inside the circular mask on a black background. But as mentioned above, the background color outside the circular ROI should be blue.
I tried replacing mask=np.zeros(frame.shape[:2], dtype="uint8") with the code below, and it fails. frame.shape[0:2] doesn't even include the channel dimension, and I can't figure out how to change the color in the first place.
mask=np.ones(frame.shape[0:2], dtype="uint8")
mask[:,:,0]=255
mask[:,:,1]=0
mask[:,:,2]=0
I also tried creating a circular masked image and then placing it on another image, only to find that it results in the same problem.
import numpy as np
from cv2 import cv2
'''
ML object detection algo (haarcascade) used to identify objects.
The XML file consists of trained Haar Cascade models.
'''
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# initialize video from the webcam
video = cv2.VideoCapture(1)
print(cv2.VideoCapture(1).isOpened())

while True:
    # ret tells if the camera works properly; frame is an actual frame from the video feed
    ret, frame = video.read()
    # make sure the port is working and read the image
    if frame is not None and video.isOpened():
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces within the subregions of the image in scales
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)
        for (x, y, w, h) in faces:
            # Use the coordinates to find the center of the face, and from that point draw a circle of radius w/2 or h/2
            center_coordinates = x + w // 2, y + h // 2
            radius = w // 2  # or h // 2, or anything based on your requirements
            # background color (black)
            mask = np.zeros(frame.shape[:2], dtype="uint8")
            # create a blue colored background
            color = np.full_like(frame, (255, 0, 0))
            # Draw the desired region to keep in white
            roi = cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)
            masked = cv2.bitwise_and(frame, frame, mask=mask)
            mask_blue = cv2.bitwise_and(color, color, mask=mask - roi)
            # combine the two masked images
            result = cv2.add(masked, mask_blue)
            cv2.imshow('result', result)
    if cv2.waitKey(30) & 0xff == 27:
        break

video.release()
cv2.destroyAllWindows()

I have changed your code as follows according to your requirement. Here I have added one extra line,
masked[np.where((masked==[0,0,0]).all(axis=2))]=[255,0,0]
where you can change the pixel values of the black region to any specific color.
import numpy as np
import cv2
'''
ML object detection algo (haarcascade) used to identify objects.
The XML file consists of trained Haar Cascade models.
'''
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# initialize video from the webcam
video = cv2.VideoCapture(0)
print(cv2.VideoCapture(0).isOpened())

while True:
    # ret tells if the camera works properly; frame is an actual frame from the video feed
    ret, frame = video.read()
    # make sure the port is working and read the image
    if frame is not None and video.isOpened():
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces within the subregions of the image in scales
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)
        for (x, y, w, h) in faces:
            # Use the coordinates to find the center of the face, and from that point draw a circle of radius w/2 or h/2
            center_coordinates = x + w // 2, y + h // 2
            radius = w // 2  # or h // 2, or anything based on your requirements
            # background color (black)
            mask = np.zeros(frame.shape[:2], dtype="uint8")
            # Draw the desired region to keep in white
            roi = cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)
            masked = cv2.bitwise_and(frame, frame, mask=mask)
            # change the pixel values of the black region to the desired color (blue here, in BGR)
            masked[np.where((masked == [0, 0, 0]).all(axis=2))] = [255, 0, 0]
            cv2.imshow('result', masked)
    if cv2.waitKey(30) & 0xff == 27:
        break

video.release()
cv2.destroyAllWindows()
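A side note on the second attempt in the question: cv2.circle draws in place and returns the very array it was given, so roi and mask are the same object and mask - roi is all zeros, which is why the blue layer never showed up. Also, the masked == [0,0,0] trick above will recolor genuinely black pixels inside the circle (dark hair, for instance) as well. Below is a minimal sketch of an alternative that sidesteps both issues by indexing the frame with the mask directly; the frame, centre and radius values here are stand-ins for the ones computed in the loop.

import cv2
import numpy as np

# stand-ins; in the real loop these come from video.read() and detectMultiScale
frame = np.zeros((480, 640, 3), dtype=np.uint8)
center_coordinates, radius = (320, 240), 100

# single-channel mask: the white circle marks the region to keep
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
cv2.circle(mask, center_coordinates, radius, 255, -1)

# paint everything outside the circle blue (BGR), leaving the ROI untouched
result = frame.copy()
result[mask == 0] = (255, 0, 0)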

Related

Capture what is in a rectangle of a numpy array

This might sound obvious to some of you, but I was trying to figure out how to capture only what is in a rectangle.
The code below uses dlib face detection and draws a bounding box around a detected face.
cv2.rectangle(img, (det.left(), det.top()), (det.right(), det.bottom()), color_green, line_width)
What I am struggling with is how to capture/view only what is within the rectangle.
So in this example I only want to see what is in (det.left(), det.top()), (det.right(), det.bottom()).
import sys
import dlib
import cv2

detector = dlib.get_frontal_face_detector()
cam = cv2.VideoCapture(1)
color_green = (0, 255, 0)
line_width = 3

while True:
    ret_val, img = cam.read()
    rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dets = detector(rgb_image)
    for det in dets:
        cv2.rectangle(img, (det.left(), det.top()), (det.right(), det.bottom()), color_green, line_width)
    cv2.imshow('my webcam', img)
    if cv2.waitKey(1) == 27:
        break  # esc to quit

cv2.destroyAllWindows()
You can try to implement cropping with numpy slicing.
For a rectangle drawn by:
cv2.rectangle(img,(left,top), (right,bottom), color_green, line_width)
You can try cropping as:
new_img = img[top:bottom,left:right,:]
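One caveat: near the frame edges, dlib can return box coordinates outside the image (including negative ones), and negative values in numpy slices wrap around to the other side. A small sketch that clamps the box to the frame first (img and det as in the question):

h, w = img.shape[:2]
top, bottom = max(det.top(), 0), min(det.bottom(), h)
left, right = max(det.left(), 0), min(det.right(), w)
new_img = img[top:bottom, left:right]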
To show only the parts that are within the detected boxes, you'd want to either black out what's outside the dets (which I find almost impossible) or simply draw the desired parts on a black canvas instead.
This is why I start with a black canvas here:
import sys
import dlib
import cv2
import numpy as np

detector = dlib.get_frontal_face_detector()
cam = cv2.VideoCapture(1)
color_green = (0, 255, 0)
line_width = 3

while True:
    ret_val, img = cam.read()
    # get image dims to create a black canvas of the same size
    # img shape is (rows, cols, c) or (h, w, c)
    img_h, img_w, _ = img.shape  # c = 3, assuming it's a color image
    # this will act as our black background / canvas
    black_tmp = np.zeros((img_h, img_w, 3), dtype=np.uint8)
    rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dets = detector(rgb_image)
    for det in dets:
        # det xy -> left, top | x2y2 -> right, bottom
        x, y, x2, y2 = det.left(), det.top(), det.right(), det.bottom()
        # copy the pixels in the desired area to the black background
        black_tmp[y:y2, x:x2, :] = img[y:y2, x:x2, :]
        # maybe draw the rectangle on the new image too
        cv2.rectangle(black_tmp, (det.left(), det.top()), (det.right(), det.bottom()), color_green, line_width)
    cv2.imshow('my webcam', black_tmp)
    if cv2.waitKey(1) == 27:
        break  # esc to quit

cv2.destroyAllWindows()
I've tried to edit your code only minimally.
Here are the np.zeros docs in case you need them.
I hope this helps; if I missed any detail you need, please feel free to point it out.

Recording x/y axis movement with openCV-python

I'm trying to record the distance travelled by an object (in this instance, part of a face as detected by a Haar cascade) from a video file. So far, I have a rectangle drawn around the section of the face that I wish to record x/y travel data for, but I have been unable to find info on exactly how to store how far, and in which direction, the face has travelled in two dimensions. My code is below:
import cv2
import numpy as np
from matplotlib import pyplot as plt

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture("resources/video/EXAMPLE.mp4")

while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 9)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, int(y + h / 3)), (255, 0, 0), 2)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Any info/pointers on how I can record movement data would be appreciated!
If you simply want to store the coordinates, you can append the (x, y) tuple to a list; a minimal sketch of this follows below.
If you're tracking just one face, you could use object tracking.
If you want to track multiple faces, you can check out the multitracker.
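For that first option, here is a small sketch (the positions list and the displacements helper are illustrative names, not OpenCV API): collect one centre per frame inside the detection loop, then derive per-frame movement from consecutive pairs.

positions = []  # one (x, y) face-centre per processed frame

# inside the detection loop, for the face being tracked:
#     positions.append((x + w // 2, y + h // 2))

def displacements(points):
    """Frame-to-frame (dx, dy) movement between consecutive centres."""
    return [(x2 - x1, y2 - y1)
            for (x1, y1), (x2, y2) in zip(points, points[1:])]

# total distance travelled along each axis:
# total_dx = sum(abs(dx) for dx, dy in displacements(positions))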

Cropping faces from an image using OpenCV in Python

I'm currently trying to crop faces from an image.
I want the code to work no matter how many faces are in the image.
An example of the input image:
I'd like to crop the faces so I can run a facial keypoint detection algorithm on them (previously made).
The end result will look something like this:
My code is currently:
import cv2
import numpy as np
import matplotlib.pyplot as plt

# face detector (assumed here: the standard pre-trained Haar cascade used elsewhere on this page)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Load in color image for face detection
image = cv2.imread('images/obamas4.jpg')
# Convert the image to RGB colorspace
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Make a copy of the original image to draw face detections on
image_copy = np.copy(image)
# Convert the image to gray
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Detect faces in the image using the pre-trained face detector
faces = face_cascade.detectMultiScale(gray_image, 1.25, 6)
# Print the number of faces found
print('Number of faces detected:', len(faces))
# Get the bounding box for each detected face
for f in faces:
    x, y, w, h = [v for v in f]
    cv2.rectangle(image_copy, (x, y), (x + w, y + h), (255, 0, 0), 3)
    # Define the region of interest in the image
    face_crop = gray_image[y:y+h, x:x+w]
# Display the image with the bounding boxes
fig = plt.figure(figsize=(9, 9))
ax1 = fig.add_subplot(111)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_title("Obamas with Face Detection")
ax1.imshow(image_copy)
# Display the face crops
fig = plt.figure(figsize=(9, 9))
ax1 = fig.add_subplot(111)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_title("Obamas Face Crops")
ax1.imshow(face_crop)
plt.show()
The output looks like this:
Right now it only outputs the last face detected in the image. I'm certain I'm missing something simple like a for loop.
I'd like to be able to run my facial keypoint detection algorithm on all of the gray cropped face images.
Thank you for your help!
The problem is that in your code, face_crop stores only the last face detected.
So make it a list and append all the faces to it. Then use a for loop to display all the faces, like this:
face_crop = []
for f in faces:
    x, y, w, h = [v for v in f]
    cv2.rectangle(image_copy, (x, y), (x + w, y + h), (255, 0, 0), 3)
    # Define the region of interest in the image
    face_crop.append(gray_image[y:y+h, x:x+w])

for face in face_crop:
    cv2.imshow('face', face)
    cv2.waitKey(0)
I used cv2.imshow() to display the images here. You can modify this to use plt.imshow() instead; one possible version is sketched below.
Hope this helps!
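A possible plt.imshow() version of that display loop, assuming face_crop is the list of grayscale crops built above (np.atleast_1d covers the single-face case, where subplots returns a bare Axes):

import numpy as np
import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, len(face_crop), figsize=(9, 9))
for ax, face in zip(np.atleast_1d(axes), face_crop):
    ax.imshow(face, cmap='gray')
    ax.set_xticks([])
    ax.set_yticks([])
plt.show()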

how to identify the position of detected face

I have to detect faces using OpenCV and Python, and then identify whether the detected face is on the right, on the left, or in the middle of the screen.
I have already succeeded in detecting faces using the code below, but I still need to know the position of the faces. Could someone please help me?
import cv2
import sys
import numpy as np

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(1)

while True:
    # capture frame by frame
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.cv.CV_HAAR_SCALE_IMAGE)  # cv2.CASCADE_SCALE_IMAGE in OpenCV 3+
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
You could get the centre of the rectangle:
centre_x = x + w / 2
centre_y = y + h / 2
Then compare it with the size of the image. Assuming you have the image shape information:
height, width, channels = frame.shape #it could be gray.shape too
You can tell, for example, whether the face was detected on the left half of the image by checking centre_x < width / 2.
You have all the information needed to divide the image into a grid and work out where the rectangle places itself, as sketched below.
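A small sketch of that grid idea, splitting the frame into three vertical bands (face_position is just an illustrative helper name):

def face_position(x, w, frame_width):
    """Classify a detected face as left / middle / right by its centre."""
    centre_x = x + w // 2
    if centre_x < frame_width // 3:
        return 'left'
    elif centre_x < 2 * frame_width // 3:
        return 'middle'
    return 'right'

# inside the detection loop:
#     height, width = frame.shape[:2]
#     print(face_position(x, w, width))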

ambiguous results in face and eye detection when image is resized

I am trying out code for face and eye detection in OpenCV using Python. The code works well for an image of size 2848 x 4272, and even when I resized it by a factor of 0.5. But whenever I resize it by other factors, such as 0.2 or 0.4, it gives me ambiguous results for the eyes (such as a few regions of the forehead or nose). In that case, I am not able to get generalised code for all image sizes. Is there a way to get correct detections at any image size, since it is very difficult to process such big images? The code is as follows:
import numpy as np
import cv2
import cv2.cv as cv

# attaching the haar cascade files
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

# reading the image
img11 = cv2.imread('IMG_0347.JPG')
if img11 is not None:
    # resizing the image
    w, h, c = img11.shape
    print "dimension"
    print w, h
    img = cv2.resize(img11, None, fx=0.4, fy=0.3, interpolation=cv2.INTER_LINEAR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # converting into grayscale
    gray = cv2.equalizeHist(gray)
    #cv2.imshow('histo',gray)
    w, h, c = img.shape  # finding out the dimensions of the image, i.e. width, height and number of channels
    # creating a white background of the same dimensions as the input image for pasting the eyes detected by 'haarcascade_eye.xml'
    im = np.zeros((w, h, c), np.uint8)
    im[:] = [255, 255, 255]
    # creating a white background of the same dimensions as the input image for pasting the masked eyes
    im_mask = np.zeros((w, h, c), np.uint8)
    im_mask[:] = [255, 255, 255]
    # faces gives the top-left coordinates of the detected face and the width and height of the rectangle
    faces = face_cascade.detectMultiScale(gray, 1.5, 5)
    # taking the face as the ROI
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 1)  # draws the rectangle around the detected face
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        #cv2.imshow('image1',img)        # shows the original image with the face detected
        #cv2.imshow('image1',roi_color)  # shows only the detected face (colored)
        # searching for eyes in the detected face, i.e. in roi_gray
        eyes = eye_cascade.detectMultiScale(roi_gray)
        #print eyes  # prints the top-left coordinates of the detected eyes and the width and height of the rectangle
        if eyes.any():
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 1)  # draws a rectangle around the masked eyes
                eye_mask = roi_color[ey+1:ey+eh, ex+1:ex+ew]  # eye_mask is the masked portion of the detected eye extracted from roi_color
                im_mask[ey+1+y:y+ey+eh, ex+x+1:ex+ew+x] = eye_mask  # pasting the eye_mask on the white background called im_mask
        else:
            print ("eyes could not be detected")
    cv2.imshow('image', im_mask)  # shows the im_mask white background with the masked eyes pasted on it
    cv2.waitKey(0)
It is logical that as the image gets smaller and smaller, it becomes harder to differentiate an eye from a nose, for example. So unless you understand fundamentally what your image analysis functions are looking for (I don't) it's hard to know the best way to downsize your images while retaining the type of information that the analysis needs.
Having said that, I believe cv2.INTER_AREA is used for shrinking images more commonly than cv2.INTER_LINEAR etc.
Try this instead of the resize you have:
img = cv2.resize(img11, None, fx=0.4, fy=0.3, interpolation=cv2.INTER_AREA)
Also, aren't you making it harder to identify eyes by changing the aspect ratio of your images (fx != fy)? If you don't have a special reason for that, you can just choose the target size explicitly with the second positional argument, dsize. For example:
effective_but_smaller_size = (640, 480) # or whatever you find works
img = cv2.resize(img11, effective_but_smaller_size, interpolation=cv2.INTER_AREA)
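And if you would rather keep a scale factor than hard-code a size, one way to preserve the aspect ratio is to derive the height from the original shape (a sketch; the 640 target width is arbitrary):

target_w = 640  # arbitrary; pick whatever works for your detector
h, w = img11.shape[:2]
img = cv2.resize(img11, (target_w, int(h * target_w / w)),
                 interpolation=cv2.INTER_AREA)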
