converting a three-channel image to a single-channel image - python

I am using the Hough transform to detect circles in real-time video frames. The first parameter of the HoughCircles function must be a single-channel image, and I don't know how to produce one; it continuously raises an error at that point.
The code I am using:
import cv2
import cv2 as cv
from matplotlib import pyplot as plt
from scipy.ndimage import imread
import numpy as np

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
cam_capture = cv2.VideoCapture(0)
cv2.destroyAllWindows()
upper_left = (50, 50)
bottom_right = (300, 300)

def sketch_transform(image):
##    print(image)
##    print("2")
##    #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
##    print(image)
##    print("3")
##    gray = 255-gray
##    ret, thresh = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
##    image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
##    print(image)
##    print("4")
##    #image = cv2.drawContours(image, contours, -1,(0,0,255),3)
##    print(image)
##    print("4")
    circle_detect(image)
    return image

def circle_detect(image):
    #image1=str(image)
    #mage = cv2.imread(image, cv2.IMREAD_COLOR)
    print(image)
    print("k")
    image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)
    circles = cv2.HoughCircles(image,cv2.HOUGH_GRADIENT,1,20,
                               param1=90,param2=30,minRadius=0,maxRadius=100)
    print(circles)
    circles = np.uint16(np.around(circles))
##    for i in circles[0,:]:
##        # draw the outer circle
##        cv2.circle(image,(i[0],i[1]),i[2],(0,255,0),2)
##        # draw the center of the circle
##        cv2.circle(image,(i[0],i[1]),2,(0,0,255),3)
    return circles

while True:
    _, image_frame = cam_capture.read()

    # Rectangle marker
    r = cv2.rectangle(image_frame, upper_left, bottom_right, (100, 50, 200), 5)
    rect_img = image_frame[upper_left[1] : bottom_right[1], upper_left[0] : bottom_right[0]]

    sketcher_rect = rect_img
    print(sketcher_rect)
    print("1")
    sketcher_rect = sketch_transform(sketcher_rect)

    # Conversion for 3 channels to put back on original image (streaming)
    #sketcher_rect_rgb = cv2.cvtColor(sketcher_rect, cv2.COLOR_GRAY2RGB)

    # Replacing the sketched image on Region of Interest
    image_frame[upper_left[1] : bottom_right[1], upper_left[0] : bottom_right[0]] = sketcher_rect

    cv2.imshow("Sketcher ROI", image_frame)
    if cv2.waitKey(1) == 13:
        break

cam_capture.release()
cv2.destroyAllWindows()
I am new to OpenCV and have been struggling for the last four days to detect circles in a video frame. On a still image the detection works well, but on video frames it causes trouble.
The output is:
Traceback (most recent call last):
File "/home/pi/Downloads/Pi-tracker-master/fgroi.py", line 62, in <module>
sketcher_rect = sketch_transform(sketcher_rect)
File "/home/pi/Downloads/Pi-tracker-master/fgroi.py", line 27, in sketch_transform
circle_detect(image)
File "/home/pi/Downloads/Pi-tracker-master/fgroi.py", line 36, in circle_detect
image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB)
cv2.error: OpenCV(3.4.4) /home/pi/packaging/opencv-python/opencv/modules/imgproc/src/color.hpp:255: error: (-2:Unspecified error) in function 'cv::CvtHelper<VScn, VDcn, VDepth, sizePolicy>::CvtHelper(cv::InputArray, cv::OutputArray, int) [with VScn = cv::Set<1>; VDcn = cv::Set<3, 4>; VDepth = cv::Set<0, 2, 5>; cv::SizePolicy sizePolicy = (cv::SizePolicy)2u; cv::InputArray = const cv::_InputArray&; cv::OutputArray = const cv::_OutputArray&]'
> Invalid number of channels in input image:
> 'VScn::contains(scn)'
> where
> 'scn' is 3
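The assertion spells out the problem: cvtColor received a 3-channel image ('scn' is 3) where COLOR_GRAY2RGB expects a single channel. The ROI coming out of the capture is already BGR, so the conversion has to run the other way, BGR to GRAY, before HoughCircles, which requires a single-channel 8-bit input. A minimal sketch of circle_detect with that change (parameters copied from the question; the None check matters on live video, where many frames contain no circles):

import cv2
import numpy as np

def circle_detect(image):
    # HoughCircles wants a single-channel 8-bit image; the ROI is BGR,
    # so convert BGR -> GRAY (COLOR_GRAY2RGB goes the wrong way).
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)    # mild smoothing helps on noisy video frames
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=30, minRadius=0, maxRadius=100)
    if circles is not None:           # HoughCircles returns None when nothing is found
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            cv2.circle(image, (i[0], i[1]), i[2], (0, 255, 0), 2)   # outer circle
            cv2.circle(image, (i[0], i[1]), 2, (0, 0, 255), 3)      # center
    return circles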


how to show multiple videoCapture in one frame?

I have an SLR (sign language recognition) task, and I want to show the preprocessing steps. Here is my code:
import numpy as np
import cv2
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

# Load CNN Model
model = load_model("VGG16withALLTRAINABLE(NO BACKGROUND).h5")

# Creating ROI frame for capturing hand
top_ROI = 100
btm_ROI = 300
right_ROI = 50
left_ROI = 250

# Creating Background Removal Parameters
blur_size = 5
canny_low = 25
# min_area = 0
# max_area = 0
canny_high = 150
dilate_iter = 10
erode_iter = 10
mask_color = (0.0,0.0,0.0)

# Video Capture
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()

    # flipping frame
    # frame = cv2.flip(frame, 1)

    # Create ROI inside Frame
    roi = frame[top_ROI:btm_ROI, right_ROI:left_ROI]
    cv2.rectangle(frame, (left_ROI, top_ROI), (right_ROI, btm_ROI), (255,128,0), 3)  # Visual rectangle for ROI

    # Resizing and reshaping to equalize model input size and shape
    roi = cv2.resize(roi, (300, 300))
    blurred_roi = cv2.GaussianBlur(roi, (blur_size, blur_size), 0)
    gray_roi = cv2.cvtColor(blurred_roi, cv2.COLOR_BGR2GRAY)
    _, threshed = cv2.threshold(gray_roi, 100, 255, cv2.THRESH_BINARY_INV)
    # edge = cv2.Canny(gray_roi, canny_low, canny_high)
    # edge = cv2.dilate(edge, None)
    # edge = cv2.erode(edge, None)

    cntr = []
    cntr_area = []
    contours, _ = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contour_info = []
    for c in contours:
        contour_info.append((c, cv2.contourArea(c), ))
    contour_info = np.array(contour_info)
    contour_info = sorted(contour_info, key=lambda x: x[1], reverse=True)
    max_contour = contour_info[0]

    mask = np.zeros(threshed.shape)
    cv2.fillConvexPoly(mask, max_contour[0], (255))
    mask = cv2.dilate(mask, None, iterations=dilate_iter)
    mask = cv2.erode(mask, None, iterations=erode_iter)
    mask = cv2.GaussianBlur(mask, (blur_size, blur_size), 0)
    mask_stack = np.dstack([mask]*3)    # Create 3-channel alpha mask

    # -- Blend masked img into MASK_COLOR background --------------------------------------
    mask_stack = mask_stack.astype('float32') / 255.0   # Use float matrices,
    roi = roi.astype('float32') / 255.0                 # for easy blending
    masked = (mask_stack * roi) + ((1 - mask_stack) * mask_color)  # Blend
    masked = (masked * 255).astype('uint8')             # Convert back to 8-bit

    print(mask.shape)
    print(mask_stack.shape)
    print(masked.shape)

    cv2.imshow("Frame", frame)
    cv2.imshow("ROI", gray_roi)
    cv2.imshow("Thresed", threshed)
    cv2.imshow('Mask', masked)

    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
This is my current result [Result in different frames].
My question is: can I show all of the results in one frame (one window with multiple video streams)?
I tried it once with the code below, but it stops working when I add the second video stream function (video_stream2()):
from tkinter import *
from PIL import ImageTk, Image
import cv2

# Creating ROI frame for capturing hand
top_ROI = 100
btm_ROI = 300
right_ROI = 50
left_ROI = 250

root = Tk()
root.geometry("1920x1080")

# Create a frame
Main_video = Frame(root, highlightbackground='grey', highlightthicknes=3)
Main_video.grid(row=0, column=0, padx=450, pady=150, ipadx=0, ipady=0)
Roi_video = Frame(root, highlightbackground='grey', highlightthicknes=3)
Roi_video.grid(row=0, column=0, padx=0, pady=0, ipadx=0, ipady=0)

# Create a label in the frame
label_main = Label(Main_video)
label_main.grid()
label_roi = Label(Roi_video)
label_roi.grid()

# Capture from camera
cap = cv2.VideoCapture(0)

# function for video streaming
def video_stream():
    _, frame = cap.read()
    # Create ROI inside Frame
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Main Video
    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    label_main.imgtk = imgtk
    label_main.configure(image=imgtk)
    label_main.after(1, video_stream)

def video_stream2():
    _, frame = cap.read()
    # Create ROI inside Frame
    roi = frame[top_ROI:btm_ROI, right_ROI:left_ROI]
    cv2.rectangle(frame, (left_ROI, top_ROI), (right_ROI, btm_ROI), (255,128,0), 3)  # Visual rectangle for ROI
    cv2roi_gray = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
    # Roi Video
    roi_img = Image.fromarray(cv2roi_gray)
    imgtk_roi = ImageTk.PhotoImage(image=roi_img)
    label_roi.imgtk_roi = imgtk_roi
    label_roi.configure(image=imgtk_roi)
    label_roi.after(1, video_stream2)

video_stream()
video_stream2()
root.mainloop()
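A side note before the answer below: both functions call cap.read() on the same VideoCapture, so the two after loops compete for frames from one camera. A minimal sketch of a single-read variant (update_streams is a new helper, reusing the widget and ROI names defined above) that feeds both labels from one loop:

def update_streams():
    # Read one frame per tick and feed both labels from it,
    # instead of two competing cap.read() loops.
    ret, frame = cap.read()
    if ret:
        # Main video: BGR -> RGB for PIL
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        imgtk = ImageTk.PhotoImage(image=Image.fromarray(rgb))
        label_main.imgtk = imgtk           # keep a reference so Tk doesn't drop it
        label_main.configure(image=imgtk)
        # ROI video: grayscale crop of the same frame
        roi = frame[top_ROI:btm_ROI, right_ROI:left_ROI]
        roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        imgtk_roi = ImageTk.PhotoImage(image=Image.fromarray(roi_gray))
        label_roi.imgtk = imgtk_roi
        label_roi.configure(image=imgtk_roi)
    root.after(15, update_streams)         # ~66 fps ceiling; adjust as needed

update_streams()
root.mainloop()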
The procedure to combine several images (windows) into one is easy by following the example code:
import numpy as np
import cv2
import time

# Video Capture
cap = cv2.VideoCapture(0)

while(True):
    ret, frame = cap.read()
    frame_uus = cv2.resize(frame, (240,160))

    # let's simulate the images...
    #frame=np.random.randint(0,255,[320,480,3],dtype='uint8')
    gray_roi = 0.5*np.random.randint(0,255,[160,240,1],dtype='uint8')+0.5*frame_uus[:,:,0:1]
    threshed = 0.1+0*np.random.randint(0,255,[160,240,3],dtype='uint8')+0.3*frame_uus
    masked = 0.5*np.random.randint(0,255,[160,240,3],dtype='uint8')+0.2*frame_uus

    # make sure all data is in uint8-format suitable for cv2..
    gray_roi = gray_roi.astype(np.uint8)
    threshed = threshed.astype(np.uint8)
    masked = masked.astype(np.uint8)

    # show separate images...
    cv2.imshow("Frame", frame)
    cv2.imshow("ROI", gray_roi)
    cv2.imshow("Thresed", threshed)
    cv2.imshow('Mask', masked)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    # Define space between images...
    vali = 2

    # let's combine the images...
    max_height = np.max([len(frame), len(gray_roi), len(threshed), len(masked)])
    # Let's calculate total width for the combined image...remember to add space between images...
    total_width = len(frame[0])+len(gray_roi[0])+len(threshed[0])+len(masked[0])+4*vali

    # For clearness let's make a green background image
    baseimage = np.zeros([max_height, total_width, 3], 'uint8')
    baseimage[:,:,1] = 255

    # let's add separate images to the baseimage
    baseimage[0:len(frame), 0:len(frame[0]), :] = frame

    # Take into account the grayscale...
    alku = len(frame[0])+vali
    loppu = alku+len(gray_roi[0])
    baseimage[0:len(gray_roi), alku:loppu, 0:1] = gray_roi
    baseimage[0:len(gray_roi), alku:loppu, 1:2] = gray_roi
    baseimage[0:len(gray_roi), alku:loppu, 2:3] = gray_roi

    # Add next image...
    alku = loppu+vali
    loppu = alku+len(threshed[0])
    baseimage[0:len(threshed), alku:loppu, :] = threshed

    # And the last one...
    alku = loppu+vali
    loppu = alku+len(masked[0])
    baseimage[0:len(masked), alku:loppu, :] = masked

    # And finally let's show the baseimage...
    cv2.imshow('Combined', baseimage)

cap.release()
cv2.destroyAllWindows()
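As an alternative sketch (not part of the original answer): cv2.hconcat can do the horizontal tiling, provided every tile is first brought to the same height and channel count. combine_row here is a hypothetical helper:

import cv2

def combine_row(images, height=160):
    # Resize each tile to a common height, promote grayscale tiles to
    # 3 channels, then concatenate horizontally.
    tiles = []
    for img in images:
        if img.ndim == 2 or img.shape[2] == 1:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        scale = float(height) / img.shape[0]
        img = cv2.resize(img, (int(img.shape[1] * scale), height))
        tiles.append(img)
    return cv2.hconcat(tiles)

# usage inside the loop above:
# cv2.imshow('Combined', combine_row([frame, gray_roi, threshed, masked]))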

how to convert a ROS subscriber image into an OpenCV image?

I have a program that detects laser points, and I know it works when I read an image from video 0, but I do not know how to make it work from a ROS subscriber image. I need to convert a ROS image subscriber into a usable OpenCV image named "image". I have researched how to do this and come across several solutions that all use the function bridge.imgmsg_to_cv2, but I cannot get it to work. I am sure it is a simple fix; I just don't know what I am doing. It should be relatively simple, though. Here is my code:
# import the necessary packages
from __future__ import print_function
from imutils import contours
from skimage import measure
import numpy as np
import argparse
import imutils
import cv2
import message_filters
from sensor_msgs.msg import Image, CameraInfo
from std_msgs.msg import Int32, Float32MultiArray
import rospy
from cv_bridge import CvBridge, CvBridgeError
import roslib
roslib.load_manifest('my_package')
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

'''
def getPoint(cameraTip,dotXY,normalPoint):
    slope= (cameraTip[2]-dotXY[2])/(cameraTip[1]-dotXY[1])
    b=cameraTip[2]-(slope*cameraTip[1])
    z=slope*normalPoint[1]+b
    return [normalPoint[0],normalPoint[1],z]
'''

class image_converter:
    def __init__(self):
        self.image_pub = rospy.Publisher("image_topic_2",Image)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("CM_040GE/image_raw",Image,self.callback)

    def callback(self,data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        (rows,cols,channels) = cv_image.shape
        if cols > 60 and rows > 60 :
            cv2.circle(cv_image, (50,50), 10, 255)
        cv2.imshow("Image window", cv_image)
        cv2.waitKey(3)
        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)

def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)

image = bridge.imgmsg_to_cv2(image_message, desired_encoding="passthrough")
while(1):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (11, 11), 0)
    # threshold the image to reveal light regions in the
    # blurred image
    thresh = cv2.threshold(blurred, 30, 255, cv2.THRESH_BINARY)[1]
    # perform a series of erosions and dilations to remove
    # any small blobs of noise from the thresholded image
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=4)
    # perform a connected component analysis on the thresholded
    # image, then initialize a mask to store only the "large"
    # components
    labels = measure.label(thresh, neighbors=8, background=0)
    mask = np.zeros(thresh.shape, dtype="uint8")
    # loop over the unique components
    for label in np.unique(labels):
        # if this is the background label, ignore it
        if label == 0:
            continue
        # otherwise, construct the label mask and count the
        # number of pixels
        labelMask = np.zeros(thresh.shape, dtype="uint8")
        labelMask[labels == label] = 255
        numPixels = cv2.countNonZero(labelMask)
        # if the number of pixels in the component is sufficiently
        # large, then add it to our mask of "large blobs"
        if numPixels > 300:
            mask = cv2.add(mask, labelMask)
    # find the contours in the mask, then sort them from left to
    # right
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = contours.sort_contours(cnts)[0]
    # loop over the contours
    for (i, c) in enumerate(cnts):
        # draw the bright spot on the image
        (x, y, w, h) = cv2.boundingRect(c)
        ((cX, cY), radius) = cv2.minEnclosingCircle(c)
        # x and y center are cX and cY
        cv2.circle(image, (int(cX), int(cY)), int(radius),
            (0, 0, 255), 3)
        cv2.putText(image, "#{}".format(i + 1), (x, y - 15),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
    # show the output image
    cv2.imshow("Image", image)
    cv2.waitKey(1)
#camera.release()
Currently the error I am getting is:
Traceback (most recent call last):
File "lazerSub2.py", line 18, in <module>
roslib.load_manifest('my_package')
File "/opt/ros/indigo/lib/python2.7/dist-packages/roslib/launcher.py", line 62, in load_manifest
sys.path = _generate_python_path(package_name, _rospack) + sys.path
File "/opt/ros/indigo/lib/python2.7/dist-packages/roslib/launcher.py", line 93, in _generate_python_path
m = rospack.get_manifest(pkg)
File "/usr/lib/python2.7/dist-packages/rospkg/rospack.py", line 167, in get_manifest
return self._load_manifest(name)
File "/usr/lib/python2.7/dist-packages/rospkg/rospack.py", line 211, in _load_manifest
retval = self._manifests[name] = parse_manifest_file(self.get_path(name), self._manifest_name, rospack=self)
File "/usr/lib/python2.7/dist-packages/rospkg/rospack.py", line 203, in get_path
raise ResourceNotFound(name, ros_paths=self._ros_paths)
rospkg.common.ResourceNotFound: my_package
ROS path [0]=/opt/ros/indigo/share/ros
ROS path [1]=/home/robot/catkin_ws/src
ROS path [2]=/opt/ros/indigo/share
ROS path [3]=/opt/ros/indigo/stacks
I think this is what you wanted: separate out the ROS and the CV components. Your first error comes from roslib.load_manifest('my_package'), which looks up a rosbuild-style package named my_package on the ROS path and fails because none exists (with a catkin workspace the call is unnecessary); more generally, you were mixing ROS types into the CV processing. If you keep all of the ROS types in a main block, your functional processing can live elsewhere.
''' image_converter.py '''
from __future__ import print_function
import cv2
import numpy as np
import imutils
from imutils import contours
from skimage import measure

'''
def getPoint(cameraTip,dotXY,normalPoint):
    slope= (cameraTip[2]-dotXY[2])/(cameraTip[1]-dotXY[1])
    b=cameraTip[2]-(slope*cameraTip[1])
    z=slope*normalPoint[1]+b
    return [normalPoint[0],normalPoint[1],z]
'''

# Image Processing functions
def convert_image(image):    # Image of kind bgr8
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (11, 11), 0)
    # threshold the image to reveal light regions in the
    # blurred image
    thresh = cv2.threshold(blurred, 30, 255, cv2.THRESH_BINARY)[1]
    # perform a series of erosions and dilations to remove
    # any small blobs of noise from the thresholded image
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=4)
    # perform a connected component analysis on the thresholded
    # image, then initialize a mask to store only the "large"
    # components
    labels = measure.label(thresh, neighbors=8, background=0)
    mask = np.zeros(thresh.shape, dtype="uint8")
    # loop over the unique components
    for label in np.unique(labels):
        # if this is the background label, ignore it
        if label == 0:
            continue
        # otherwise, construct the label mask and count the
        # number of pixels
        labelMask = np.zeros(thresh.shape, dtype="uint8")
        labelMask[labels == label] = 255
        numPixels = cv2.countNonZero(labelMask)
        # if the number of pixels in the component is sufficiently
        # large, then add it to our mask of "large blobs"
        if numPixels > 300:
            mask = cv2.add(mask, labelMask)
    # find the contours in the mask, then sort them from left to
    # right
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = contours.sort_contours(cnts)[0]
    # loop over the contours
    for (i, c) in enumerate(cnts):
        # draw the bright spot on the image
        (x, y, w, h) = cv2.boundingRect(c)
        ((cX, cY), radius) = cv2.minEnclosingCircle(c)
        # x and y center are cX and cY
        cv2.circle(image, (int(cX), int(cY)), int(radius),
            (0, 0, 255), 3)
        cv2.putText(image, "#{}".format(i + 1), (x, y - 15),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
    # show the output image
    cv2.imshow("Image", image)
    cv2.waitKey(1)
    #camera.release()
    return image

# ROS Interface
if __name__ == "__main__":
    import rospy
    from cv_bridge import CvBridge, CvBridgeError
    from sensor_msgs.msg import Image

    bridge = CvBridge()

    def show_img(cv_image):
        (rows,cols,channels) = cv_image.shape
        if cols > 60 and rows > 60 :
            cv2.circle(cv_image, (50,50), 10, 255)
        cv2.imshow("Image window", cv_image)
        cv2.waitKey(3)

    image_pub = rospy.Publisher("image_topic_2", Image)

    def callback(data):
        try:
            # bridge, not self.bridge: this is a plain function, not a method
            cv_image = bridge.imgmsg_to_cv2(data, "bgr8")
            show_img(cv_image)
            cv_image2 = convert_image(cv_image)
            image_pub.publish(bridge.cv2_to_imgmsg(cv_image2, "bgr8"))
        except CvBridgeError as e:
            print(e)

    image_sub = rospy.Subscriber("CM_040GE/image_raw", Image, callback)
    rospy.init_node('image_converter', anonymous=True)
    rospy.spin()
    print("image_converter: Shutting down")
    cv2.destroyAllWindows()
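A side benefit of this split: convert_image can be exercised without a ROS master at all. A minimal sketch (the test script name and sample.png are hypothetical stand-ins; the module name comes from the listing above):

# test_convert.py -- run the CV pipeline on a plain image, no ROS needed
import cv2
from image_converter import convert_image

img = cv2.imread("sample.png")    # any BGR test image will do
if img is None:
    raise SystemExit("put a test image named sample.png next to this script")
out = convert_image(img)
cv2.imwrite("sample_out.png", out)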

using OpenCV python2.7 to detect a rectangle

I have written a Python program using OpenCV to detect a rectangle (displaying height and width) and to determine the distance from the camera to the object, but when I run the code I get the error below. I don't know what I am doing wrong and would appreciate any help; I have tried every possible solution I could find but still can't get rid of the error.
ERROR
yuv_red = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)
error: C:\builds\master_PackSlaveAddon-win32-vc12-static\opencv\modules\imgproc\src\color.cpp:8059: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor
import sys
sys.path.append('C:\Python27\Lib\site-packages')
import cv2
import numpy as np
import argparse
import math

############################# Capturing Video Through Camera ########################
cap = cv2.VideoCapture(0)

############# Distance to Camera initial value set to zero ##########################
Distance_to_Camera = 0

while(True):
    ################################# Capture frame-by-frame ###########################
    ret, frame = cap.read()

    ############################ Converting frame(img i.e BGR to YUV) ###################
    yuv_red = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)

    red_color = np.uint8([[[0,0,255]]])
    yuv_color = cv2.cvtColor(red_color, cv2.COLOR_BGR2YUV)
    print yuv_color

    ############################### Processing of Image ##############################
    ##################### Defining the Range of Red Colour ###########################
    red_lower = np.array([136,87,111],np.uint8)
    red_upper = np.array([180,255,255],np.uint8)

    ##################### Finding the Range of Red Colour in the image ###################
    mask = cv2.inRange(yuv_red, red_lower, red_upper)

    ####################### Morphological Transformation, Dilation #######################
    res = cv2.bitwise_and(frame, frame, mask = mask)

    #####################################################################################
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)       # Converting the BGR res to Gray
    blurred = cv2.GaussianBlur(gray, (5,5), 5)         # Blur image to remove noise
    blur = cv2.bilateralFilter(blurred, 5, 50, 50)     # Smooth the image
    median = cv2.medianBlur(blur, 5)                   # Reduce noise from image
    thresh = cv2.threshold(median, 3, 255, cv2.THRESH_BINARY)[1]  # To achieve a better output of white and black
    frame2, contour, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    ##################### Splitting and Merging Image channels ###########################
    b,g,r = cv2.split(res)
    ttl = res.size/3
    Ra = float(np.sum(r))/ttl
    print Ra

    if Ra > 1:
        c = contour[0]
        M = cv2.moments(c)
        x = int(M['m10']/M['m00'])
        y = int(M['m01']/M['m00'])
        x,y,w,h = cv2.boundingRect(c)
        epsilon = 0.01*cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, epsilon, True)
        perimeter = cv2.arcLength(c, True)
        area = cv2.contourArea(c)
        ah = h/40
        aw = w/40
        Distance_to_Camera = round(math.sqrt(315/area),4)
        print Distance_to_Camera
        approx = cv2.approxPolyDP(c, epsilon, True)
        print len(approx)
        shape = len(approx)
        if shape == 4:
            print " 4 Sides"
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(frame, [box], 0, (0,0,255), 0)
            print box
        if ah == aw:
            ################################ Displaying Text on the Image ################################
            print "Unknown"
            cv2.putText(frame,"Unknown",(x+150,y+150),cv2.FONT_HERSHEY_SIMPLEX,2,(255,255,255),4)
        else:
            ################################ Displaying Text on the Image ################################
            print "Rectangle"
            cv2.putText(frame,"Rectangle",(x+150,y+150),cv2.FONT_HERSHEY_SIMPLEX,2,(255,255,255),4)
            output = ("Distance="+str((round((Distance_to_Camera+0.0004)*1000))) + "cm" + "X=" + str(aw)+"cm" +"Y="+str(ah)+"cm"+"Perimeter=" +str(round(perimeter/40))+"cm"+"Area="+str(round((area/1.64)/1000))+"cm^2")
            cv2.imshow('gray',frame)
    else:
        cv2.imshow('gray',frame)

    ########################################## Output ##########################################
    cv2.imshow('gray',frame)
    cv2.imshow('grayscaled',thresh)

    if cv2.waitKey(20) & 0xFF == 27:
        break
    ##if k == 27:     # wait for ESC key to exit
    ##    cv2.destroyAllWindows()

# When everything done, release the capture
cv2.destroyAllWindows()
cap.release()
In the line implementing a bilateral filter you have an erroneous parenthesis:
blur = cv2.bilateralFilter(blurred, (5,50,50)
Check out the docs for cv2.bilateralFilter() to see the proper use.
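That said, the (-215) scn == 3 || scn == 4 assertion in cvtColor means the input did not have 3 or 4 channels; with a webcam loop the usual culprit is a failed cap.read() that returns None (camera busy, missing, or slow to start). A minimal guard, as a sketch:

import cv2

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret or frame is None:
        # A failed read would hand cvtColor an empty input and trigger
        # the scn == 3 || scn == 4 assertion, so skip this frame.
        continue
    yuv = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)
    cv2.imshow('yuv', yuv)
    if cv2.waitKey(20) & 0xFF == 27:
        break
cap.release()
cv2.destroyAllWindows()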

Python - resize image

I'm using the code below (found via a Google search) to detect faces:
import io
import picamera
import cv2
import numpy
import PIL
from PIL import Image
from resizeimage import resizeimage

# Load a cascade file for detecting faces
face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')

# Create a memory stream so photos doesn't need to be saved in a file
stream = io.BytesIO()

# Get the picture (low resolution, so it should be quite fast)
# Here you can also specify other parameters (e.g.: rotate the image)
with picamera.PiCamera() as camera:
    camera.resolution = (640, 480)
    camera.vflip = False
    camera.hflip = False
    camera.brightness = 60
    camera.capture(stream, format='jpeg')

# Convert the picture into a numpy array
buff = numpy.fromstring(stream.getvalue(), dtype=numpy.uint8)

# Now creates an OpenCV image
image = cv2.imdecode(buff, 1)

# Load a cascade file for detecting faces
#face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')

# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Look for faces in the image using the loaded cascade file
faces = face_cascade.detectMultiScale(gray, 1.1, 5)
print "Found "+str(len(faces))+" face(s)"

# Draw a rectangle around every found face
# Crop faces and save to separate files
id = 1
for (x,y,w,h) in faces:
    cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
    cropped = image[ y : y+h, x : x+w ]
    # RESIZE IMAGE to 92x112
    cropped = cv2.resize(cropped,None,92,112)
    cv2.imwrite("../reco/test_faces/cropped_face" + str(id) + ".png", cropped)
    id = id + 1
At the end I want to crop the faces into image files and resize them to 92x112. This is what I try with:
cropped = cv2.resize(cropped,None,92,112)
When I run this I get:
OpenCV Error: Assertion failed (dsize.area() || (inv_scale_x > 0 && inv_scale_y > 0)) in resize, file /build/opencv-ISmtkH/opencv-2.4.9.1+dfsg/modules/imgproc/src/imgwarp.cpp, line 1835
Traceback (most recent call last):
File "1track.py", line 48, in <module>
cropped = cv2.resize(cropped,None,92,112)
cv2.error: /build/opencv-ISmtkH/opencv-2.4.9.1+dfsg/modules/imgproc/src/imgwarp.cpp:1835: error: (-215) dsize.area() || (inv_scale_x > 0 && inv_scale_y > 0) in function resize
To resize the image to new dimensions, you need to know the ratio between the new dimensions and the current ones. So if you want to turn (for example) a 640x480 image into a 92x112 image:
92/640=0.143
112/480=0.233
You use these ratios in the cv2.resize function:
cropped = cv2.resize(cropped, (0,0), fx=0.143, fy=0.233)
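Alternatively, cv2.resize accepts the target size directly as its dsize argument, which sidesteps the ratio computation (and the positional-argument mix-up in the failing call):

cropped = cv2.resize(cropped, (92, 112))    # dsize is (width, height)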

OpenCV Python: Error with using Mask parameter in GoodFeatureToDetect

I was trying to make a facial detection program in Python that combines Haar cascade classification and Lucas-Kanade optical flow, but I am getting an error like this:
Error:
Traceback (most recent call last):
File "/home/anthony/Documents/Programming/Python/Computer-Vision/OpenCV-Doc/optical-flow-and-haar-detection-test.py", line 80, in <module>
corners_t = cv2.goodFeaturesToTrack(gray, mask = mask_use, **feature_params)
error: /build/buildd/opencv-2.4.8+dfsg1/modules/imgproc/src/featureselect.cpp:63: error: (-215) mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()) in function goodFeaturesToTrack
How my program works:
My program uses a Haar cascade to get the coordinates of a detected face, copies whatever is in the area created by those coordinates (in this case, the face), takes an image that is nothing but black (all pixels set to zero via numpy), and pastes the copied face onto the black background. Passing this face-on-black image as the mask parameter should force goodFeaturesToTrack to create feature points only on the face, which will then be tracked by optical flow.
Code:
from matplotlib import pyplot as plt
import numpy as np
import cv2

rectangle_x = 0
face_classifier = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')

#cap = cv2.VideoCapture('video/sample.mov')
cap = cv2.VideoCapture(0)

# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 200,
                       qualityLevel = 0.01,
                       minDistance = 10,
                       blockSize = 7 )

# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Create some random colors
color = np.random.randint(0,255,(100,3))

# Take first frame and find corners in it
ret, old_frame = cap.read()
#old_frame = cv2.imread('images/webcam-first-frame-two.png')

###### Adding my code ###
cv2.imshow('Old_Frame', old_frame)
cv2.waitKey(0)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
restart = True
#while restart == True:
face = face_classifier.detectMultiScale(old_gray, 1.2, 4)

if len(face) == 0:
    print "This is empty"

for (x,y,w,h) in face:
    focused_face = old_frame[y: y+h, x: x+w]
    #cv2.rectangle(old_frame, (x,y), (x+w, y+h), (0,255,0),2)

    # initalize all pixels to zero (picture completely black)
    mask_use = np.zeros(old_frame.shape,np.uint8)
    # Crop old_frame coordinates and paste it on the black mask
    mask_use[y:y+h,x:x+w] = old_frame[y:y+h,x:x+w]

    height, width, depth = mask_use.shape
    print "Height: ", height
    print "Width: ", width
    print "Depth: ", depth

    height, width, depth = old_frame.shape
    print "Height: ", height
    print "Width: ", width
    print "Depth: ", depth

    cv2.imshow('Stuff', mask_use)
    cv2.imshow('Old_Frame', old_frame)
    #cv2.imshow('Zoom in', focused_face)
    face_gray = cv2.cvtColor(old_frame,cv2.COLOR_BGR2GRAY)
    gray = cv2.cvtColor(focused_face,cv2.COLOR_BGR2GRAY)
    corners_t = cv2.goodFeaturesToTrack(gray, mask = mask_use, **feature_params)
    corners = np.int0(corners_t)
    #print corners

    for i in corners:
        ix,iy = i.ravel()
        cv2.circle(focused_face,(ix,iy),3,255,-1)
        cv2.circle(old_frame,(x+ix,y+iy),3,255,-1)
        print ix, " ", iy

    plt.imshow(old_frame),plt.show()
    """
    print "X: ", x
    print "Y: ", y
    print "W: ", w
    print "H: ", h
    #face_array = [x,y,w,h]
    """

#############################
p0 = cv2.goodFeaturesToTrack(old_gray, mask = old_gray, **feature_params)
#############################

# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)

while(1):
    ret, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st==1]
    ###print "Good_New"
    ###print good_new
    good_old = p0[st==1]

    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        #print i
        #print color[i]
        a,b = new.ravel()
        c,d = old.ravel()
        cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
        cv2.circle(frame,(a, b),5,color[i].tolist(),-1)
        if i == 99:
            break
        #For circle, maybe replace (a,b) with (c,d)?

    #img = cv2.add(frame,mask)
    cv2.imshow('frame',frame)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)

cv2.destroyAllWindows()
cap.release()
Can anyone see the problem and tell me how to fix it? Thanks.
I've had this error caused by using arrays that aren't the same size.
You have a for loop that dynamically assigns values to focused_face, but goodFeaturesToTrack uses a static size (equal to the last instance of focused_face), while old_frame uses the shape of the first instance.
Make sure you are using image and mask arrays of the same shape in goodFeaturesToTrack.
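The assertion also states the exact requirements: the mask must be 8-bit single-channel (CV_8UC1) and the same size as the image, whereas mask_use above is a 3-channel copy of the full frame while gray is only the cropped face. A minimal sketch that satisfies both constraints, reusing the variable names from the question:

import numpy as np

# Single-channel 8-bit mask with the same size as the full grayscale frame.
roi_mask = np.zeros(old_gray.shape, dtype=np.uint8)
roi_mask[y:y+h, x:x+w] = 255    # nonzero pixels mark the searchable region

# Full-size image with a matching full-size mask:
# mask.type() == CV_8UC1 and mask.size() == image.size() now both hold.
corners_t = cv2.goodFeaturesToTrack(old_gray, mask=roi_mask, **feature_params)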
