Overlaying a transparent image onto a video stream - OpenCV - Python

I'm struggling to understand how to overlay a .png with transparency onto a video stream.
For some reason, the transparent area is always displayed as black.
Here's what I do:
Loading the image and setting up the environment
import cv2
import numpy as np
from PIL import Image
cap = cv2.VideoCapture(0)
cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
dim = (640, 480)
alpha = 0.0
foreground = cv2.imread('png.png', cv2.IMREAD_UNCHANGED)
rows, cols, channels = foreground.shape
Adding an artificial alpha layer to the frame and overlaying the loaded image
def logoOverlay(image, logo, alpha=1.0, x=0, y=0, scale=1.0):
    (h, w) = image.shape[:2]
    image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])
    overlay = cv2.resize(logo, None, fx=scale, fy=scale)
    (wH, wW) = overlay.shape[:2]
    output = image.copy()
    # blend the two images together using transparent overlays
    try:
        if x < 0: x = w + x
        if y < 0: y = h + y
        if x + wW > w: wW = w - x
        if y + wH > h: wH = h - y
        overlay = cv2.addWeighted(output[y:y+wH, x:x+wW], alpha, overlay[:wH, :wW], 1 - alpha, 0)
        output[y:y+wH, x:x+wW] = overlay
    except Exception as e:
        print("Error: Logo position is overshooting image!")
        print(e)
    output = output[:, :, :3]
    return output
Calling this function every frame:
while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    frame = logoOverlay(frame, foreground, alpha=alpha, scale=1, y=100, x=100)
    cv2.imshow('window', frame)
    cv2.waitKey(1)  # needed so the window actually refreshes
thanks for your help, highly appreciated!
FP
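For anyone hitting the same wall: cv2.addWeighted blends the two arrays with fixed global weights and never looks at the PNG's alpha channel, which is why the transparent region comes out black. A minimal sketch of per-pixel alpha compositing instead (assuming the logo was loaded with cv2.IMREAD_UNCHANGED and so has four channels; bounds checking omitted):
def overlayTransparent(frame, logo, x=0, y=0):
    h, w = logo.shape[:2]
    bgr = logo[:, :, :3].astype(float)
    # the alpha channel becomes a (h, w, 1) blend factor in [0, 1] that broadcasts over BGR
    alpha = logo[:, :, 3:].astype(float) / 255.0
    roi = frame[y:y+h, x:x+w].astype(float)
    # per-pixel blend: opaque logo pixels win, fully transparent ones keep the frame
    frame[y:y+h, x:x+w] = (alpha * bgr + (1.0 - alpha) * roi).astype('uint8')
    return frame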

Related

How to track my cursor to a coordinate in real time? (DIY aimbot)

I need to track my cursor to the nose of a human to create a DIY aimbot with pose detection.
(Just for fun, not intending to cheat; there would be so many better and easier options than making my own.)
I already have the first part of the code, and it shows your screen and the skeleton, as well as the exact coordinates of the nose, with no problem.
But the method I'm using to move my cursor to that point is not working.
I'm using mouse.move and have tried other things like pyautogui and tkinter.
It doesn't give me an error, but it still does not work.
import cv2
import mediapipe as mp
import numpy as np
import time
import pyautogui
import mouse

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# display screen resolution, get it from your OS settings
SCREEN_SIZEX = 1920
SCREEN_SIZEY = 1080

# define the codec
fourcc = cv2.VideoWriter_fourcc(*"XVID")
# create the video writer object
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (SCREEN_SIZEX, SCREEN_SIZEY))

with mp_pose.Pose(min_detection_confidence=0.1, min_tracking_confidence=0.9) as pose:
    while True:
        # make a screenshot
        img = pyautogui.screenshot()
        # convert these pixels to a proper numpy array to work with OpenCV
        frame = np.array(img)
        # convert colors from BGR to RGB
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Recolor image to RGB
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image.flags.writeable = False
        # Make detection
        results = pose.process(image)
        # Recolor back to BGR
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        try:
            landmarks = results.pose_landmarks.landmark
            lndmark = landmarks[mp_pose.PoseLandmark.NOSE.value]
            x = [landmarks[mp_pose.PoseLandmark.NOSE.value].x]
            y = [landmarks[mp_pose.PoseLandmark.NOSE.value].y]
            #print(x)
            #print(y)
            mouse.move(x, y)
        except:
            pass
        # Render detections
        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                  mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
                                  mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
                                  )
        # write the frame
        out.write(frame)
        pTime = 0
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(image, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,
                    (255, 0, 0), 3)
        cv2.imshow('Mediapipe Feed', image)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

out.release()
cv2.destroyAllWindows()

#for lndmark in mp_pose.PoseLandmark:
#    print(lndmark)
This is the part that doesn't work:
try:
    landmarks = results.pose_landmarks.landmark
    lndmark = landmarks[mp_pose.PoseLandmark.NOSE.value]
    x = [landmarks[mp_pose.PoseLandmark.NOSE.value].x]
    y = [landmarks[mp_pose.PoseLandmark.NOSE.value].y]
    mouse.move(x, y)
except:
    pass
I would assume it's because x and y are supposed to be numbers, or somehow it can't read or process them correctly,
but it doesn't give me an error, so I'm asking here hoping one of you has already figured this one out.
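A likely cause, for what it's worth: MediaPipe landmark coordinates are normalized floats in [0, 1], and the code wraps them in one-element lists, so mouse.move receives lists instead of pixel integers (and the bare except hides whatever error that raises). A hedged sketch of the conversion, reusing SCREEN_SIZEX and SCREEN_SIZEY from the code above:
try:
    landmarks = results.pose_landmarks.landmark
    nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
    # nose.x and nose.y are normalized to [0, 1]; scale to pixels and pass plain ints
    mouse.move(int(nose.x * SCREEN_SIZEX), int(nose.y * SCREEN_SIZEY))
except AttributeError:
    pass  # no pose detected in this frame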

How to show multiple VideoCapture streams in one frame?

I have an SLR (sign language recognition) task, and I want to show the preprocessing part. Here is my code:
import numpy as np
import cv2
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

# Load CNN model
model = load_model("VGG16withALLTRAINABLE(NO BACKGROUND).h5")

# Creating ROI frame for capturing hand
top_ROI = 100
btm_ROI = 300
right_ROI = 50
left_ROI = 250

# Creating background removal parameters
blur_size = 5
canny_low = 25
# min_area = 0
# max_area = 0
canny_high = 150
dilate_iter = 10
erode_iter = 10
mask_color = (0.0, 0.0, 0.0)

# Video capture
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()

    # flipping frame
    # frame = cv2.flip(frame, 1)

    # Create ROI inside frame
    roi = frame[top_ROI:btm_ROI, right_ROI:left_ROI]
    cv2.rectangle(frame, (left_ROI, top_ROI), (right_ROI, btm_ROI), (255, 128, 0), 3)  # Visual rectangle for ROI

    # Resizing and reshaping to equalize model input size and shape
    roi = cv2.resize(roi, (300, 300))
    blurred_roi = cv2.GaussianBlur(roi, (blur_size, blur_size), 0)
    gray_roi = cv2.cvtColor(blurred_roi, cv2.COLOR_BGR2GRAY)
    _, threshed = cv2.threshold(gray_roi, 100, 255, cv2.THRESH_BINARY_INV)

    # edge = cv2.Canny(gray_roi, canny_low, canny_high)
    # edge = cv2.dilate(edge, None)
    # edge = cv2.erode(edge, None)

    cntr = []
    cntr_area = []
    contours, _ = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contour_info = []
    for c in contours:
        contour_info.append((c, cv2.contourArea(c),))
    contour_info = np.array(contour_info)
    contour_info = sorted(contour_info, key=lambda x: x[1], reverse=True)
    max_contour = contour_info[0]

    mask = np.zeros(threshed.shape)
    cv2.fillConvexPoly(mask, max_contour[0], (255))
    mask = cv2.dilate(mask, None, iterations=dilate_iter)
    mask = cv2.erode(mask, None, iterations=erode_iter)
    mask = cv2.GaussianBlur(mask, (blur_size, blur_size), 0)
    mask_stack = np.dstack([mask] * 3)  # Create 3-channel alpha mask

    # -- Blend masked img into MASK_COLOR background ------------------------
    mask_stack = mask_stack.astype('float32') / 255.0  # Use float matrices,
    roi = roi.astype('float32') / 255.0                # for easy blending
    masked = (mask_stack * roi) + ((1 - mask_stack) * mask_color)  # Blend
    masked = (masked * 255).astype('uint8')            # Convert back to 8-bit

    print(mask.shape)
    print(mask_stack.shape)
    print(masked.shape)

    cv2.imshow("Frame", frame)
    cv2.imshow("ROI", gray_roi)
    cv2.imshow("Thresed", threshed)
    cv2.imshow('Mask', masked)

    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
This is my current result [results in different frames].
My question is: can I show all the results in one frame (one window with multiple videos)?
I tried once with the code below, but it stops working when I add the second video stream function (video_stream2()):
from tkinter import *
from PIL import ImageTk, Image
import cv2

# Creating ROI frame for capturing hand
top_ROI = 100
btm_ROI = 300
right_ROI = 50
left_ROI = 250

root = Tk()
root.geometry("1920x1080")

# Create a frame
Main_video = Frame(root, highlightbackground='grey', highlightthickness=3)
Main_video.grid(row=0, column=0, padx=450, pady=150, ipadx=0, ipady=0)
Roi_video = Frame(root, highlightbackground='grey', highlightthickness=3)
Roi_video.grid(row=0, column=0, padx=0, pady=0, ipadx=0, ipady=0)

# Create a label in the frame
label_main = Label(Main_video)
label_main.grid()
label_roi = Label(Roi_video)
label_roi.grid()

# Capture from camera
cap = cv2.VideoCapture(0)

# function for video streaming
def video_stream():
    _, frame = cap.read()
    # Main video
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    label_main.imgtk = imgtk
    label_main.configure(image=imgtk)
    label_main.after(1, video_stream)

def video_stream2():
    _, frame = cap.read()
    # Create ROI inside frame
    roi = frame[top_ROI:btm_ROI, right_ROI:left_ROI]
    cv2.rectangle(frame, (left_ROI, top_ROI), (right_ROI, btm_ROI), (255, 128, 0), 3)  # Visual rectangle for ROI
    cv2roi_gray = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
    # ROI video
    roi_img = Image.fromarray(cv2roi_gray)
    imgtk_roi = ImageTk.PhotoImage(image=roi_img)
    label_roi.imgtk_roi = imgtk_roi
    label_roi.configure(image=imgtk_roi)
    label_roi.after(1, video_stream2)

video_stream()
video_stream2()
root.mainloop()
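One probable reason adding video_stream2() breaks things: both callbacks call cap.read() on the same VideoCapture, so the two after-loops compete for frames. A hedged sketch that reads once per tick and updates both labels from that single frame (reusing the labels and ROI constants defined above):
def video_stream_both():
    _, frame = cap.read()  # read exactly once per tick
    # Main video
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    imgtk = ImageTk.PhotoImage(image=Image.fromarray(rgb))
    label_main.imgtk = imgtk
    label_main.configure(image=imgtk)
    # ROI video, derived from the same frame
    roi = frame[top_ROI:btm_ROI, right_ROI:left_ROI]
    roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    imgtk_roi = ImageTk.PhotoImage(image=Image.fromarray(roi_gray))
    label_roi.imgtk = imgtk_roi
    label_roi.configure(image=imgtk_roi)
    label_main.after(10, video_stream_both)

video_stream_both()
root.mainloop()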
The procedure to combine several images (windows) into one is easy, by following the example code:
import numpy as np
import cv2
import time

# Video capture
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    frame_uus = cv2.resize(frame, (240, 160))

    # let's simulate the images...
    # frame = np.random.randint(0, 255, [320, 480, 3], dtype='uint8')
    gray_roi = 0.5 * np.random.randint(0, 255, [160, 240, 1], dtype='uint8') + 0.5 * frame_uus[:, :, 0:1]
    threshed = 0.1 + 0 * np.random.randint(0, 255, [160, 240, 3], dtype='uint8') + 0.3 * frame_uus
    masked = 0.5 * np.random.randint(0, 255, [160, 240, 3], dtype='uint8') + 0.2 * frame_uus

    # make sure all data is in uint8 format, suitable for cv2...
    gray_roi = gray_roi.astype(np.uint8)
    threshed = threshed.astype(np.uint8)
    masked = masked.astype(np.uint8)

    # show separate images...
    cv2.imshow("Frame", frame)
    cv2.imshow("ROI", gray_roi)
    cv2.imshow("Thresed", threshed)
    cv2.imshow('Mask', masked)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    # Define space between images...
    vali = 2
    # let's combine the images...
    max_height = np.max([len(frame), len(gray_roi), len(threshed), len(masked)])
    # Let's calculate total width for the combined image... remember to add space between images...
    total_width = len(frame[0]) + len(gray_roi[0]) + len(threshed[0]) + len(masked[0]) + 4 * vali

    # For clearness let's make a green background image
    baseimage = np.zeros([max_height, total_width, 3], 'uint8')
    baseimage[:, :, 1] = 255

    # let's add the separate images to the base image
    baseimage[0:len(frame), 0:len(frame[0]), :] = frame

    # Take into account the grayscale...
    alku = len(frame[0]) + vali
    loppu = alku + len(gray_roi[0])
    baseimage[0:len(gray_roi), alku:loppu, 0:1] = gray_roi
    baseimage[0:len(gray_roi), alku:loppu, 1:2] = gray_roi
    baseimage[0:len(gray_roi), alku:loppu, 2:3] = gray_roi

    # Add the next image...
    alku = loppu + vali
    loppu = alku + len(threshed[0])
    baseimage[0:len(threshed), alku:loppu, :] = threshed

    # And the last one...
    alku = loppu + vali
    loppu = alku + len(masked[0])
    baseimage[0:len(masked), alku:loppu, :] = masked

    # And finally let's show the base image...
    cv2.imshow('Combined', baseimage)

cap.release()
cv2.destroyAllWindows()
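As a side note: when all the tiles share the same height and dtype, the manual index arithmetic above can be replaced with np.hstack. A minimal sketch (inside the same loop; the grayscale image must be expanded to three channels first):
    gray_bgr = cv2.cvtColor(gray_roi, cv2.COLOR_GRAY2BGR)          # (160, 240, 1) -> (160, 240, 3)
    combined = np.hstack([frame_uus, gray_bgr, threshed, masked])  # all tiles 160 px high, uint8
    cv2.imshow('Combined', combined)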

How to resize the output frame of a live YouTube stream using OpenCV

I'm new to coding and to this site. I'm working on a project where I want to use OpenCV, but I've got an issue: I need to resize the output frame for object recognition. I have read that the frame should be 416x416, but when I display the frame, it's still at the original size.
Here's the code:
import pafy
import youtube_dl
import cv2
import numpy as np

url = "https://www.youtube.com/watch?v=WOn7m0_aYBw"
video = pafy.new(url)
best = video.getbest(preftype="mp4")

cap = cv2.VideoCapture()
cap.open(best.url)

net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
classes = []
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))

while True:
    ret, frame = cap.read()
    # if ret == True:
    img = cv2.imshow('frame', frame)
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 416)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 416)
    width = 416
    height = 416
    dim = (width, height)
    img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    print(img.shape)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
    blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
print(img.shape) returns the correct size, but I think I'm displaying the wrong window. How do I change this code so the window is shown at the correct size?
You were showing the frame before resizing it:
while True:
    ret, frame = cap.read()
    width = 416
    height = 416
    dim = (width, height)
    img = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    print(img.shape)
    cv2.imshow('frame', img)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
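One more detail worth knowing: cv2.dnn.blobFromImage already resizes its input to the size argument internally, so the explicit cv2.resize mainly matters for what you display and for drawing detections at matching coordinates:
# blobFromImage rescales and resizes to (416, 416) on its own,
# so the network input size does not depend on the displayed frame size
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)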

OpenCV & Python - Image too big to display

I have an image that is 6400 × 3200, while my screen is 1280 x 800. Therefore, the image needs to be resized for display only. I am using Python and OpenCV 2.4.9.
According to OpenCV Documentation,
If you need to show an image that is bigger than the screen resolution, you will need to call namedWindow("", WINDOW_NORMAL) before the imshow.
That is what I am doing, but the image is not fitted to the screen, only a portion is shown because it's too big. I've also tried with cv2.resizeWindow, but it doesn't make any difference.
import cv2
cv2.namedWindow("output", cv2.WINDOW_NORMAL) # Create window with freedom of dimensions
# cv2.resizeWindow("output", 400, 300) # Resize window to specified dimensions
im = cv2.imread("earth.jpg") # Read image
cv2.imshow("output", im) # Show image
cv2.waitKey(0) # Display the image infinitely until any keypress
Although I was expecting an automatic solution (fitting to the screen automatically), resizing solves the problem as well.
import cv2
cv2.namedWindow("output", cv2.WINDOW_NORMAL) # Create window with freedom of dimensions
im = cv2.imread("earth.jpg") # Read image
imS = cv2.resize(im, (960, 540)) # Resize image
cv2.imshow("output", imS) # Show image
cv2.waitKey(0) # Display the image infinitely until any keypress
The other answers perform a fixed (width, height) resize. If you want to resize to a specific size while maintaining the aspect ratio, use this:
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    dim = None
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    return cv2.resize(image, dim, interpolation=inter)
Example
image = cv2.imread('img.png')
resize = ResizeWithAspectRatio(image, width=1280) # Resize by width OR
# resize = ResizeWithAspectRatio(image, height=1280) # Resize by height
cv2.imshow('resize', resize)
cv2.waitKey()
Use this for example:
cv2.namedWindow('finalImg', cv2.WINDOW_NORMAL)
cv2.imshow("finalImg",finalImg)
The only way resizeWindow worked for me was to have it after imshow. This is the order I'm using:
# Create a Named Window
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
# Move it to (X,Y)
cv2.moveWindow(win_name, X, Y)
# Show the Image in the Window
cv2.imshow(win_name, image)
# Resize the Window
cv2.resizeWindow(win_name, width, height)
# Wait for <> milliseconds
cv2.waitKey(wait_time)
In OpenCV, cv2.namedWindow() just creates a window object, but doesn't resize the original image. You can use cv2.resize(img, resolution) to solve the problem.
Here's what it displays, a 740 * 411 resolution image.
image = cv2.imread("740*411.jpg")
cv2.imshow("image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here, it displays a 100 * 200 resolution image after resizing. Remember that the resolution parameter uses column (width) first, then row (height).
image = cv2.imread("740*411.jpg")
image = cv2.resize(image, (200, 100))
cv2.imshow("image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
This code will resize the image so that it retains its aspect ratio and only ever takes up a specified fraction of the screen area.
It will automatically adjust depending on your screen size and the size of the image.
Use the area variable to change the maximum screen area you want the image to be able to take up. The example shows it displayed at a quarter of the screen size.
import cv2
import tkinter as tk
from math import *

img = cv2.imread("test.jpg")
area = 0.25

h, w = img.shape[:2]
root = tk.Tk()
screen_h = root.winfo_screenheight()
screen_w = root.winfo_screenwidth()
vector = sqrt(area)
window_h = screen_h * vector
window_w = screen_w * vector

if h > window_h or w > window_w:
    if h / window_h >= w / window_w:
        multiplier = window_h / h
    else:
        multiplier = window_w / w
    img = cv2.resize(img, (0, 0), fx=multiplier, fy=multiplier)

cv2.imshow("output", img)
cv2.waitKey(0)
I've also made a similar function where area is still a parameter, but so are window height and window width.
If no area is given, it will use the defined height and width (window_h, window_w) of the window you would like the image to fit inside.
If inputs are given for all parameters, 'area' is prioritised.
import cv2
import tkinter as tk
from math import *

def resize_image(img, area=0.0, window_h=0, window_w=0):
    h, w = img.shape[:2]
    root = tk.Tk()
    screen_h = root.winfo_screenheight()
    screen_w = root.winfo_screenwidth()

    if area != 0.0:
        vector = sqrt(area)
        window_h = screen_h * vector
        window_w = screen_w * vector

    if h > window_h or w > window_w:
        if h / window_h >= w / window_w:
            multiplier = window_h / h
        else:
            multiplier = window_w / w
        img = cv2.resize(img, (0, 0), fx=multiplier, fy=multiplier)

    return img

# using area
initial_image = cv2.imread("test.jpg")
resized_image = resize_image(initial_image, area=0.25)
cv2.imshow("output", resized_image)
cv2.waitKey(0)

# using window height and width
initial_image = cv2.imread("test.jpg")
resized_image = resize_image(initial_image, window_h=480, window_w=270)
cv2.imshow("output", resized_image)
cv2.waitKey(0)
It looks like the OpenCV library is pretty sensitive to the parameters passed to its methods. The following code worked for me using OpenCV 4.3.0:
win_name = "visualization" # 1. use var to specify window name everywhere
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL) # 2. use 'normal' flag
img = cv2.imread(filename)
h,w = img.shape[:2] # suits for image containing any amount of channels
h = int(h / resize_factor) # one must compute beforehand
w = int(w / resize_factor) # and convert to INT
cv2.resizeWindow(win_name, w, h) # use variables defined/computed BEFOREHAND
cv2.imshow(win_name, img)
Try this:
image = cv2.imread("img/Demo.jpg")
image = cv2.resize(image,(240,240))
The image is now resized. Displaying it will render in 240x240.
The cv2.WINDOW_NORMAL option works correctly, but the first time it displays the window at a standard size.
You can then resize the window like any other window on your computer: position the mouse over an edge of the window and drag it to the size you want, for both width and height.
The following times you run the code, OpenCV will create the window with the size it had the last time it was shown or modified.
Try this code:
img = cv2.imread("Fab2_0.1 X 1.03MM GRID.jpg", cv2.IMREAD_GRAYSCALE)
image_scale_down = 3
w = int(img.shape[1] / image_scale_down)  # shape[1] is width (columns)
h = int(img.shape[0] / image_scale_down)  # shape[0] is height (rows)
image = cv2.resize(img, (w, h))           # cv2.resize expects (width, height)
cv2.imshow("image_title", image)
cv2.waitKey(5000)
cv2.destroyAllWindows()
The most upvoted answer is perfect!
I'll just add my code for those who want some "dynamic" resize handling depending on the ratio.
import cv2
from win32api import GetSystemMetrics

def get_resized_for_display_img(img):
    screen_w, screen_h = GetSystemMetrics(0), GetSystemMetrics(1)
    print("screen size", screen_w, screen_h)
    h, w, channel_nbr = img.shape
    # img gets the width of the screen, adapt the height
    h = h * (screen_w / w)
    w = screen_w
    if h > screen_h:  # if img h is still too big
        # img gets the height of the screen, adapt the width
        w = w * (screen_h / h)
        h = screen_h
    w, h = w * 0.9, h * 0.9  # because you don't want it to be that big, right?
    w, h = int(w), int(h)    # you need ints for cv2.resize
    return cv2.resize(img, (w, h))
Try this code:
img = cv2.resize(img,(1280,800))
Try with this code:
from PIL import Image
Image.fromarray(image).show()
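One caveat with the PIL approach when the array comes from OpenCV: OpenCV stores channels in BGR order while PIL expects RGB, so convert first or the colours will look swapped:
import cv2
from PIL import Image

im = cv2.imread("earth.jpg")               # OpenCV loads as BGR
rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # PIL expects RGB
Image.fromarray(rgb).show()                # the OS image viewer handles scaling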

Track an image in a video and replace it with another image using OpenCV

I have to track a window in a video and need to paste an image onto that window. I have used CamShift to track the window, but it did not track it correctly. My window is brown, so I have given the following color range:
lower = np.array((110, 0, 0))
upper = np.array((130, 255, 255))
I have read many OpenCV documents but am not able to figure out which method to follow. I am using OpenCV 2.4.9 with Python.
Below is the code I tried. Please help me figure out the exact location of the window.
#!/usr/bin/env python
import numpy as np
import cv2

cap = cv2.VideoCapture("myvideo.mp4")

# take first frame of the video
ret, frame = cap.read()
#print frame
#print ret

# setup initial location of window
r, h, c, w = 157, 40, 337, 40
track_window = (c, r, w, h)

# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((110, 0, 0)), np.array((130, 255, 255)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [255], [0, 255])
cv2.imshow('img2', roi_hist)
#print roi_hist
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Setup the termination criteria, either 10 iterations or move by at least 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

i = 1
while(1):
    ret, frame = cap.read()
    if ret == True:
        i += 1
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 255], 1)
        # apply meanshift to get the new location
        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
        #print track_window
        # Draw it on image
        x, y, w, h = track_window
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('img2', frame)
        k = cv2.waitKey(200) & 0xff
        if k == 27:
            break
        else:
            # print "comes here2"
            cv2.imwrite(str(i) + "test.jpg", frame)
            #break
    else:
        break

cv2.destroyAllWindows()
cap.release()
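As for the pasting step once tracking works: cv2.CamShift returns a rotated rectangle, and one hedged sketch is to warp the replacement image onto its four corners with a perspective transform. Here 'replacement.png' is a placeholder name, on OpenCV 2.4.x use cv2.cv.BoxPoints instead of cv2.boxPoints, and the corner ordering may need adjusting to avoid a flipped paste:
# ret is the rotated rect returned by cv2.CamShift, frame is the current frame
replacement = cv2.imread('replacement.png')          # placeholder image
rh, rw = replacement.shape[:2]
box = cv2.boxPoints(ret).astype(np.float32)          # 4 corners of the tracked window
src = np.float32([[0, 0], [rw, 0], [rw, rh], [0, rh]])
M = cv2.getPerspectiveTransform(src, box)
warped = cv2.warpPerspective(replacement, M, (frame.shape[1], frame.shape[0]))
mask = np.zeros(frame.shape[:2], dtype=np.uint8)
cv2.fillConvexPoly(mask, box.astype(np.int32), 255)
frame[mask == 255] = warped[mask == 255]             # paste only inside the tracked box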
