I'm trying to save the output imgCanny as an image/screenshot. How do I incorporate this so that a screenshot of that window is captured when a certain key is pressed? I am taking the live feed from the webcam and processing it, and I would like to be able to press a key on the keyboard to capture and save a screenshot of the imgCanny window.
import cv2
import numpy as np

cap = cv2.VideoCapture(1)
cv2.waitKey(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
def empty(a):
pass
cv2.namedWindow("Parameters")
cv2.resizeWindow("Parameters", 640, 240)
cv2.createTrackbar("Threshold1", "Parameters", 150, 500, empty)
cv2.createTrackbar("Threshold2", "Parameters", 255, 500, empty)
def getContours(img, imgContour):
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE) # retrieval method: RETR_EXTERNAL keeps only the extreme outer contours
# change CHAIN_APPROX_NONE to CHAIN_APPROX_SIMPLE for fewer points
# remove small bits of noise
for cnt in contours:
area = cv2.contourArea(cnt)
# Used to flatten the array containing
# the coordinates of the vertices.
approx = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True),
True)
n = approx.ravel()
i = 0
for j in n:
if (i % 2 == 0):
x = n[i]
y = n[i + 1]
# String containing the co-ordinates.
string = str(int(x / 3.5)) + " " + str(int(y / 3.5))
print(string)
if (i == 0):
# text on topmost co-ordinate.
cv2.putText(imgContour, "", (x, y),
font, 0.5, (255, 0, 0))
else:
# text on remaining co-ordinates.
cv2.putText(imgContour, string, (x, y),
font, 0.5, (0, 255, 0))
i = i + 1
if area > 3000:
cv2.drawContours(imgContour, cnt, -1, (255, 0, 255), 1)
peri = cv2.arcLength(cnt, True) #true means contour is closed
while True:
success, img = cap.read()
imgContour = img.copy()
imgBlur = cv2.GaussianBlur(img, (31, 31), 1)
imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)
threshold1 = cv2.getTrackbarPos("Threshold1", "Parameters")
threshold2 = cv2.getTrackbarPos("Threshold2", "Parameters")
imgCanny = cv2.Canny(imgGray, threshold1, threshold2)
kernel = np.ones((5, 5))
imgDil = cv2.dilate(imgCanny, kernel, iterations=1)
getContours(imgDil, imgContour)
#cv2.imshow("Results", img )
#cv2.imshow("Mask", imgGray)
cv2.imshow("Canny", imgCanny)
cv2.imshow("Dilated", imgContour)
cv2.imshow("Test", imgDil)
key = cv2.waitKey(100)
if key == 27: #kills with Esc
break
cap.release()
cv2.destroyAllWindows()
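For reference, a minimal sketch of one way to do the capture (an assumption about the intended behaviour, not code from the original post): replace the key handling at the bottom of the while loop, check the value returned by cv2.waitKey, and write imgCanny to disk with cv2.imwrite when a chosen key, here 's', is pressed. The filename pattern is an assumption; any unique name works.

import time

key = cv2.waitKey(100)
if key == ord('s'):
    # hypothetical filename pattern: timestamped PNG in the working directory
    filename = "canny_{}.png".format(int(time.time()))
    cv2.imwrite(filename, imgCanny)
    print("Saved", filename)
elif key == 27:  # Esc still quits the loop
    break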
I want to run this code on my Raspberry Pi 3. I have used pip install imutils on the Pi, but when I run the code via the CLI, it returns "No module named imutils". I do not wish to use virtual environments. I have cv2 running correctly on the Pi and it works with no problem. Is there a fix for this imutils problem?
I have tried updating, upgrading, and removing imutils, but it is needed.
import numpy as np
import cv2
import Person
import time
import imutils
import datetime
cap = cv2.VideoCapture('testVideo.mp4')
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True) # Create the background subtractor
kernelOp = np.ones((3, 3), np.uint8)
kernelOp1 = np.ones((7, 7), np.uint8)
kernelOp2 = np.ones((5, 5), np.uint8)
kernelCl = np.ones((11, 11), np.uint8)
kernelCl1 = np.ones((20, 20), np.uint8)
kernelCl2 = np.ones((25, 25), np.uint8)
# Variables
font = cv2.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1
areaTH = 5000
w_margin = 50
h_margin = 50
wmax = 500
import pdb; pdb.set_trace()  # start of debugging
# Display variables
cnt_up = 0
cnt_down = 0
line_down_color = (255, 0, 0)
line_up_color = (0, 0, 255)
pts_L1 = np.array([[0, 320], [480, 320]])
pts_L2 = np.array([[0, 400], [480, 400]])
counter = 0
while (cap.isOpened()):
ret, frame = cap.read() # read a frame
frame = imutils.resize(frame, width=min(640, frame.shape[1]))
fgmask = fgbg.apply(frame) # Apply the background subtractor
try:
ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
mask0 = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp2)
mask = cv2.morphologyEx(mask0, cv2.MORPH_CLOSE, kernelCl2)
except:
# if there are no more frames to show...
print('EOF')
break
maskOriginal = mask
_, contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
########if contour is too big cut in half
mask2_flag = 0
for cnt in contours0:
area = cv2.contourArea(cnt)
if area > areaTH:
M = cv2.moments(cnt)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
x, y, w, h = cv2.boundingRect(cnt)
if w > wmax:
mask2 = cv2.line(mask, (x + w // 2, 0), (x + w // 2, 640), (0, 0, 0), 10)  # integer division so the point coordinates stay ints
mask2_flag = 1
if mask2_flag == 0:
mask2 = mask
cv2.imshow('Mask line', mask2)
cv2.imshow('mask to open', mask0)
cv2.imshow('Mask initialize', maskOriginal)
cv2.imshow('initial subtraction', imBin)
_, contours0, hierarchy = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours0:
cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3, 8)
area = cv2.contourArea(cnt)
for i in persons:
i.updateDingimas(i.getDingimas() + 1)
if i.getDingimas() > 25:
persons.remove(i)
if area > areaTH:
M = cv2.moments(cnt)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
x, y, w, h = cv2.boundingRect(cnt)
print('x{} y{} w{} h{}'.format(x, y, w, h))
new = True
for i in persons:
if abs(x - i.getX()) <= w_margin and abs(y - i.getY()) <= h_margin:
new = False
i.updateCoords(cx, cy)
i.updateDingimas(0)
break
if new == True:
p = Person.MyPerson(pid, cx, cy, max_p_age)
persons.append(p)
pid += 1
cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3)
cv2.imshow('img', img)
#########################
# Trajectory rendering
#########################
for i in persons:
if len(i.getTracks()) >= 2:
pts = np.array(i.getTracks(), np.int32)
pts = pts.reshape((-1, 1, 2))
frame = cv2.polylines(frame, [pts], False, i.getRGB())
if i.getDir() == None:
i.kurEina(pts_L2[0, 1], pts_L1[0, 1])
if i.getDir() == 'up':
cnt_up += 1
print('Timestamp: {:%H:%M:%S} UP {}'.format(datetime.datetime.now(), cnt_up))
elif i.getDir() == 'down':
cnt_down += 1
print('Timestamp: {:%H:%M:%S} DOWN {}'.format(datetime.datetime.now(), cnt_down))
cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font, 0.7, i.getRGB(), 1, cv2.LINE_AA)
#########################
# Rendering
#########################
str_in = 'In: ' + str(cnt_up)
str_out = 'Out: ' + str(cnt_down)
frame = cv2.polylines(frame, [pts_L1], False, line_down_color, thickness=4)
frame = cv2.polylines(frame, [pts_L2], False, line_up_color, thickness=4)
cv2.putText(frame, str_in, (10, 50), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
cv2.putText(frame, str_out, (10, 100), font, 1, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imshow('Frame', frame)
# Abort and exit with 'Q' or ESC
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release() # release video file
cv2.destroyAllWindows() # close all openCV windows
I want to run this code without the "No module named imutils" error.
If you intend to use the module with Python 3, you need to install it with pip3 so that it is installed in the correct location.
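For example, on the Pi (assuming the script is run with python3):

pip3 install imutils
# or, to be explicit about which interpreter the package is installed for:
python3 -m pip install imutils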
I am stuck with the code below. Please help me solve the "[ WARN:0] terminating async callback" error in cv2.
# this is a client program which run on client side
import cv2
import socket
import numpy as np
import math
import time
try:
start_time = time.time()
state1 = "off"
state2 = "off"
state3 = "off"
mode = "on"
host = "192.168.0.106" # IP address of the Raspberry Pi running the server socket
port = 9345
mySocket = socket.socket()
mySocket.connect((host,port))
cap = cv2.VideoCapture(0)
while(cap.isOpened()):
# read image
ret, img = cap.read()
# get hand data from the rectangle sub window on the screen
cv2.rectangle(img, (300,300), (100,100), (0,255,0),0)
crop_img = img[100:300, 100:300]
# convert to grayscale
grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
# applying gaussian blur
value = (35, 35)
blurred = cv2.GaussianBlur(grey, value, 0)
# thresholding: Otsu's binarization method
_, thresh1 = cv2.threshold(blurred, 127, 255,
cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# show thresholded image
cv2.imshow('Thresholded', thresh1)
# check OpenCV version to avoid unpacking error
(version, _, _) = cv2.__version__.split('.')
if version == '3':
image, contours, hierarchy = cv2.findContours(thresh1.copy(), \
cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
elif version == '2':
contours, hierarchy = cv2.findContours(thresh1.copy(),cv2.RETR_TREE, \
cv2.CHAIN_APPROX_NONE)
# find contour with max area
cnt = max(contours, key = lambda x: cv2.contourArea(x))
# create bounding rectangle around the contour (can skip below two lines)
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(crop_img, (x, y), (x+w, y+h), (0, 0, 255), 0)
# finding convex hull
hull = cv2.convexHull(cnt)
# drawing contours
drawing = np.zeros(crop_img.shape,np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
cv2.drawContours(drawing, [hull], 0,(0, 0, 255), 0)
# finding convex hull
hull = cv2.convexHull(cnt, returnPoints=False)
# finding convexity defects
defects = cv2.convexityDefects(cnt, hull)
count_defects = 0
cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
# applying Cosine Rule to find angle for all defects (between fingers)
# with angle > 90 degrees and ignore defects
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
# find length of all sides of triangle
a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
# apply cosine rule here
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
# ignore angles > 90 and highlight rest with red dots
if angle <= 90:
count_defects += 1
cv2.circle(crop_img, far, 1, [0,0,255], -1)
#dist = cv2.pointPolygonTest(cnt,far,True)
# draw a line from start to end i.e. the convex points (finger tips)
# (can skip this part)
cv2.line(crop_img,start, end, [0,255,0], 2)
#cv2.circle(crop_img,far,5,[0,0,255],-1)
# define actions required
if count_defects == 1:
if (time.time()>start_time+2):
if mode == "on":
mySocket.send("on_led1".encode())
state1 = "on"
print("led 1 is on")
else:
mySocket.send("off_led1".encode())
state1 = "off"
print("led 1 is off")
start_time = time.time()
cv2.putText(img, "led 1 is "+state1, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
elif count_defects == 2:
if (time.time()>start_time+2):
if mode == "on":
mySocket.send("on_led2".encode())
state2 = "on"
print("led 2 is on")
else:
mySocket.send("off_led2".encode())
state2 = "off"
print("led 2 is off")
start_time = time.time()
cv2.putText(img, "led 2 is "+state2, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
elif count_defects == 3:
if (time.time()>start_time+2):
if mode == "on":
mySocket.send("on_led3".encode())
state3 = "on"
print("led 3 is on")
else:
mySocket.send("off_led3".encode())
state3 = "off"
print("led 3 is off")
start_time = time.time()
cv2.putText(img, "led 3 is "+state3, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
elif count_defects == 4:
cv2.putText(img,"mode is "+mode, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
if(time.time() > start_time+2):
if mode == "on":
mode = "off"
else:
mode = "on"
start_time = time.time()
print(mode)
else:
cv2.putText(img, "use your fingure for turn On/Off lights current mode is "+mode, (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
# show appropriate images in windows
cv2.imshow('Gesture', img)
all_img = np.hstack((drawing, crop_img))
cv2.imshow('Contours', all_img)
k = cv2.waitKey(10)
if k == 27:
break
mySocket.close()
except:
mySocket.send("close_all".encode())
mySocket.close()
Above is a program I created for accessing an embedded device using cv2. Everything was working properly, but I still get the "[ WARN:0] terminating async callback" error. I also used the camera.release() and cv2.destroyAllWindows() functions, but that did not work. Any help would be appreciated.
I also tried this suggestion, but it is still not working. I am using the Windows 10 operating system.
Here is the full code with an explanation.
This warning comes from the MSMF backend on Windows; forcing the DirectShow backend instead usually gets rid of it. Try:
cap = cv2.VideoCapture(cv2.CAP_DSHOW)
or
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
I found a problem in your code. You may have updated your cv2 library to version 4; that is why neither of your two conditions, shown below, is satisfied.
if version == '3':
image, contours, hierarchy = cv2.findContours(thresh1.copy(), \
cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
elif version == '2':
contours, hierarchy = cv2.findContours(thresh1.copy(),cv2.RETR_TREE, \
cv2.CHAIN_APPROX_NONE)
You should remove elif version == '2': and simply use else: instead. That might help.
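A minimal sketch of that change (the only assumptions are that OpenCV 3.x returns three values from findContours while 2.x and 4.x return two, and that checking only the major version number avoids unpacking errors on builds that report a four-part version string):

major = cv2.__version__.split('.')[0]
if major == '3':
    # OpenCV 3.x: (image, contours, hierarchy)
    _, contours, hierarchy = cv2.findContours(thresh1.copy(),
                                              cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
else:
    # OpenCV 2.x and 4.x: (contours, hierarchy)
    contours, hierarchy = cv2.findContours(thresh1.copy(),
                                           cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)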
I have this code that I downloaded from GitHub to use in an OpenCV project. Everything worked fine the first time, but after that it won't open, and it keeps showing me the following error at line 8:
Traceback (most recent call last):
File "hand.py", line 8, in <module>
crop_img = img[100:300,100:300]
TypeError: 'NoneType' object is not subscriptable
Here is the code:
import cv2
import numpy as np
import math
cap = cv2.VideoCapture(0)
while(cap.isOpened()):
ret, img = cap.read()
cv2.rectangle(img,(300,300),(100,100),(0,255,0),0)
crop_img = img[100:300,100:300]
grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
value = (35, 35)
blurred = cv2.GaussianBlur(grey, value, 0)
_, thresh1 = cv2.threshold(blurred, 127, 255,
cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
cv2.imshow('Thresholded', thresh1)
contours, hierarchy = cv2.findContours(thresh1.copy(),cv2.RETR_TREE, \
cv2.CHAIN_APPROX_NONE)
max_area = -1
for i in range(len(contours)):
cnt=contours[i]
area = cv2.contourArea(cnt)
if(area>max_area):
max_area=area
ci=i
cnt=contours[ci]
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(crop_img,(x,y),(x+w,y+h),(0,0,255),0)
hull = cv2.convexHull(cnt)
drawing = np.zeros(crop_img.shape,np.uint8)
cv2.drawContours(drawing,[cnt],0,(0,255,0),0)
cv2.drawContours(drawing,[hull],0,(0,0,255),0)
hull = cv2.convexHull(cnt,returnPoints = False)
defects = cv2.convexityDefects(cnt,hull)
count_defects = 0
cv2.drawContours(thresh1, contours, -1, (0,255,0), 3)
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
if angle <= 90:
count_defects += 1
cv2.circle(crop_img,far,1,[0,0,255],-1)
#dist = cv2.pointPolygonTest(cnt,far,True)
cv2.line(crop_img,start,end,[0,255,0],2)
#cv2.circle(crop_img,far,5,[0,0,255],-1)
if count_defects == 1:
cv2.putText(img,"this is 2", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 2:
str = "this is 3 !!!"
cv2.putText(img, str, (5,50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
elif count_defects == 3:
cv2.putText(img,"This is 4 :P", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 4:
cv2.putText(img,"this is 5 !!!", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
else:
cv2.putText(img,"this is 0 !!!", (50,50),\
cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
#cv2.imshow('drawing', drawing)
#cv2.imshow('end', crop_img)
cv2.imshow('Gesture', img)
all_img = np.hstack((drawing, crop_img))
cv2.imshow('Contours', all_img)
k = cv2.waitKey(10)
if k == 27:
break
I found the solution. It appears that some flaky video drivers return an invalid first frame. All I did was check ret and continue if it is False, and it is working fine now.
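A minimal sketch of that check (the rest of the loop body stays as in the question):

while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        continue  # the driver returned an invalid frame; try reading again
    crop_img = img[100:300, 100:300]
    # the remaining processing is unchanged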
The exception indicates that cap.read() returned None for img. You should check for that:
cap = cv2.VideoCapture(0)
while(cap.isOpened()):
ret, img = cap.read()
if not ret: break
The documentation says:
If no frames has been grabbed (camera has been disconnected, or there are no more frames in video file), the methods return false and the functions return NULL pointer.
So if you are using a USB webcam, make sure that your USB connection is stable, particularly if you are using any USB hubs or extensions, or if your port is in bad shape.
Hi, in the code below I'm computing a frame difference, and then I want to find the contours that appear from the frame difference and draw them whenever they appear. The code below works well for the differencing, but my problem is with the contours: I can't draw them when they appear. When I run it, this error message appears:
cnt = contours[0]: IndexError: list index out of range
I have used cnt = contours[0] in previous examples and it works, but I don't know what the problem is here. Thanks for the help.
import cv2
import sys
import time
import numpy as np
BLUR_SIZE = 3
NOISE_CUTOFF = 12
cam = cv2.VideoCapture(0)
cam.set(3,640)
cam.set(4,480)
window_name = "delta view"
window_name_now = "now view"
frame_now = cam.read()[1]
frame_now = cam.read()[1]
frame_now = cv2.cvtColor(frame_now, cv2.COLOR_RGB2GRAY)
frame_now = cv2.blur(frame_now, (BLUR_SIZE, BLUR_SIZE))
frame_prior = frame_now
delta_count_last = 1
while True:
frame_delta = cv2.absdiff(frame_prior, frame_now)
frame_delta = cv2.threshold(frame_delta, NOISE_CUTOFF, 255, 3)[1]
delta_count = cv2.countNonZero(frame_delta)
cv2.normalize(frame_delta, frame_delta, 0, 255, cv2.NORM_MINMAX)
frame_delta = cv2.flip(frame_delta, 1)
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(frame_delta, cv2.MORPH_OPEN, kernel)
cont_img = opening.copy()
image,contours, hierarchy = cv2.findContours(cont_img, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnt=contours[0]
for cnt in contours:
area = cv2.contourArea(cnt)
if area < 2000 or area > 4000:
continue
if len(cnt) < 5:
continue
ellipse = cv2.fitEllipse(cnt)
cv2.ellipse(roi, ellipse, (0,255,0), 2)
cv2.imshow('Contours', roi)
cv2.putText(frame_delta, "DELTA: %d" % (delta_count),
(5, 15), cv2.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 255))
cv2.imshow(window_name, opening)
#frame_delta = cv2.threshold(frame_delta, 92, 255, 0)[1]
dst = cv2.flip(frame_now, 1)
dst = cv2.addWeighted(dst,1.0, frame_delta,0.9,0)
cv2.imshow(window_name_now, dst)
if (delta_count_last == 0 and delta_count != 0):
sys.stdout.write("MOVEMENT %f\n" % time.time())
sys.stdout.flush()
elif delta_count_last != 0 and delta_count == 0:
sys.stdout.write("STILL %f\n" % time.time())
sys.stdout.flush()
delta_count_last = delta_count
frame_prior = frame_now
frame_now = cam.read()[1]
frame_now = cv2.cvtColor(frame_now, cv2.COLOR_RGB2GRAY)
frame_now = cv2.blur(frame_now, (BLUR_SIZE, BLUR_SIZE))
key = cv2.waitKey(10)
if key == 0x1b or key == ord('q'):
cv2.destroyWindow(window_name)
break
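For reference, a minimal sketch of one way around the IndexError (not from the original post): contours is an empty list on frames with no detected motion, so contours[0] raises, and since the for loop assigns cnt itself, the bare indexing can be dropped and the list checked before use. The three-value findContours unpacking from the question is kept; drawing on frame_now is an assumption, because roi is never defined in the original.

image, contours, hierarchy = cv2.findContours(cont_img, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
if len(contours) == 0:
    print("no contours in this frame")  # nothing moved, nothing to draw
else:
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < 2000 or area > 4000:
            continue
        if len(cnt) < 5:
            continue  # fitEllipse needs at least 5 points
        ellipse = cv2.fitEllipse(cnt)
        cv2.ellipse(frame_now, ellipse, 255, 2)  # frame_now is grayscale here, so draw in white
        cv2.imshow('Contours', frame_now)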