my code is
import numpy as np
import cv2
cap = cv2.VideoCapture("slow.flv")
ret,frame = cap.read()
r,h,c,w = 250,90,400,125 # simply hardcoded the values
track_window = (c,r,w,h)
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)),
np.array((180.,255.,255.)))
roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
while(1):
ret ,frame = cap.read()
if ret == True:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
ret, track_window = cv2.meanShift(dst, track_window, term_crit)
x,y,w,h = track_window
img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
cv2.imshow("img2",img2)
k = cv2.waitKey(60) & 0xff
if k == 27:
break
else:
cv2.imwrite(chr(k)+".jpg",img2)
else:
break
cv2.destroyAllWindows()
cap.release()
the error is:
File "objecttracking.py", line 10, in
roi = frame[r:r+h, c:c+w]
TypeError: 'NoneType' object has no attribute 'getitem'
It would appear that cap.read() is returning a None for the value you're storing to frame. Thus when you try and index into frame with frame[r:r+h, c:c+w] you get an error.
Related
cv2.imwrite() is not working. I am trying for taking 100 photos when face will be detected.
Here is the code given:
import cv2
import datetime
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
while True:
_,frame = cap.read()
greyImg = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
face = face_cascade.detectMultiScale(greyImg,1.3,5)
for x,y,w,h in face:
cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0),5)
time_stamp = datetime.datetime.now().strftime("%D-%m-%Y")
file_name = f"{time_stamp}-face.jpg"
for i in range(100):
cv2.imwrite(file_name,greyImg)
cv2.imshow("Face recogniton", cv2.flip(frame,1))
if cv2.waitKey(1) == ord("q"):
break
The main bug in your code that you are looping the 100 times saving the same image.
This code is supposed to solve your issue:
import cv2
from datetime import datetime
import numpy as np
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
faces_counter: int = 0
while True:
_,frame = cap.read()
greyImg = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
face = face_cascade.detectMultiScale(greyImg,1.3,5)
if np.any(face):
faces_counter += 1
if faces_counter > 100:
break
for x,y,w,h in face:
cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0),5)
now = datetime.now()
current_time = now.strftime("%H_%M_%S")
file_name = f"Img_{current_time}_{faces_counter}-face.png"
cv2.imwrite(f"{str(file_name)}", frame)
cv2.imshow("Face recogniton", cv2.flip(frame,1))
if cv2.waitKey(1) == ord("q"):
break
I need some help here plz :)
I have this piece of code with open cv:
import cv2
import numpy as np
import time
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
low_red = np.array([100, 100, 100])
high_red = np.array([225, 225, 225])
mask = cv2.inRange(frame, low_red, high_red)
hasColor = np.sum(mask)
if hasColor > 1000000:
print(f'Hand opened, infos : {hasColor}')
elif hasColor > 500000 and hasColor < 1000000:
print(f'Hand closed, infos : {hasColor}')
cv2.imshow("Camera", frame)
cv2.imshow("Mask", mask)
if cv2.waitKey(1) & 0xFF == ord('e'):
break
And I would like that it detect only the objects of a certain size (like a hand) and not the smaller ones.
Thanks for help :)
EDIT: Made some good progress but still don't know how to get the size
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
low = np.array([100, 100, 100])
high = np.array([225, 225, 225])
mask = cv2.inRange(frame, low, high)
cv2.imshow("Camera", frame)
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
ret, tresh = cv2.threshold(gray, 127, 255, 0)
contours, hierarchy = cv2.findContours(tresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(contours)):
x,y,w,h = cv2.boundingRect(contours[i])
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow("Mask", mask)
cv2.imshow("Hull", frame)
if cv2.waitKey(1) & 0xFF == ord('e'):
break
So I think I have to get the value of the bounding box but idk how
Problem solved thanks to a kind human,
I just had to add the following code in the for loop:
if h > 150 and w > 150:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
I just had to check for the size, thanks for your help !
The code shows correct image, but show error message after image 'frame' playback. so I couldn't get 'res' image
It just shows me 'No Object Files' error message.
Which part should I fix to make it work?
import cv2
import numpy as np
cap = cv2.VideoCapture('ObjectTrack.mp4')
while cap.isOpened():
ret, frame = cap.read()
if not ret:
print("No Object Files")
break
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_orange = np.array([100,200,200])
upper_orange = np.array([140,255,255])
mask_orange = cv2.inRange(hsv, lower_orange, upper_orange)
res = cv2.bitwise_and(frame,frame,mask = mask_orange)
cv2.imshow('frame',frame)
cv2.imshow('res',res)
if cv2.waitKey(50) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
The reason behind your error is that the frame is None(Null). and your code enters into this if
if not ret:
print("No Object Files")
break
and then gets out of the while loop ( while cap.isOpened(): ... ).
Just change the indentation and also the if condition
like this:
while cap.isOpened():
ret, frame = cap.read()
if ret: # if frame is not None:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_orange = np.array([100, 200, 200])
upper_orange = np.array([140, 255, 255])
mask_orange = cv2.inRange(hsv, lower_orange, upper_orange)
res = cv2.bitwise_and(frame, frame, mask=mask_orange)
cv2.imshow('frame', frame)
cv2.imshow('res', res)
if cv2.waitKey(50) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
The reason is further discussed here.
I have image with my object
and my object
when i execute my code
import cv2
import numpy as np
cap = cv2.VideoCapture("c.mp4")
while(True):
ret, img_rgb = cap.read()
img_rgb = img_rgb[400:1200,10:1000]
img_rgb = cv2.imread('ccc.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('job/ff2.jpg',0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
print("yes")
cv2.rectangle(img_gray, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
cv2.imshow('res.png',img_gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
My object is detected
but when i use video its not find my object in video https://youtu.be/O1IB0dnDrWw
Any help please
I have to track a window in a video and need to paste an image on window,I have used camshift to track the window, but it did not track it correct.My window is in brown color so I have given the following color range.
np.array((110,0,0)--lower
np.array((130,255,255)--higher..
I have red many documents in opencv but not able to figure out which method to follow.I am using opencv2.4.9 with python.
Below is the code which I tried.Please help me out to figure out the exact location of window.emphasized text
#!/usr/bin/env python
import numpy as np
import cv2
cap = cv2.VideoCapture("myvideo.mp4")
# take first frame of the video
ret,frame = cap.read()
#print frame
#print ret
# setup initial location of window
r,h,c,w = 157,40,337,40
track_window = (c,r,w,h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((110,0,0)), np.array((130,255,255)))
roi_hist = cv2.calcHist([hsv_roi],[0],mask,[255],[0,255])
cv2.imshow('img2',roi_hist)
#print roi_hist
cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by at least 1 pt
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
i = 1
while(1):
ret ,frame = cap.read()
if ret == True:
i += 1
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,255],1)
# apply meanshift to get the new location
ret, track_window = cv2.CamShift(dst, track_window, term_crit)
#print track_window
# Draw it on image
x,y,w,h = track_window
img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
cv2.imshow('img2',frame)
k = cv2.waitKey(200) & 0xff
if k == 27:
break
else:
# print "comes here2";
cv2.imwrite(str(i)+"test.jpg",frame)
#break
else:
break
cv2.destroyAllWindows()
cap.release()