OpenCV matching two objects and tracking - Python

I have an image containing my object, and a template image of the object itself. When I execute my code:
import cv2
import numpy as np

cap = cv2.VideoCapture("c.mp4")
while(True):
    ret, img_rgb = cap.read()
    img_rgb = img_rgb[400:1200, 10:1000]
    img_rgb = cv2.imread('ccc.jpg')
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread('job/ff2.jpg', 0)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    loc = np.where(res >= threshold)
    for pt in zip(*loc[::-1]):
        print("yes")
        cv2.rectangle(img_gray, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
    cv2.imshow('res.png', img_gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
my object is detected. But when I use the video, it does not find my object: https://youtu.be/O1IB0dnDrWw
Any help please.
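A likely cause, judging from the posted code: the line img_rgb = cv2.imread('ccc.jpg') inside the loop replaces every video frame with the still image, so the matching never actually runs on the video. Below is a minimal sketch of the loop without that line, matching against the cropped video frames (file names and the 0.8 threshold come from the question; everything else is an assumption, not a confirmed fix):

import cv2
import numpy as np

cap = cv2.VideoCapture("c.mp4")
template = cv2.imread('job/ff2.jpg', 0)
w, h = template.shape[::-1]

while True:
    ret, img_rgb = cap.read()
    if not ret:
        break
    img_rgb = img_rgb[400:1200, 10:1000]  # crop to the region of interest
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= 0.8)
    for pt in zip(*loc[::-1]):
        # draw on the colour frame so the red rectangle is visible
        cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
    cv2.imshow('res', img_rgb)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()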

Related

How to capture photos with cv2.imwrite()?

cv2.imwrite() is not working. I am trying to take 100 photos when a face is detected.
Here is the code:
import cv2
import datetime

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
while True:
    _, frame = cap.read()
    greyImg = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    face = face_cascade.detectMultiScale(greyImg, 1.3, 5)
    for x, y, w, h in face:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 5)
        time_stamp = datetime.datetime.now().strftime("%D-%m-%Y")
        file_name = f"{time_stamp}-face.jpg"
        for i in range(100):
            cv2.imwrite(file_name, greyImg)
    cv2.imshow("Face recogniton", cv2.flip(frame, 1))
    if cv2.waitKey(1) == ord("q"):
        break
The main bug in your code is that you loop 100 times saving the same image to the same file name.
This code should solve your issue:
import cv2
from datetime import datetime
import numpy as np

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
faces_counter: int = 0
while True:
    _, frame = cap.read()
    greyImg = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    face = face_cascade.detectMultiScale(greyImg, 1.3, 5)
    if np.any(face):
        faces_counter += 1
        if faces_counter > 100:
            break
    for x, y, w, h in face:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 5)
        now = datetime.now()
        current_time = now.strftime("%H_%M_%S")
        file_name = f"Img_{current_time}_{faces_counter}-face.png"
        cv2.imwrite(f"{str(file_name)}", frame)
    cv2.imshow("Face recogniton", cv2.flip(frame, 1))
    if cv2.waitKey(1) == ord("q"):
        break
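Note for both snippets: despite the name, greyImg is not actually grayscale; cv2.COLOR_RGB2BGR only swaps the channel order. If a true grayscale image is wanted for detection or saving, a conversion like the following (an assumption, not part of the original answer) would be used:

# convert a BGR camera frame to a single-channel grayscale image (assumed fix)
greyImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)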

I'm trying a project with OpenCV and can't get past this error

cv2.error: OpenCV(4.4.0) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-2y91i_7w\opencv\modules\objdetect\src\cascadedetect.cpp:1689: error: (-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
[ WARN:0] global C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-2y91i_7w\opencv\modules\videoio\src\cap_msmf.cpp (435) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
This is the error I'm facing; below is my code:
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')

def detect(gray, frame):
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), ((x+w), (y+h)), (2555, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        smiles = smile_cascade.detectMultiScale(roi_gray, 1.8, 20)
        for (sx, sy, sw, sh) in smiles:
            cv2.rectangle(roi_color, (sx, sy), ((sx + sw), (sy + sh)), (0, 0, 225), 2)
        return frame

video_capture = cv2.VideoCapture(0)
while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    if cv2.waitkey(1) & xff == qrd('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
I got a different error and fixed the indentation of the return line of the detect() method; see the comment.
Also, there were some errors around the waitkey() call, which should actually be waitKey().
This should work (at least it does on my machine):
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml')

def detect(gray, frame):
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), ((x+w), (y+h)), (2555, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        smiles = smile_cascade.detectMultiScale(roi_gray, 1.8, 20)
        for (sx, sy, sw, sh) in smiles:
            cv2.rectangle(roi_color, (sx, sy), ((sx + sw), (sy + sh)), (0, 0, 225), 2)
    return frame  # << outdented

video_capture = cv2.VideoCapture(0)
while True:
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    # changed the waitKey() handling below and added ret:
    keypressed = cv2.waitKey(10)
    if keypressed == ord('q') or not ret:
        break
video_capture.release()
cv2.destroyAllWindows()
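As for the assertion error itself: (-215:Assertion failed) !empty() in detectMultiScale means the classifier is empty, i.e. the cascade XML file was not found at the given path. A small check like the sketch below (using the cv2.data.haarcascades directory bundled with the opencv-python package) can rule this out:

import cv2

# load the cascades from the directory shipped with opencv-python
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
smile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_smile.xml')

# empty() is True when the XML file failed to load
assert not face_cascade.empty(), "face cascade not loaded"
assert not smile_cascade.empty(), "smile cascade not loaded"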

OpenCV template matching does not work if the object is moved farther or closer to the camera

I am working with template matching, but when I change the distance between the object and the camera the program no longer finds it.
Code:
# -*- coding: cp1254 -*-
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
template = cv2.imread('template.jpg', 0)
w, h = template.shape[::-1]
x, y = 0, 0
while(True):
    ret, frame = cap.read()
    i = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(i, template, 3)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    x = (top_left[0] + bottom_right[0])/2
    y = (top_left[1] + bottom_right[1])/2
    # draw rectangle around the match
    if x > 0:
        p1 = open('kn.txt', 'w')
        p1.write('p1')
        print "p1"
        p1.flush()
        p1.close()
        cv2.rectangle(frame, top_left, bottom_right, (255, 125, 125), 3)
        cv2.putText(frame, "[" + str(x) + "," + str(y) + "]", (bottom_right[0]+10, bottom_right[1]+10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 125, 0), 2)
    cv2.imshow('cikis', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cap.release()
        break
cv2.destroyAllWindows()
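This behaviour is expected: matchTemplate is not scale invariant, so when the object moves closer to or farther from the camera the template no longer fits. One common workaround, sketched below on a single still image under assumed settings (the scale range, step count, file names and the 0.7 threshold are all assumptions, not from the question), is to match several resized copies of the template and keep the best score:

import cv2
import numpy as np

frame_gray = cv2.cvtColor(cv2.imread('scene.jpg'), cv2.COLOR_BGR2GRAY)  # assumed test image
template = cv2.imread('template.jpg', 0)

best = None  # (score, top_left, (width, height))
for scale in np.linspace(0.5, 1.5, 11):
    resized = cv2.resize(template, None, fx=scale, fy=scale)
    th, tw = resized.shape
    if th > frame_gray.shape[0] or tw > frame_gray.shape[1]:
        continue  # template larger than the image at this scale
    res = cv2.matchTemplate(frame_gray, resized, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(res)
    if best is None or max_val > best[0]:
        best = (max_val, max_loc, (tw, th))

if best is not None and best[0] > 0.7:
    score, top_left, (tw, th) = best
    cv2.rectangle(frame_gray, top_left, (top_left[0] + tw, top_left[1] + th), 255, 2)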

python cv2 BackgroundSubtractor imshow function error

Problem
I want to detect moving objects in a video, but the capture does not seem to read the video. I use conda Jupyter on macOS and have installed ffmpeg with brew, but I am still unable to capture the video.
Code:
import numpy as np
import cv2
import time

cap = cv2.VideoCapture('test.avi')
time.sleep(2)
fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=True)
while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    cv2.imshow('original', frame)
    cv2.imshow('fg', fgmask)
    k = cv2.waitKey(30) & 0xff
    for c in contours:  # note: contours (and dilated below) are not defined anywhere in this snippet
        # get the bounding-box coordinates of the contour
        x, y, w, h = cv2.boundingRect(c)
        # compute the contour area
        area = cv2.contourArea(c)
        if 500 < area < 3000:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("detection", frame)
    cv2.imshow("back", dilated)
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
Error:
Couldn't read video stream from file "test.avi"
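The message suggests the file itself cannot be opened, not that the background subtractor failed. A quick check worth running first (a sketch; the file location is an assumption) is whether VideoCapture actually opened the file, using an absolute path to rule out a working-directory problem:

import os
import cv2

path = os.path.abspath('test.avi')  # assumed location of the video
cap = cv2.VideoCapture(path)
if not cap.isOpened():
    raise IOError(f"Cannot open video file: {path}")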

object tracking by python and opencv

My code is:
import numpy as np
import cv2

cap = cv2.VideoCapture("slow.flv")
ret, frame = cap.read()
r, h, c, w = 250, 90, 400, 125  # simply hardcoded the values
track_window = (c, r, w, h)
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                   np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
while(1):
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        x, y, w, h = track_window
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow("img2", img2)
        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break
        else:
            cv2.imwrite(chr(k)+".jpg", img2)
    else:
        break
cv2.destroyAllWindows()
cap.release()
The error is:
File "objecttracking.py", line 10, in <module>
    roi = frame[r:r+h, c:c+w]
TypeError: 'NoneType' object has no attribute '__getitem__'
It would appear that cap.read() is returning None for the value you're storing in frame. Thus, when you try to index into frame with frame[r:r+h, c:c+w], you get an error.
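A minimal guard (a sketch, not part of the original answer) makes the failure explicit: if "slow.flv" is missing or cannot be decoded, cap.read() returns (False, None), and slicing None raises exactly the TypeError shown above:

import cv2

cap = cv2.VideoCapture("slow.flv")
ret, frame = cap.read()
if not ret or frame is None:
    raise IOError("Could not read the first frame from slow.flv - check the path and codecs")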
