I'm working with opencv-python on a tracking project, and I get this error when calling estimatePoseSingleMarkers. Any idea?
cv2 version 3.2.0-dev
python version 2.7.12
Traceback (most recent call last):
File "test_aruco_detect.py", line 53, in <module>
rvecs , tvecs = aruco.estimatePoseSingleMarkers(corners, 0.1765, cameraMatrix, distCoeffs)
ValueError: too many values to unpack
the code:
import numpy as np
import cv2
import cv2.aruco as aruco

with np.load('/home/odroid/camera_calibration/testfile.npz') as X:
    cameraMatrix, distCoeffs = [X[i] for i in ('arr_0', 'arr_1')]

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) #480x640

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()

    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

    if ids != None:  # if at least one id is detected
        gray = aruco.drawDetectedMarkers(gray, corners, ids)
        rvecs, tvecs = aruco.estimatePoseSingleMarkers(corners, 0.1765, cameraMatrix, distCoeffs)
        aruco.drawAxis(gray, cameraMatrix, distCoeffs, rvecs, tvecs)

    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(...)

Edit: according to the documentation (http://docs.opencv.org/trunk/d9/d6a/group__aruco.html#ga84dd2e88f3e8c3255eb78e0f79571bd1), I was missing the _objPoints return value.
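For completeness, a minimal sketch of the corrected unpacking inside the loop, using the same arguments as in the question; the extra _objPoints return value can simply be ignored:

# In this OpenCV version estimatePoseSingleMarkers returns three values,
# so unpack all three (the object points are not needed here).
rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.1765, cameraMatrix, distCoeffs)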
I'm trying to run an OpenCV people-counting tutorial; the tutorial is available here: https://fedemejia.com/?p=68. But when I run the following code from the command line, I get this error:
File "VideoCapture.py", line 25, in
_, contours0, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
ValueError: not enough values to unpack (expected 3, got 2)
My Python version is 3.6.8 and OpenCV is 4.0.0.
import numpy as np
import cv2

cap = cv2.VideoCapture('peopleCounter.avi')  # Open video file
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)  # Create the background subtractor

kernelOp = np.ones((3,3), np.uint8)
kernelCl = np.ones((11,11), np.uint8)
areaTH = 500

while(cap.isOpened()):
    ret, frame = cap.read()  # read a frame
    fgmask = fgbg.apply(frame)  # Use the subtractor

    try:
        ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
        # Opening (erode -> dilate) to remove noise
        mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
        # Closing (dilate -> erode) to join white regions
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernelCl)
    except:
        # if there are no more frames to show...
        print('EOF')
        break

    _, contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours0:
        cv2.drawContours(frame, cnt, -1, (0,255,0), 3, 8)
        area = cv2.contourArea(cnt)
        print(area)
        if area > areaTH:
            #################
            #   TRACKING    #
            #################
            M = cv2.moments(cnt)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.circle(frame, (cx,cy), 5, (0,0,255), -1)
            img = cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2)

    cv2.imshow('Frame', frame)

    # Abort and exit with 'Q' or ESC
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()  # release video file
cv2.destroyAllWindows()  # close all openCV windows
It was supposed to give the result described in the tutorial.

In OpenCV 4.0, cv2.findContours() returns only 2 values, so it should be contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE). See https://docs.opencv.org/master/df/d0d/tutorial_find_contours.html
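If the same script has to run on both OpenCV 3.x and 4.x, a small version-agnostic sketch (my own addition, not part of the answer above) is:

# cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x,
# but only (contours, hierarchy) in OpenCV 2.x and 4.x.
result = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours0, hierarchy = result if len(result) == 2 else result[1:]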
I wrote code that takes a video, recognizes a human face, and calculates the distance of the human from the camera (which is the eye of a robot). I need the data in txt format, so I write it to distance_data.txt, but the file is always empty.

I ran the code on a Mac and it worked perfectly fine; the file was not empty.
import cv2
import time
import numpy as np

person_cascade = cv2.CascadeClassifier('human_recognition.txt')
cap = cv2.VideoCapture('oct23_2.avi')

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('oct23_2out.avi', fourcc, 20.0, (640,360))
distance_data = open('distance_data.txt','w')
timer = -1

while (cap.isOpened()):
    r, frame = cap.read()
    if r:
        frame = cv2.resize(frame, (640,360))  # Downscale to improve frame rate
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # Haar-cascade classifier needs a grayscale image
        cv2.imshow('gray', gray_frame)
        #print(gray_frame.type())
        rects = person_cascade.detectMultiScale(gray_frame)
        max = 0
        #print(rects)
        (x, y, w, h) = rects[0]
        cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)
        #fshape = frame.shape
        #fheight = fshape[0]
        #fwidth = fshape[1]
        timer = timer + 1
        xpos = (0.03421)*(w*w) - 8.1183*w + 521.841
        yn = 360 - y
        heightinvid = 0.0197377*yn - 0.0194
        realhieght = ((-0.4901)*(heightinvid*heightinvid) + 4.4269*heightinvid + 8.30356) / 0.927183854
        horizontalp = x + w/2
        #print(timer+1, ",", xpos)
        #if (xpos<=41 and xpos>=40):
        #print("height of the human is ", realhieght+149, "or", (-0.000184)*yn*yn+0.08506*yn+8.43461+149)
        cm_deviation_in_x = -0.02033898*(320 - horizontalp)
        #print("Horizontal position of human is ", cm_deviation_in_x, " cm.")
        newxpos = format(xpos, '.3f')
        distance_data.write("Hi")
        distance_data.write(str(newxpos))
        distance_data.write(',')
        distance_data.write(str(format(cm_deviation_in_x, '.3f')))
        distance_data.write("\r\n")
        #print(w)
        #print("x position of the human is = ", xpos)
        #fourcc = cv2.VideoWriter_fourcc(*'XVID')
        #out = cv2.VideoWriter('output25.avi',fourcc, 20.0, (fwidth,fheight))
        cv2.imshow("preview", frame)
        out.write(frame)
    k = cv2.waitKey(10)
    #print(w,h)
    if k & 0xFF == ord("q"):  # Exit condition
        break
    distance_data.flush()

cap.release()
cv2.destroyAllWindows()
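The thread contains no answer for this question. One thing worth ruling out (an assumption on my part, not a confirmed fix): rects[0] raises an IndexError on any frame with no detection, which stops the script early, and writes still sitting in the buffer can be lost if the process is killed rather than exiting cleanly. A defensive sketch, reusing the names from the question:

# Sketch only: guard empty detections and flush per write so data survives an interruption.
with open('distance_data.txt', 'w') as distance_data:
    while cap.isOpened():
        r, frame = cap.read()
        if not r:
            break
        frame = cv2.resize(frame, (640, 360))
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        rects = person_cascade.detectMultiScale(gray_frame)
        if len(rects) == 0:  # skip frames with no detection instead of crashing on rects[0]
            continue
        (x, y, w, h) = rects[0]
        xpos = 0.03421 * (w * w) - 8.1183 * w + 521.841
        distance_data.write(format(xpos, '.3f') + '\r\n')
        distance_data.flush()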
I am trying to write a python3-opencv3 script that reads a color video, converts it to grayscale, and saves it back. (Trying exercises to learn Python & OpenCV.)

As I am working on Ubuntu, I found out that the cv2.VideoWriter isColor flag would not work (it reportedly works only on Windows).
import numpy as np
import cv2

cap = cv2.VideoCapture('output.avi')
ret, frame = cap.read()
print('ret =', ret, 'W =', frame.shape[1], 'H =', frame.shape[0], 'channel =', frame.shape[2])

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
height, width = gray.shape[:2]
print(height, width)
cv2.imshow('frame', gray)

FPS = 20.0
FrameSize = (frame.shape[1], frame.shape[0])
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter('Video_output.avi', fourcc, FPS, (width, height))

while(ret):
    ret, frame = cap.read()
    if cv2.waitKey(1) & 0xFF == ord('q') or ret == False:
        break
    print('ret =', ret, 'W =', frame.shape[1], 'H =', frame.shape[0], 'channel =', frame.shape[2])
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Save the video
    out.write(gray)
    cv2.imshow('frame', gray)

cap.release()
out.release()
cv2.destroyAllWindows()
It gives me this error:

error: (-215) img.cols == width && img.rows == height*3 in function write

I am guessing the problem is with the frame size and the grayscale conversion, but I am not able to figure it out. I have tried different combinations of height & width, but none make the program execute correctly.

Can someone help?
As requested in the comments:
Traceback (most recent call last):
File "/home/akash/Coded/VideoCode/Test3", line 70, in <module>
out.write(gray)
cv2.error: /home/travis/miniconda/conda-bld/conda_1486587069159/work/opencv-3.1.0/modules/videoio/src/cap_mjpeg_encoder.cpp:834: error: (-215) img.cols == width && img.rows == height*3 in function write
OpenCV error -215 means "assertion failed". The assertion itself is listed in the message (the phrase "Assertion failed" probably should be included there too; you can open a ticket about this in OpenCV's bug tracker). (Update: my pull request with this change was accepted on 06.03 and was released in OpenCV 3.4.2.)

As one can deduce from the assertion, the writer expects a 3-color-channel frame. To write a grayscale video, you need to pass isColor=False to the VideoWriter constructor:
out = cv2.VideoWriter('Video_output.avi', fourcc, FPS, (width,height), False)
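A small usage note beyond the answer: if you would rather keep the default color writer, another option (a sketch, not part of the original answer) is to expand the grayscale frame back to three channels before writing:

# Convert the single-channel frame back to BGR so it matches a color VideoWriter.
out.write(cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR))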
I am trying to run the following code on my Raspberry Pi, but it is giving me this error:
Traceback (most recent call last):
  File "video_capture_thresh.py", line 59, in <module>
    main()
  File "video_capture_thresh.py", line 11, in main
    crop = frame[180:320, 0:638]
TypeError: 'NoneType' object has no attribute '__getitem__'
import numpy as np
import cv2

#cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture(1)

def main():
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        # Our operations on the frame come here
        crop = frame[180:320, 0:638]
        crop2 = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
        th, crop2 = cv2.threshold(crop2, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        previous = cv2.GaussianBlur(crop2, (5,5), 0)
        contours, hierarchy = cv2.findContours(crop2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.rectangle(previous, (0,0), (638,140), (0,255,0), 5)

        i = 0
        for cnt in contours:
            moments = cv2.moments(cnt)  # Calculate moments
            if moments['m00'] != 0:
                cx = int(moments['m10']/moments['m00'])  # cx = M10/M00
                cy = int(moments['m01']/moments['m00'])  # cy = M01/M00
                moment_area = moments['m00']  # Contour area from moment
                contour_area = cv2.contourArea(cnt)  # Contour area using in_built function
                perimeter = cv2.arcLength(cnt, True)
                cv2.drawContours(previous, [cnt], 0, (0,255,0), 3)
                px = previous[cy,cx]
                if px == 255:
                    i = i + 1
                    cv2.circle(previous, (cx,cy), 5, (0,0,255), -1)

        cv2.imshow("Previous", previous)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

main()
cap.release()
cv2.destroyAllWindows()
Try adding a check to make sure the frame was actually read before you do your processing:

ret, frame = cap.read()
if ret == True:
    crop = frame[180:320, 0:638]
The read method is the most convenient way to read a video file or capture data: it decodes and returns the just-grabbed frame. If no frame has been grabbed (the camera has been disconnected, or there are no more frames in the video file), the method returns false and the function returns a NULL pointer.

Check whether the image is actually being captured from the camera. What kind of camera are you using? Note that cv2.VideoCapture does not work with the Raspberry Pi camera module; it only works with a USB webcam.
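Putting that check into the question's own loop might look like this (a sketch; the rest of the processing stays as posted):

while True:
    ret, frame = cap.read()
    if not ret:
        # nothing was grabbed (camera disconnected or no more frames), so stop
        break
    crop = frame[180:320, 0:638]
    # ... rest of the processing from the question ...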
I'm working on face detection in OpenCV (2.4.6) and Python (2.7). I have very simple code, but it's not giving me the desired output.

This is my code:
import numpy as np
import cv2

cam = cv2.VideoCapture(0)
name = 'detect'
face_cascade = cv2.CascadeClassifier('C:\opencv\data\haarcascades\haarcascade_frontalface_default.xml')
cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE)

while True:
    s, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(img, 1.3, 5)
    for (x,y,w,h) in faces:
        img = cv2.rectangle(gray, (x,y), (x+w,y+h), (255,0,0), 2)
    cv2.imshow(name, img)
    k = cv2.waitKey(0)
    if k == 27:
        cv2.destroyWindow(name)
        break
When I run this code, my webcam starts and gives me a blank window. Then the webcam turns off, and in the editor I get the following error:
%run "D:/6th sem/1.OpenCV + Python/det.py"
---------------------------------------------------------------------------
error Traceback (most recent call last)
C:\Users\HP\AppData\Local\Enthought\Canopy32\App\appdata\canopy-1.3.0.1715.win-x86\lib\site-packages\IPython\utils\py3compat.pyc in execfile(fname, glob, loc)
195 else:
196 filename = fname
--> 197 exec compile(scripttext, filename, 'exec') in glob, loc
198 else:
199 def execfile(fname, *where):
D:\6th sem\1.OpenCV + Python\det.py in <module>()
7 while True:
8 s, img = cam.read()
----> 9 gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
10 faces = face_cascade.detectMultiScale(img, 1.3, 5)
11 #print s
error: ..\..\..\src\opencv\modules\imgproc\src\color.cpp:3402: error: (-215) scn == 3 || scn == 4
Any suggestions are welcome. Thanks in advance.
Some webcams need a warm-up time and deliver empty frames on startup; you want to check for that.

Also, who said that cv2.rectangle returns anything? Where did you get that idea? From SO?
while cam.isOpened():
    s, img = cam.read()
    if s:  # only process frames that were actually captured
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # hmm, 5 required neighbours is actually a lot.
        for (x,y,w,h) in faces:
            cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)  # if you want colors, don't paint into a grayscale...
        cv2.imshow(name, img)
        k = cv2.waitKey(0)  # note: waitKey(0) blocks until a key press; use waitKey(1) for a live stream
        if k == 27:
            cv2.destroyWindow(name)
            break