I am using a fisheye camera to produce a depth image. I tried the StereoSGBM() method for the depth map, which did not give the desired results, so now I am trying the StereoBM() method.
stereoMatcher = cv2.StereoBM()
preset = 2
ndisparities = 32
WinSize = 5
if not left.grab() or not right.grab():
    print("No more frames")
    break
_, leftFrame = left.retrieve()
#leftFrame = cropHorizontal(leftFrame)
leftHeight, leftWidth = leftFrame.shape[:2]
_, rightFrame = right.retrieve()
#rightFrame = cropHorizontal(rightFrame)
rightHeight, rightWidth = rightFrame.shape[:2]
if (leftWidth, leftHeight) != imageSize:
    print("Left camera has different size than the calibration data")
    break
if (rightWidth, rightHeight) != imageSize:
    print("Right camera has different size than the calibration data")
    break
fixedLeft = cv2.remap(leftFrame, leftMapX, leftMapY, REMAP_INTERPOLATION)
fixedRight = cv2.remap(rightFrame, rightMapX, rightMapY, REMAP_INTERPOLATION)
grayLeft = cv2.cvtColor(fixedLeft, cv2.COLOR_BGR2GRAY)
grayRight = cv2.cvtColor(fixedRight, cv2.COLOR_BGR2GRAY)
Since the compute() function of StereoBM() also takes the disparity as a third argument, I calculated it using:
grayLeft = cv2.cvtColor(fixedLeft, cv2.COLOR_BGR2GRAY)
grayRight = cv2.cvtColor(fixedRight, cv2.COLOR_BGR2GRAY)
disparity = stereoMatcher.compute(grayLeft, grayRight)
depth = stereoMatcher.compute(grayLeft, grayRight)
#cv2.imshow('left', fixedLeft)
#cv2.imshow('right', fixedRight)
cv2.imshow('depth', depth / DEPTH_VISUALIZATION_SCALE)
if cv2.waitKey(1) & 0xFF == ord('q'):
    break
left.release()
right.release()
cv2.destroyAllWindows()
But it is throwing an error which is:
Traceback (most recent call last):
  File "fisheye_depth.py", line 104, in <module>
    disparity = stereoMatcher.compute(grayLeft, grayRight)
TypeError: Incorrect type of self (must be 'StereoMatcher' or its derivative)
Does anyone have any idea what mistake I am making? Please let me know what I am doing wrong when computing the disparity with compute().
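For what it is worth, this TypeError usually means the matcher object was never properly constructed: in OpenCV 3.x, calling cv2.StereoBM() directly yields an object that is not a usable StereoMatcher, and the factory function must be used instead. A minimal sketch, assuming OpenCV 3.x and reusing the parameter values from the snippet above:

stereoMatcher = cv2.StereoBM_create(numDisparities=32, blockSize=5)
# inside the capture loop, after rectification:
disparity = stereoMatcher.compute(grayLeft, grayRight)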
I am attempting to construct a Python script that feeds the RGB values of an image, along with the contours' width and height, into a neural network to determine whether the little RC car I have constructed should go forward or turn right.
Code:
import cv2

cam = cv2.VideoCapture(2)

class Pic_Capture:
    def __init__(self):
        # print("Fuck you")
        self.i = 0

    def Img_Mods(self, cam):
        self.ret, self.frame = cam.read()
        self.buf1 = cv2.flip(self.frame, 1)
        # self.buf2 = cv2.cvtColor(self.buf1, cv2.COLOR_BGR2RGB)
        self.buf3 = cv2.GaussianBlur(self.buf1, (5, 5), 0)
        cv2.imshow('buf2', self.buf3)

    def Measurement_Bullshit(self):
        self.buf4 = cv2.cvtColor(self.buf3, cv2.COLOR_RGB2GRAY)
        self.buf5 = cv2.adaptiveThreshold(self.buf4, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
        _, contours, hierarchy = cv2.findContours(self.buf5, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for cnt in contours:
            M = cv2.moments(cnt)
            self.cx = int(M['m10'] / M['m00'])
            self.cy = int(M['m01'] / M['m00'])
            print(self.cx, self.cy)
        cv2.imshow('buf4', self.buf4)

running = True
i = 0
Camera = Pic_Capture()
while running:
    Camera.Img_Mods(cam)
    if i % 30 == 0 and i != 0:
        Camera.Off_and_On_Again()
        Camera.Measurement_Bullshit()
    i += 1
    print(i)
    if cv2.waitKey(1) == 27:
        Camera.Stop_Code()
Question 1:
I consistently receive this in my console, and I would like to get rid of it for debugging later down the line.
Corrupt JPEG data: 4 extraneous bytes before marker 0xd5
Every time this pops up it is at a different marker, so I do not know how to stop it from being printed to the console.
Question 2:
After the code within the if statement if i % 10 == 0 and i != 0: runs, the code spits back this error:
File "Nueral_Network_Creation.py", line 23, in Measurement_Bullshit self.cx = int(M['m10']/M['m00']) ZeroDivisionError: float division by zero
I understand that the code is most likely not finding contours that it can process properly, or maybe it is something completely different. As always, any help is much appreciated.
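One likely culprit, offered as a guess: cv2.moments() returns m00 == 0 for degenerate (zero-area) contours, and dividing by it raises exactly this ZeroDivisionError. A minimal guard sketch for the loop in Measurement_Bullshit:

for cnt in contours:
    M = cv2.moments(cnt)
    if M['m00'] == 0:
        continue  # zero-area contour: the centroid is undefined, skip it
    self.cx = int(M['m10'] / M['m00'])
    self.cy = int(M['m01'] / M['m00'])
    print(self.cx, self.cy)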
I am trying to execute this code, but I am getting the error stated below. It says that the array size doesn't match. I have tried a few solutions given on the internet: I changed the color values and also tried removing np.array from the lines concerned:
skin_ycrcb_min = np.array((0, 138, 67))
skin_ycrcb_max = np.array((255, 173, 133))
Below is the code:
import cv2
import numpy as np
import util as ut
import svm_train as st
import re

# create and train SVM model each time coz bug in opencv 3.1.0 svm.load()
# https://github.com/Itseez/opencv/issues/4969
model = st.trainSVM(17)

cam = int(raw_input("Enter Camera number: "))
cap = cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX

def nothing(x):
    pass

text = " "
temp = 0
previouslabel = None
previousText = " "
label = None

while(cap.isOpened()):
    _, img = cap.read()
    # bounding box which captures the ASL sign to be detected by the system
    cv2.rectangle(img, (900, 100), (1300, 500), (255, 0, 0), 3)
    img1 = img[100:500, 900:1300]
    img_ycrcb = cv2.cvtColor(img1, cv2.COLOR_BGR2YCR_CB)
    blur = cv2.GaussianBlur(img_ycrcb, (11, 11), 0)
    skin_ycrcb_min = np.array((0, 138, 67))
    skin_ycrcb_max = np.array((255, 173, 133))
    # detecting the hand in the bounding box using skin detection
    mask = cv2.inRange(blur, skin_ycrcb_min, skin_ycrcb_max)
    contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, 2)
    # using contours to capture the skin-filtered image of the hand
    cnt = ut.getMaxContour(contours, 4000)
    if cnt != None:
        # passing the trained model for prediction and fetching the result
        gesture, label = ut.getGestureImg(cnt, img1, mask, model)
        if(label != None):
            if(temp == 0):
                previouslabel = label
            if previouslabel == label:
                previouslabel = label
                temp += 1
            else:
                temp = 0
            if(temp == 40):
                if(label == 'P'):
                    label = " "
                    text = text + label
                if(label == 'Q'):
                    words = re.split(" +", text)
                    words.pop()
                    text = " ".join(words)
                    #text=previousText
                print text
        # showing the best match or prediction
        cv2.imshow('PredictedGesture', gesture)
    # displaying the predicted letter on the main screen
    cv2.putText(img, label, (50, 150), font, 8, (0, 125, 155), 2)
    cv2.putText(img, text, (50, 450), font, 3, (0, 0, 255), 2)
    cv2.imshow('Frame', img)
    cv2.imshow('Mask', mask)
    k = 0xFF & cv2.waitKey(10)
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Error:
Traceback (most recent call last):
  File "C:\Users\hlllv\Desktop\ASL\ASL-Translator-master\ASL.py", line 29, in <module>
    mask = cv2.inRange(blur, skin_ycrcb_min, skin_ycrcb_max)
error: C:\build\2_4_winpack-bindings-win32-vc14-static\opencv\modules\core\src\arithm.cpp:2711: error: (-209) The lower bounary is neither an array of the same size and same type as src, nor a scalar in function cv::inRange
Please help!
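For what it is worth, one commonly suggested fix (an assumption on my part, since it depends on the exact build; the traceback points at the OpenCV 2.4 win32 bindings) is to give the bounds the same 8-bit element type as the image, so inRange accepts them:

skin_ycrcb_min = np.array((0, 138, 67), dtype=np.uint8)
skin_ycrcb_max = np.array((255, 173, 133), dtype=np.uint8)
mask = cv2.inRange(blur, skin_ycrcb_min, skin_ycrcb_max)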
On the first frame of a video, I'm running an object detector that returns the bounding box of an object like this:
<type 'tuple'>: ((786, 1225), (726, 1217), (721, 1278), (782, 1288))
I want to pass this bounding box as the initial bounding box to the tracker. However, I get the following error:
OpenCV Error: Backtrace (The model is not initialized) in init, file /Users/jenkins/miniconda/1/x64/conda-bld/conda_1486588158526/work/opencv-3.1.0/build/opencv_contrib/modules/tracking/src/tracker.cpp, line 81
Traceback (most recent call last):
File "/Users/mw/Documents/Code/motion_tracking/motion_tracking.py", line 49, in <module>
tracker.init(frame, bbox)
cv2.error: /Users/jenkins/miniconda/1/x64/conda-bld/conda_1486588158526/work/opencv-3.1.0/build/opencv_contrib/modules/tracking/src/tracker.cpp:81: error: (-1) The model is not initialized in function init
The frame shape is 1080 x 1920, and the values I'm passing into the tracker are built from that tuple in the code below. I'm not sure if the order I'm sending the bounding box in is wrong, or if I'm doing something else wrong.
tracker = cv2.Tracker_create("MIL")
init_once = False

while True:
    (grabbed, frame) = camera.read()
    if not grabbed:
        break
    symbols = scan(frame)
    for symbol in symbols:
        if not init_once:
            bbox = (float(symbol.location[0][0]), float(symbol.location[0][1]),
                    float(symbol.location[2][0]), float(symbol.location[2][1]))
            tracker.init(frame, bbox)
            init_once = True
            break
        # draw_symbol(symbol, frame)
    _, newbox = tracker.update(frame)
    if _:
        top_left = (int(newbox[0]), int(newbox[1]))
        bottom_right = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(frame, top_left, bottom_right, (200, 0, 0))
    cv2.imshow("asd", frame)
    out.write(frame)

out.release()
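In case it helps, tracker.init() expects the box as (x, y, width, height) rather than as two corner points, and with the corners above ((786, 1225) and (721, 1278)) the code computes a negative width, which can leave the model uninitialized. A hedged sketch of the conversion, assuming symbol.location holds the four corner points shown in the tuple above:

xs = [float(p[0]) for p in symbol.location]
ys = [float(p[1]) for p in symbol.location]
# axis-aligned box around all four corners, as (x, y, w, h)
bbox = (min(xs), min(ys), max(xs) - min(xs), max(ys) - min(ys))
tracker.init(frame, bbox)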
I'm using OpenCV 3.1 on a Raspberry Pi 3. I'm trying to run the following Hough circle detection algorithm:
#!/usr/bin/python
import numpy as np
import cv2
from cv2 import cv

VInstance = cv2.VideoCapture(0)
key = True

"""
params = dict(dp,
              minDist,
              circles,
              param1,
              param2,
              minRadius,
              maxRadius)
"""

def draw_circles(circles, output):
    if circles is not None:
        for i in circles[0, :]:
            # draw the outer circle
            cv2.circle(output, (i[0], i[1]), i[2], (0, 255, 0), 2)
            # draw the centre of the circle
            cv2.circle(output, (i[0], i[1]), 2, (0, 0, 255), 3)
        print("The number of circles is %d" % (circles[0].shape[0]))
    elif circles is None:
        print("The number of circles is 0")

if __name__ == '__main__':
    while key:
        ret, img = VInstance.read()
        ## Smooth image to reduce the input noise
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgSmooth = cv2.GaussianBlur(imgGray, (5, 5), 3)
        ## Compute Hough Circles
        circles = cv2.HoughCircles(imgSmooth, cv2.cv.CV_HOUGH_GRADIENT, 1, 100,
                                   param1=80,
                                   param2=50,
                                   minRadius=50,
                                   maxRadius=100)
        draw_circles(circles, img)
        ## Display the circles
        cv2.imshow('detected circles', imgGray)
        cv2.imshow("result", img)
        k = cv2.waitKey(1)
        if k == 27:
            cv2.destroyAllWindows()
            break
But I'm getting an Assertion Failed error; the details are below.
OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cvtColor, file /home/pi/opencv-3.1.0/modules/imgproc/src/color.cpp, line 8000
Traceback (most recent call last):
  File "HoughCircles.py", line 70, in <module>
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: /home/pi/opencv-3.1.0/modules/imgproc/src/color.cpp:8000: error: (-215) scn == 3 || scn == 4 in function cvtColor
Can anyone please check and help!
Error code "Assertion failed (scn == 3 || scn == 4) in cvtColor" means that the input (source) image in your cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) method does not have 3 or 4 channels, which are necessary for this type of conversion. Probably your input image is already grayscale format. Try just not to use that method and your problem should be solved. If it does throw other unsolvable errors or does not solve the problem, post your issues in comments.
This means the input image is invalid, which is why you need to check the ret value in your loop!
The error and the question title don't have anything to do with your Hough circles, so I'll condense my answer to address the assertion failure (add your other code back in later!):
#!/usr/bin/python
import numpy as np
import cv2

VInstance = cv2.VideoCapture(0)

if __name__ == '__main__':
    while True:
        ret, img = VInstance.read()
        # Confirm we have a valid image returned
        if not ret:
            break
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cv2.imshow("result", img)
        k = cv2.waitKey(1)
        if k == 27:
            break
    cv2.destroyAllWindows()
I am trying to run the following code on my Raspberry Pi, but it is giving me this error:
Traceback (most recent call last):
  File "video_capture_thresh.py", line 59, in <module>
    main()
  File "video_capture_thresh.py", line 11, in main
    crop = frame[180:320, 0:638]
TypeError: 'NoneType' object has no attribute '__getitem__'
import numpy as np
import cv2

#cap=cv2.VideoCapture(0)
cap = cv2.VideoCapture(1)

def main():
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Our operations on the frame come here
        crop = frame[180:320, 0:638]
        crop2 = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
        th, crop2 = cv2.threshold(crop2, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        previous = cv2.GaussianBlur(crop2, (5, 5), 0)
        contours, hierarchy = cv2.findContours(crop2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.rectangle(previous, (0, 0), (638, 140), (0, 255, 0), 5)
        i = 0
        for cnt in contours:
            moments = cv2.moments(cnt)  # Calculate moments
            if moments['m00'] != 0:
                cx = int(moments['m10'] / moments['m00'])  # cx = M10/M00
                cy = int(moments['m01'] / moments['m00'])  # cy = M01/M00
                moment_area = moments['m00']  # Contour area from moments
                contour_area = cv2.contourArea(cnt)  # Contour area using built-in function
                perimeter = cv2.arcLength(cnt, True)
                cv2.drawContours(previous, [cnt], 0, (0, 255, 0), 3)
                px = previous[cy, cx]
                if px == 255:
                    i = i + 1
                    cv2.circle(previous, (cx, cy), 5, (0, 0, 255), -1)
        cv2.imshow("Previous", previous)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

main()
cap.release()
cv2.destroyAllWindows()
Try adding a check to make sure you actually read the frame in OK before you do your processing:
ret, frame = cap.read()
if ret == True:
    crop = frame[180:320, 0:638]
From the documentation for the read method: it is the most convenient method for reading video files or capturing data from a camera, decoding and returning the just-grabbed frame. If no frame has been grabbed (the camera has been disconnected, or there are no more frames in the video file), the method returns false and the frame is a NULL pointer (None in Python).
Check whether the image is being captured from the camera correctly. What kind of camera are you using? Note that cv2.VideoCapture does not work with the Raspberry Pi camera module; it only works with USB webcams.
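Putting both suggestions together, a minimal sketch (the device index and crop bounds are copied from the question; everything else is an assumption about the setup):

import cv2

cap = cv2.VideoCapture(1)
if not cap.isOpened():
    raise RuntimeError("Could not open the capture device")

while True:
    ret, frame = cap.read()
    if not ret:  # camera disconnected or no more frames: frame is None
        break
    crop = frame[180:320, 0:638]
    # ... rest of the processing goes here ...

cap.release()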