Issue with OpenCV in Python

I am trying to run the following code on my Raspberry Pi, but it is giving me this error:
Traceback (most recent call last):
  File "video_capture_thresh.py", line 59, in <module>
    main()
  File "video_capture_thresh.py", line 11, in main
    crop = frame[180:320, 0:638]
TypeError: 'NoneType' object has no attribute '__getitem__'
import numpy as np
import cv2

#cap=cv2.VideoCapture(0)
cap = cv2.VideoCapture(1)

def main():
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        # Our operations on the frame come here
        crop = frame[180:320, 0:638]
        crop2 = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
        th, crop2 = cv2.threshold(crop2, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        previous = cv2.GaussianBlur(crop2, (5,5), 0)
        contours, hierarchy = cv2.findContours(crop2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.rectangle(previous, (0,0), (638,140), (0,255,0), 5)

        i = 0
        for cnt in contours:
            moments = cv2.moments(cnt)   # Calculate moments
            if moments['m00'] != 0:
                cx = int(moments['m10']/moments['m00'])   # cx = M10/M00
                cy = int(moments['m01']/moments['m00'])   # cy = M01/M00
                moment_area = moments['m00']              # Contour area from moment
                contour_area = cv2.contourArea(cnt)       # Contour area using built-in function
                perimeter = cv2.arcLength(cnt, True)
                cv2.drawContours(previous, [cnt], 0, (0,255,0), 3)
                px = previous[cy, cx]
                if px == 255:
                    i = i + 1
                    cv2.circle(previous, (cx,cy), 5, (0,0,255), -1)

        cv2.imshow("Previous", previous)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

main()
cap.release()
cv2.destroyAllWindows()

Try adding a check to make sure you actually read a frame successfully before you do your processing:
ret, frame = cap.read()
if ret == True:
    crop = frame[180:320, 0:638]

From the documentation of the read method:

"This is the most convenient method for reading video files or capturing data from a camera: it decodes and returns the just-grabbed frame. If no frame has been grabbed (the camera has been disconnected, or there are no more frames in the video file), the method returns false (and the C functions return a NULL pointer)."

Check whether the image is actually being captured from the camera. What kind of camera are you using?
Note that cv2.VideoCapture does not work with the Raspberry Pi camera module; it only works with USB webcams.
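
Putting both answers together, here is a minimal sketch of a defensive capture loop (the isOpened check and the skip-on-failure behaviour are my additions, not from the original answers; the crop indices are the ones from the question):

import cv2

cap = cv2.VideoCapture(1)
if not cap.isOpened():
    raise RuntimeError("Camera could not be opened; check the device index")

while True:
    ret, frame = cap.read()
    if not ret:
        # Camera disconnected or no frame available yet; skip this iteration
        continue
    crop = frame[180:320, 0:638]
    cv2.imshow("crop", crop)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()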

Related

How can I capture detected face images in OpenCV to make a database?

I am trying to enroll the faces of users in OpenCV for facial recognition. I have finished the detection part, but what I want to achieve is to save the detected faces. So basically what I want to achieve is: when I look into the webcam, it automatically captures 20-30 (or n) images and saves them locally.
Currently I am trying to simply save one image when a key is pressed. The program runs fine but nothing saves locally. Here is the code:
import cv2
import os

cascade = cv2.CascadeClassifier("../haar-cascade-files-master/haar-cascade-files-master/haarcascade_frontalface_alt2.xml")
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read(0)
    # gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(frame, 1.1, 5)
    orig = frame.copy()
    for (x,y,w,h) in faces:
        cv2.rectangle(frame, (x,y), (x+w,y+h), (255,255,0), 4)
    cv2.imshow("Video ", frame)
    if cv2.waitKey(2) & 0xFF == 27:
        break
    elif cv2.waitKey(2) & 0xFF == ord('s'):
        faceimg = frame[y:y+h, x:x+w]
        cv2.imwrite("../images/facec.jpeg", faceimg)
        # cv2.imshow("Crop Image",faceimg)

cap.release()
cv2.destroyAllWindows()
So what's wrong in the code, and how can I save n images automatically?
If only one person is looking at the camera in a given scenario, you can use a counter.
N = 20
cnt = 0
while True:
    ...
    ...
    # If the frame contains only one face
    for (x,y,w,h) in faces:
        cv2.rectangle(frame, (x,y), (x+w,y+h), (255,255,0), 4)
        faceimg = frame[y:y+h, x:x+w]
        cv2.imwrite("../images/face_" + str(cnt) + ".jpeg", faceimg)
        cnt = cnt + 1
    if cnt == N:
        cnt = 0
    ...
    ...
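
For completeness, a minimal self-contained sketch of the counter approach (the file names and N = 20 are illustrative; it also reads cv2.waitKey only once per iteration, since calling it twice, as the question's code does, means the second call rarely sees the key press):

import cv2

cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
cap = cv2.VideoCapture(0)

N = 20      # number of face images to collect
cnt = 0

while cnt < N:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   # cascades expect grayscale
    faces = cascade.detectMultiScale(gray, 1.1, 5)
    # Save a crop of each detected face; assumes one person in frame
    for (x, y, w, h) in faces:
        faceimg = frame[y:y+h, x:x+w]
        cv2.imwrite("face_" + str(cnt) + ".jpeg", faceimg)
        cnt += 1
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 4)
    cv2.imshow("Video", frame)
    key = cv2.waitKey(2) & 0xFF   # read the key once per loop
    if key == 27:                 # Esc aborts early
        break

cap.release()
cv2.destroyAllWindows()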

Getting "ValueError: not enough values to unpack (expected 3, got 2)" when running OpenCV findContours

I'm trying to run an OpenCV people-counting tutorial; the tutorial is available here: https://fedemejia.com/?p=68
But while running the following code from cmd, I get this error:
File "VideoCapture.py", line 25, in
_, contours0, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
ValueError: not enough values to unpack (expected 3, got 2)
My Python version is 3.6.8, OpenCV 4.0.0.
import numpy as np
import cv2

cap = cv2.VideoCapture('peopleCounter.avi')  # Open video file
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = True)  # Create the background subtractor
kernelOp = np.ones((3,3), np.uint8)
kernelCl = np.ones((11,11), np.uint8)
areaTH = 500

while(cap.isOpened()):
    ret, frame = cap.read()  # read a frame
    fgmask = fgbg.apply(frame)  # Use the subtractor
    try:
        ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
        # Opening (erode -> dilate) to remove noise
        mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
        # Closing (dilate -> erode) to join white regions
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernelCl)
    except:
        # if there are no more frames to show...
        print('EOF')
        break
    _, contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours0:
        cv2.drawContours(frame, cnt, -1, (0,255,0), 3, 8)
        area = cv2.contourArea(cnt)
        print(area)
        if area > areaTH:
            #################
            #   TRACKING    #
            #################
            M = cv2.moments(cnt)
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.circle(frame, (cx,cy), 5, (0,0,255), -1)
            img = cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2)
    cv2.imshow('Frame', frame)
    # Abort and exit with 'Q' or ESC
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()  # release video file
cv2.destroyAllWindows()  # close all openCV windows
It was supposed to give the result described in the tutorial.
In OpenCV 4.0, cv2.findContours() returns only 2 values, so it should be contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE). See https://docs.opencv.org/master/df/d0d/tutorial_find_contours.html
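
If the same script has to run on both OpenCV 3.x (three return values) and 4.x (two), a small compatibility shim is a common idiom; this sketch is my addition, not part of the original answer:

# findContours returns (image, contours, hierarchy) in OpenCV 3.x
# and (contours, hierarchy) in OpenCV 4.x; pick the right element.
result = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours0 = result[0] if len(result) == 2 else result[1]
hierarchy = result[-1]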

txt file won't be written in Python on Windows, but it works on Mac

I wrote code that takes a video, recognizes a human face, and calculates the distance of the human from the camera (which is the eye of a robot). I need the data in txt format, so I am writing it to distance_data.txt, but my file is always empty.
I ran the code on a Mac and it worked perfectly fine; the file was not empty.
import cv2
import time
import numpy as np

person_cascade = cv2.CascadeClassifier('human_recognition.txt')
cap = cv2.VideoCapture('oct23_2.avi')
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('oct23_2out.avi', fourcc, 20.0, (640,360))
distance_data = open('distance_data.txt','w')
timer = -1

while (cap.isOpened()):
    r, frame = cap.read()
    if r:
        frame = cv2.resize(frame, (640,360))  # Downscale to improve frame rate
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # Haar-cascade classifier needs a grayscale image
        cv2.imshow('gray', gray_frame)
        #print(gray_frame.type())
        rects = person_cascade.detectMultiScale(gray_frame)
        max = 0
        #print(rects)
        (x, y, w, h) = rects[0]
        cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)
        #fshape = frame.shape
        #fheight = fshape[0]
        #fwidth = fshape[1]
        timer = timer + 1
        xpos = (0.03421)*(w*w) - 8.1183*w + 521.841
        yn = 360 - y
        heightinvid = 0.0197377*yn - 0.0194
        realhieght = ((-0.4901)*(heightinvid*heightinvid) + 4.4269*heightinvid + 8.30356)/0.927183854
        horizontalp = x + w/2
        #print(timer+1, ",", xpos)
        #if (xpos<=41 and xpos>=40):
        #print("height of the human is ", realhieght+149, "or", (-0.000184)*yn*yn+0.08506*yn+8.43461+149)
        cm_deviation_in_x = -0.02033898*(320 - horizontalp)
        #print("Horizontal position of human is ", cm_deviation_in_x, " cm.")
        newxpos = format(xpos, '.3f')
        distance_data.write("Hi")
        distance_data.write(str(newxpos))
        distance_data.write(',')
        distance_data.write(str(format(cm_deviation_in_x, '.3f')))
        distance_data.write("\r\n")
        #print(w)
        #print("x position of the human is = ", xpos)
        #fourcc = cv2.VideoWriter_fourcc(*'XVID')
        #out = cv2.VideoWriter('output25.avi', fourcc, 20.0, (fwidth, fheight))
        cv2.imshow("preview", frame)
        out.write(frame)
    k = cv2.waitKey(10)
    #print(w, h)
    if k & 0xFF == ord("q"):  # Exit condition
        break

distance_data.flush()
cap.release()
cv2.destroyAllWindows()
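
No answer is preserved for this question, but one likely cause (my assumption, not a confirmed diagnosis) is that the script dies mid-loop, for example when rects[0] raises an IndexError on a frame with no detection, so the distance_data.flush() at the bottom is never reached and the buffered writes are lost; the file is also never closed. A context manager guarantees the file is flushed and closed even when an exception escapes the loop. A minimal sketch:

import cv2

person_cascade = cv2.CascadeClassifier('human_recognition.txt')
cap = cv2.VideoCapture('oct23_2.avi')

# The with-block closes (and therefore flushes) the file automatically,
# even if an exception is raised inside the loop.
with open('distance_data.txt', 'w') as distance_data:
    while cap.isOpened():
        r, frame = cap.read()
        if not r:
            break
        frame = cv2.resize(frame, (640, 360))
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = person_cascade.detectMultiScale(gray_frame)
        if len(rects) == 0:
            continue    # no detection: skip instead of crashing on rects[0]
        (x, y, w, h) = rects[0]
        xpos = 0.03421*w*w - 8.1183*w + 521.841
        distance_data.write(format(xpos, '.3f') + "\r\n")

cap.release()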

Send multiple frames to AWS Rekognition

I'm trying to send pictures from my webcam to AWS Rekognition to detect the activity of the person sitting in front of it, using Python.
To do so I take a picture every 5 seconds and send it to AWS.
But when I do so, it seems that it's always sending back information about the first frame that I sent:
import cv2
import boto3
from time import sleep

cap = cv2.VideoCapture(0)

while 1:
    ret, img = cap.read()
    client = boto3.client('rekognition')
    print("hello")

    ret, fileImg = cv2.imencode('.png', img)
    response = client.detect_labels(Image={'Bytes': fileImg.tobytes()})
    print('Detected labels for Camera Capture')
    for label in response['Labels']:
        print(label['Name'] + ' : ' + str(label['Confidence']))
    sleep(5)
Here is the result I get from that call:
Detected labels for Camera Capture
Human : 99.1103897095
People : 99.1103744507
Person : 99.1103897095
Face : 56.5527687073
Crypt : 51.1719360352
hello
Detected labels for Camera Capture
Human : 99.0247421265
People : 99.0247344971
Person : 99.0247421265
Face : 57.7796173096
Lighting : 51.8473701477
Crypt : 51.08152771
hello
Detected labels for Camera Capture
Human : 99.0808181763
People : 99.0808105469
Person : 99.0808181763
Face : 56.4268836975
Lighting : 54.6302490234
Crypt : 50.8622779846
hello
Given that the image changed a lot between calls, it should (at least I think) show me different results.
Here's some code that I use to put rectangles around faces in a similar way:
import cv2
import numpy as np
import boto3

# Setup
scale_factor = .15
green = (0,255,0)
red = (0,0,255)
frame_thickness = 2
cap = cv2.VideoCapture(0)
rekognition = boto3.client('rekognition')

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    height, width, channels = frame.shape

    # Convert frame to jpg
    small = cv2.resize(frame, (int(width * scale_factor), int(height * scale_factor)))
    ret, buf = cv2.imencode('.jpg', small)

    # Detect faces in jpg
    faces = rekognition.detect_faces(Image={'Bytes':buf.tobytes()}, Attributes=['ALL'])

    # Draw rectangle around faces
    for face in faces['FaceDetails']:
        smile = face['Smile']['Value']
        cv2.rectangle(frame,
                      (int(face['BoundingBox']['Left']*width),
                       int(face['BoundingBox']['Top']*height)),
                      (int((face['BoundingBox']['Left']+face['BoundingBox']['Width'])*width),
                       int((face['BoundingBox']['Top']+face['BoundingBox']['Height'])*height)),
                      green if smile else red, frame_thickness)

    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
It scales down the picture because Rekognition doesn't need the full size to detect faces.
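
As for why the labels barely change between calls: one plausible cause (my assumption, not confirmed in the thread) is that cv2.VideoCapture keeps an internal frame buffer, so after sleep(5) the next read() returns a stale frame queued seconds earlier rather than the current view. One workaround is to drain the buffer with grab() before reading the frame you actually send:

# Discard whatever frames the driver has buffered, then read a fresh one.
for _ in range(5):   # 5 is a guess at the buffer depth, not a documented value
    cap.grab()
ret, img = cap.read()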

ValueError: too many values to unpack opencv-python

Working with opencv-python on a tracking project, I get this error when calling estimatePoseSingleMarkers. Any idea?
cv2 version 3.2.0-dev
Python version 2.7.12
Traceback (most recent call last):
  File "test_aruco_detect.py", line 53, in <module>
    rvecs , tvecs = aruco.estimatePoseSingleMarkers(corners, 0.1765, cameraMatrix, distCoeffs)
ValueError: too many values to unpack
The code:
import numpy as np
import cv2
import cv2.aruco as aruco

with np.load('/home/odroid/camera_calibration/testfile.npz') as X:
    cameraMatrix, distCoeffs = [X[i] for i in ('arr_0','arr_1')]

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) #480x640

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()

    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    if ids != None:  # if at least one id is detected
        gray = aruco.drawDetectedMarkers(gray, corners, ids)
        rvecs, tvecs = aruco.estimatePoseSingleMarkers(corners, 0.1765, cameraMatrix, distCoeffs)
        aruco.drawAxis(gray, cameraMatrix, distCoeffs, rvecs, tvecs)

    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(...)

Edit: according to the documentation (http://docs.opencv.org/trunk/d9/d6a/group__aruco.html#ga84dd2e88f3e8c3255eb78e0f79571bd1), I was missing the _objPoints return value.
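
In context, the fixed call inside the detection branch would look like this (a sketch; only the extra _objPoints output changes, plus an is-not-None test to avoid NumPy's elementwise comparison):

if ids is not None:   # at least one marker detected
    gray = aruco.drawDetectedMarkers(gray, corners, ids)
    rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(
        corners, 0.1765, cameraMatrix, distCoeffs)
    aruco.drawAxis(gray, cameraMatrix, distCoeffs, rvecs, tvecs)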
