I am getting an error in the code below. I want to take input from the webcam and if any accident occurs then it will shows that accident occurs. But with the code below I am not even getting camera video feed. It shows the following error:
AttributeError: 'module' object has no attribute 'CV_WINDOW_AUTOSIZE'
If I change it to cv2.WINDOW_NORMAL then it shows this error:
C:\build\master_winpack-bindings-win64-vc14-static\opencv\modules\imgproc\src\color.cpp:9748: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor
Sometimes it turns on my laptop's camera LED, and I want to take video from the webcam.
#Import necessary packages
import numpy as np
import cv2
import math, operator
#Function to find difference in frames
def diffImg(t0, t1, t2):
    """Return the motion mask for three consecutive grayscale frames.

    Computes |t2 - t1| AND |t1 - t0|, so only pixels that changed across
    both successive frame pairs survive (classic three-frame differencing).
    The paste had lost the indentation under the def; restored here.
    """
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    return cv2.bitwise_and(d1, d2)
j=1
#Import video from webcam
cam = cv2.VideoCapture(0)
#Creating window to display
winName = "Accident Detector"
cv2.namedWindow(winName, cv2.CV_WINDOW_AUTOSIZE)
#Reading frames at multiple instances from webcam to different variables
t_minus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
t = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
t_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
while True:
#Display video out through the window we created
cv2.imshow( winName, diffImg(t_minus, t, t_plus) )
#Calling function diffImg() and assign the return value to 'p'
p=diffImg(t_minus, t, t_plus)
#Writing 'p' to a directory
cv2.imwrite("D:\photos\shot.jpg",p)
#From Python Image Library(PIL) import Image class
from PIL import Image
#Open image from the directories and returns it's histogram's
h1 = Image.open("D:\motionpic\shot"+str(j)+".jpg").histogram()
h2 = Image.open("D:\photos\shot.jpg").histogram()
j=j+1
#Finding rms value of the two images opened before
rms = math.sqrt(reduce(operator.add,map(lambda a,b: (a-b)**2, h1, h2))/len(h1))
print int(rms)
#If the RMS value of the images are under our limit
if (rms<250):
#Then there is a similarity between images. i.e., Scene similar to an accident is found
print "accident\a"
#Updates the frames
t_minus = t
t = t_plus
t_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
#Destroys the window after key press
key = cv2.waitKey(10)
if key == 27:
cv2.destroyWindow(winName)
break
The correct syntax to create a new window is
cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)
Also the camera frames should be read under the while loop.
Related
Description
I have a file `FaceDetection.py` which detects the face from webcam footage using `opencv` and then writes frames into `output` object, converting it into a video. If the face is present in the frame then a function `sendImg()` is called which then calls the function `faceRec()` from another imported file. At last this method prints the face name if the face is known or prints "unknown".
Problem I am facing
The face recognition process is quite expensive, and that's why I thought that running it from a different file and in a thread would create a background process, so the output.write() function wouldn't be interrupted while doing face recognition. But as soon as the sendImg() function gets called, the output.write() function gets interrupted, resulting in skipped frames in the saved video.
Resolution I am seeking
I want to do the face recognition without interrupting the output.write() function. So, resulting video will be smooth and no frames would be skipped.
Here is the FaceDetection.py
import threading
from datetime import datetime
import cv2
from FaceRecognition import FaceRecognition

# Haar cascade: the cheap per-frame face *detection* stage.
face_cascade = cv2.CascadeClassifier('face_cascade.xml')
fr = FaceRecognition()
img = ""
cap = cv2.VideoCapture(0)

# ":" is not legal in Windows file names, hence the replace() on the
# timestamp before it is embedded in the video name.
path = 'C:/Users/-- Kanbhaa --/Documents/Recorded/'
vid_name = str("recording_" + datetime.now().strftime("%b-%d-%Y_%H:%M").replace(":", "_") + ".avi")
vid_cod = cv2.VideoWriter_fourcc(*'MPEG')
output = cv2.VideoWriter(str(path + vid_name), vid_cod, 20.0, (640, 480))

def sendImage(imgI):
    # Expensive recognition step; run on a worker thread so the
    # capture/write loop is not blocked while it executes.
    fr.faceRec(imgI)

while True:
    t, img = cap.read()
    output.write(img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 2, 4)
    # NOTE(review): this starts one new thread per detected face per
    # frame.  A single long-lived worker thread fed by a queue would
    # avoid the per-frame thread churn that stalls output.write().
    for _ in faces:
        t1 = threading.Thread(target=sendImage, args=(img,))
        t1.start()
    cv2.imshow('img', img)
    if cv2.waitKey(1) & 0XFF == ord('x'):
        break

cap.release()
output.release()
cv2.destroyAllWindows()  # was missing: the preview window leaked
Here is the FaceRecognition.py
from simple_facerec import SimpleFacerec
class FaceRecognition:
    """Thin wrapper around SimpleFacerec.

    Loads the known-face encodings once at construction time and reports
    recognised names for every frame passed to faceRec().
    """
    def __init__(self):
        # Keep the recogniser on the instance instead of a module-level
        # global -- identical behaviour, but the state is no longer shared
        # across (hypothetical) multiple instances.
        self.sfr = SimpleFacerec()
        # Encode faces from a folder
        self.sfr.load_encoding_images("C:/Users/UserName/Documents/FaceRecognitionSystem/images/")
    def faceRec(self, frame):
        """Detect known faces in *frame*; print one line per face found."""
        face_locations, face_names = self.sfr.detect_known_faces(frame)
        for face_loc, name in zip(face_locations, face_names):
            if name != "Unknown":
                print("known Person detected :) => " + name)
            else:
                print("Unknown Person detected :( => Frame captured...")
The imported file SimpleFacerec in the above code is from YouTube, so I didn't write it myself. It can be found below.
SimpleFacerec.py
You wrote it incorrectly. It should be:
t1 = threading.Thread(target=sendImage, args=(img,))
Try changing it as follows, but don't use an underscore as the loop variable:
for index in faces:
t1 = threading.Thread(target=sendImage,args=(index,))
t1.start()
I have a camera that faces an LED sensor that I want to restrict on capturing images when the object is in sight otherwise not capturing. I will designate a point or region where the color pixels will change as an object arrives it will wait for 3sec and then capture. Thereafter it will not capture the same object if it still remains in the designated region. Here are my codes.
import cv2
import numpy as np
import os
import time  # was missing -- time.sleep() is used below

os.chdir('C:/Users/Man/Desktop')

previous_flag = 0   # pixel state on the previous frame (0 = empty scene)
current_flag = 0    # pixel state on the current frame
a = 0               # running index for saved snapshots
video = cv2.VideoCapture(0)

while True:
    ret, frame = video.read()
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    cv2.imshow('Real Time', rgb)
    cv2.waitKey(1)
    if rgb[400, 200, 0] > 200:  # watched point bright => nothing in sight
        current_flag = 0
        previous_flag = current_flag
        print('No object Detected')
    else:  # object arrived
        current_flag = 1
        # Capture only on the 0 -> 1 transition, not on every frame the
        # object remains in the region.
        change_in_flags = current_flag - previous_flag
        if change_in_flags == 1:
            time.sleep(3)
            # The original quoted the whole expression, so every capture
            # was written to the literal file ".trial/cam+str(a).jpg".
            cv2.imwrite('./trial/cam' + str(a) + '.jpg', rgb)
            a += 1
            previous_flag = current_flag

video.release()
cv2.destroyAllWindows()
When I implement the above program, for the first case (if) it prints several lines of No object Detected.
How can I reduce those sequentially printed lines for No object Detected statement? So that the program can just print one line without repetition.
I also tried to add a while loop to keep the current status true after a+=1 like:
while True:
previous_flag = current_flag
continue
It worked, but the system became very slow. Is there any way to avoid such a while loop so that the system runs faster? Any advice on that would be appreciated.
import cv2
import numpy as np
import os
import time  # was missing -- time.sleep() is used below

os.chdir('C:/Users/Man/Desktop')

state1 = 0  # 0 => "No object" message not yet printed for this empty spell
a = 0       # snapshot counter
video = cv2.VideoCapture(0)

while True:
    ret, frame = video.read()
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    cv2.imshow('Real Time', rgb)
    cv2.waitKey(1)
    if rgb[400, 200, 0] > 200:  # watched point bright => empty scene
        # Print the message once per empty spell instead of every frame.
        if state1 == 0:
            print('No object Detected')
            state1 = 1
    else:  # object arrived
        time.sleep(1)
        if state1 == 1:  # original line was missing the colon (SyntaxError)
            print('Object is detected')
            # Concatenate the counter properly; the original quoted the
            # whole expression as a literal filename.
            cv2.imwrite('./trial/cam' + str(a) + '.jpg', rgb)
            a += 1
            state1 = 0

video.release()
cv2.destroyAllWindows()
I'm trying to get the angle of the middle line in a domino. SimpleBlobDetector can find it, but it gives me an angle value of -1.
Does anyone know why that can be? and how can i fix that still using SimpleBlobDetector? and if SimpleBlobDetector is not an option, what else can i do to find the center and angle of these lines?
import numpy as np
import cv2
import os
import time
#from matplotlib import pyplot as plt
def main():
capWebcam = cv2.VideoCapture(0) # declare a VideoCapture object and associate to webcam, 0 => use 1st webcam
params = cv2.SimpleBlobDetector_Params()
params.filterByCircularity = True
params.minCircularity = 0.73
params2 = cv2.SimpleBlobDetector_Params()
params2.filterByInertia = True
params2.maxInertiaRatio = 0.3
# show original resolution
print "default resolution = " + str(capWebcam.get(cv2.CAP_PROP_FRAME_WIDTH)) + "x" + str(capWebcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
capWebcam.set(cv2.CAP_PROP_FRAME_WIDTH, 320.0) # change resolution to 320x240 for faster processing
capWebcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 240.0)
capWebcam.set(cv2.CAP_PROP_FPS, 1)
# show updated resolution
print "updated resolution = " + str(capWebcam.get(cv2.CAP_PROP_FRAME_WIDTH)) + "x" + str(capWebcam.get(cv2.CAP_PROP_FRAME_HEIGHT))
if capWebcam.isOpened() == False: # check if VideoCapture object was associated to webcam successfully
print "error: capWebcam not accessed successfully\n\n" # if not, print error message to std out
os.system("pause") # pause until user presses a key so user can see error message
return # and exit function (which exits program)
# end if
while cv2.waitKey(1) != 27 and capWebcam.isOpened(): # until the Esc key is pressed or webcam connection is lost
blnFrameReadSuccessfully, img = capWebcam.read() # read next frame
if not blnFrameReadSuccessfully or img is None: # if frame was not read successfully
print "error: frame not read from webcam\n" # print error message to std out
os.system("pause") # pause until user presses a key so user can see error message
break # exit while loop (which exits program)
det = cv2.SimpleBlobDetector_create(params)
det2 = cv2.SimpleBlobDetector_create(params2)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
time.sleep(0.1)
keypts = det.detect(gray)
time.sleep(0.1)
keypts2 = det2.detect(gray)
print len(keypts)
print len(keypts2)
for l in keypts2:
print l.pt
print l.angle
time.sleep(0.3)
imgwk = cv2.drawKeypoints(gray, keypts, img,color= (200,0,139),flags=2)
time.sleep(0.1)
imgwk2 = cv2.drawKeypoints(img, keypts2, gray,color= (200,0,139),flags=2)
cv2.namedWindow("img", cv2.WINDOW_AUTOSIZE) # or use WINDOW_NORMAL to allow window resizing
cv2.imshow("img", img)
# hold windows open until user presses a key
cv2.destroyAllWindows()
return
###################################################################################################
if __name__ == "__main__":
main()
I'm not sure if SimpleBlob detector is the best for this, but have you looked at
Detect centre and angle of rectangles in an image using Opencv
It seems quite similar to your question, and it appears to do very well at identifying the angle of the rectangles.
Aim : Detect the motion and save only the motion periods in files with names of the starting time.
Now I met the issue about how to save the video to the files with video starting time.
What I tested :
I tested my program part by part. It seems that each part works well except the saving part.
Running status: No error. But in the saving folder, there is no video. If I use a static saving path instead, the video will be saved successfully, but the video will be override by the next video. My codes are below:
import cv2
import numpy as np
import time

cap = cv2.VideoCapture( 0 )
bgst = cv2.createBackgroundSubtractorMOG2()
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

# One writer per motion episode; None while no motion is in progress.
# The original recreated the VideoWriter on EVERY moving frame (so each
# frame overwrote the file) and called out.release() in the quiet branch
# before out was ever assigned (NameError on a quiet first frame).
out = None

while True:
    ret, frame = cap.read()
    dst = bgst.apply(frame)
    dst = np.array(dst, np.int8)

    if np.count_nonzero(dst) > 3000:  # use this value to adjust the "Sensitivity"
        print('something is moving %s' % (time.ctime()))
        if out is None:
            # Motion just started: open one file named after the start
            # time.  ":" is illegal in Windows filenames -- that is why
            # the original time.ctime() names produced no file at all.
            n = time.ctime().replace(':', '_')
            path = r'E:\OpenCV\Motion_Detection\%s.avi' % n
            out = cv2.VideoWriter(path, fourcc, 50, size)
        out.write(frame)
    else:
        if out is not None:
            out.release()  # motion ended: finalize this episode's file
            out = None
        print("No motion Detected %s" % time.ctime())

    key = cv2.waitKey(3)
    if key == 32:
        break

if out is not None:
    out.release()
cap.release()
What I meant is:
# NOTE(review): illustrative sketch from the answer -- NOT runnable as-is.
# "number of frames in the video" and "frame(i)" are placeholders the
# reader must replace with a real frame count and frame indexing.
import cv2
import numpy as np
import time
cap = cv2.VideoCapture( 0 )
bgst = cv2.createBackgroundSubtractorMOG2()
fourcc=cv2.VideoWriter_fourcc(*'DIVX')
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# One file named after the time the recording starts (built once, before
# the loop, so the name is not regenerated per frame).
path = r'E:\OpenCV\Motion_Detection\%s.avi' %(time.ctime())
out = cv2.VideoWriter( path, fourcc, 16, size )
while True:
ret, frame = cap.read()
dst = bgst.apply(frame)
dst = np.array(dst, np.int8)
# Placeholder loop: iterate the frames of the motion episode.
for i in range(number of frames in the video):
if np.count_nonzero(dst)<3000: # use this value to adjust the "Sensitivity“
print("No Motion Detected")
out.release()
else:
print('something is moving %s' %(time.ctime()))
#label each frame you want to output here
# frame(i) is pseudocode for "the i-th frame of this episode".
out.write(frame(i))
key = cv2.waitKey(1)
if key == 32:
break
cap.release()
cv2.destroyAllWindows()
If you see the code there will be a for loop, within which the process of saving is done.
I do not know the exact syntax involving for loop with frames, but I hope you have the gist of it. You have to find the number of frames present in the video and set that as the range in the for loop.
Each frame gets saved uniquely (see the else condition.) As I said I do not know the syntax. Please refer and follow this procedure.
Cheers!
I am trying to apply the numpy palette method to an opencv processed video (references: this question and this tutorial ). I aim at replacing all frame pixels of a certain color range by another. The code below is an example that replaces black with green. Unfotunately, my code raises an error on line:
image[np.where((image==[0,0,0]).all(axis=2))]=green
the error: exceptions.ValueError:axis(=2) out of bounds
I am running python 2.7 with PyScripter, and I find it odd because the code did work before, and I did not make any major modification to it. Can someone please help me? I am quite stuck on this one...
My code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##IMPORTS
import cv2.cv as cv
import numpy as np
import time
##VARIABLES
#colors
green=[0,255,0]
##MAIN
#video file input
frames = raw_input('Please input video file:')
if not frames:
print "This program requires a file as input!"
sys.exit(1)
#create window
cv.NamedWindow("image", 1)
#File capture
vidFile = cv.CaptureFromFile(frames)
nFrames = int( cv.GetCaptureProperty( vidFile, cv.CV_CAP_PROP_FRAME_COUNT ) )
fps = cv.GetCaptureProperty( vidFile, cv.CV_CAP_PROP_FPS )
waitPerFrameInMillisec = int( 1/fps * 1000/1 )
#time adjustment, frame capture
for f in xrange( nFrames ):
frame = cv.QueryFrame( vidFile )
# create the images we need
image = cv.CreateImage (cv.GetSize (frame), 8, 3)
# copy the frame, so we can draw on it
if not frame:
break
else:
cv.Copy (frame, image)
#get pixel HSV colors
rows,cols=cv.GetSize(frame)
image=np.asarray(image)
image[np.where((image==[0,0,0]).all(axis=2))]=green
image=cv.fromarray(image)
#show the image
cv.ShowImage("image", image)
#quit command ESC
if cv.WaitKey(waitPerFrameInMillisec)==27:
break
else:
cv.WaitKey(waitPerFrameInMillisec) % 0x100
I solved it. In fact, the answer was not a modification of the line raising the exception, but rather on a modification of the arguments that were passed to this line. Indeed, it seems that the '[:,:]' arguments are required in opencv when converting cvMat to Numpy and back.
Here is the corrected code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##IMPORTS
import cv2.cv as cv
import numpy as np
##VARIABLES
#colors
green=[0,255,0]
##MAIN
#start video stream analysis
frames = raw_input('Please enter video file:')
if not frames:
print "This program requires a file as input!"
sys.exit(1)
# first, create the necessary windows
cv.NamedWindow ('image', cv.CV_WINDOW_AUTOSIZE)
#File capture
vidFile = cv.CaptureFromFile(frames)
nFrames = int( cv.GetCaptureProperty( vidFile, cv.CV_CAP_PROP_FRAME_COUNT ) )
fps = cv.GetCaptureProperty( vidFile, cv.CV_CAP_PROP_FPS )
waitPerFrameInMillisec = int( 1/fps * 1000/1 )
for f in xrange( nFrames ):
#time adjustment, frame capture
sec = f/fps
frame = cv.QueryFrame( vidFile )
# create the images we need
image = cv.CreateImage (cv.GetSize (frame), 8, 3)
# copy the frame, so we can draw on it
if not frame:
break
else:
cv.Copy (frame, image)
#Replace pixel colors
rows,cols=cv.GetSize(frame)
image=np.asarray(image[:,:])
image[np.where((image==[0,0,0]).all(axis=2))]=green
image=cv.fromarray(image[:,:])
#show the image
cv.ShowImage("image", image)
#quit command ESC
if cv.WaitKey(waitPerFrameInMillisec)==27:
break
else:
cv.WaitKey(waitPerFrameInMillisec) % 0x100
cv.DestroyAllWindows()