I'm trying to make a tracking laser webcam toy for my cat, but I am currently struggling with the IP webcam from an Android phone as it won't display anything and i get an error "initStream Failed to reset streams" I have attached the code below! I'm still new to Python and would love to learn more! =)
import cv2
import numpy as np
#Cam
url = "http://192.168.x.x:8080/shot.jpg"
img_resp = requests.get(url)
img_arr = np.array(bytearray(img_resp.content),dtype=np.uint8)
img = cv2.imdecode(img_arr,-1)
cap =cv2.VideoCapture(0)
ret, frame = cap.read()
#Movement tracker
while cap.isOpened():
ret = cap.set(3,320)
ret = cap.set(4,240)
diff = cv2.absdiff(frame, frame)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
if cv2.contourArea(contour) < 900:
continue
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 20, 30), 3)
cv2.putText(frame, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 0, 255), 3)
cv2.circle(frame, (x, y), 3, (200, 50, 180), 2)
image = cv2.resize(frame, (1280, 720))
cv2.imshow("feed", frame)
ret, frame2 = img.read()
if cv2.waitKey(1) == 27:
break
cv2.destroyAllWindows()
img.release()
I use IP Webcam application on my android phone. Just need to define url variable as below:
url = 'http://your IP address:port number/video' # e.g. url = 'http://192.168.43.1:8080/video'
You will see IP address and port number on the mobile phone screen when the app is running.
And after that you need to pass url as an argument to cv2.VideoCapture() method like this cap = cv2.VideoCapture(url) instead of cap = cv2.VideoCapture(0). It works fine for me. No need to say that PC and mobile phone are connected via Wi-Fi network (or mobile hotspot).
Related
I followed a video online about motion detection using openCV however I came across the problem that the findContours function is not returning a value. Any help is appreceated.
Here is the code:
import cv2
import time
import datetime
import imutils
def motion_detection():
video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
time.sleep(2)
first_frame = None
while True:
frame = video_capture.read()[1]
text = 'Unoccupied'
greyscale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gaussian_frame = cv2.GaussianBlur(greyscale_frame, (21, 21), 0)
blur_frame = cv2.blur(gaussian_frame, (5, 5))
greyscale_image = blur_frame
if first_frame is None:
first_frame = greyscale_image
else:
pass
frame = imutils.resize(frame, width=500)
frame_delta = cv2.absdiff(first_frame, greyscale_image)
# edit the ** thresh ** depending on the light/dark in room,
# change the 100(anything pixel value over 100 will become 255(white)
thresh = cv2.threshold(frame_delta, 100, 255, cv2.THRESH_BINARY)[1]
dilate_image = cv2.dilate(thresh, None, iterations=2)
cnt = cv2.findContours(dilate_image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
for c in cnt:
if cv2.contourArea(c) > 800:
(x, y, w, h) = cv2.boundingRect(
c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = 'Occupied'
# text that appears when there is motion in video feed
else:
pass
''' now draw text and timestamp on security feed '''
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, '{+} Room Status: %s' % text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime('%A %d %B %Y %I:%M:%S%p'),
(10, frame.shape[0] - 10), font, 0.35, (0, 0, 255), 1)
cv2.imshow('Security Feed', frame)
cv2.imshow('Threshold(foreground mask)', dilate_image)
cv2.imshow('Frame_delta', frame_delta)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
cv2.destroyAllWindows()
break
if __name__ == '__main__':
motion_detection()
I have tried to debug and find the problem the code is exactly what the video said to write and I have had no luck.
I have a problem with VideoCapture where the video I am streaming shows only the 1st frame. In the following code I superimpose a video on top of the bounding box yielded from object detection:
if view_img:
####video_name is a path to my video
img = cv2.VideoCapture(video_name)
ret_video, frame_video = img.read()
if not ret_video: ######so that the video can be played in a loop
img = cv2.VideoCapture(video_name)
ret_video, frame_video = img.read()
###here I look for the bounding boxes and superimpose the video
hsv = cv2.cvtColor(im0, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 120, 120), (10, 255, 255))#(110, 120, 120), (130, 255, 255))#<- blue # RED: (0, 120, 120), (10, 255, 255))
thresh = cv2.dilate(mask, None, iterations=2)
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#contours = contours[0]
contours = imutils.grab_contours(contours)
#frame_counter = 0
for contour in contours:
if cv2.contourArea(contour) < 750:
continue
(x, y, w, h) = cv2.boundingRect(contour)
height = 480
width = 640
if y + h < height and x + w < width:
logo = cv2.resize(frame_video, (w, h))###frame_video is the frame from the video which I superimpose
img2gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
_, logo_mask = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)
roi = im0[y:y+h, x:x+w]
roi[np.where(logo_mask)] = 0
roi += logo
cv2.imshow(str(p), im0)# im0 is the webcam frame
cv2.waitKey(25)
What happens when I run this code, is that instead of showing the entire video on top of the webcam frame, it displays only the first frame of that video.
Superimposing video works fine in another script, modified original: source
I believe that the issue has something to do with waitKey() for the superimposed video, as it is not specified.
If I try to initialize the video with while (cap.isopened()): or while (True) then the program freezes and there is no output at all.
cv2.VideoCapture should be run once for each device source. Using while loop should exclude cv2.VideoCapture (initialize it outside the loop). The reason why it's hang on while loop because you open up the connection for the same device multipletimes without closing it.
I didn't test it. You simply do like this: Btwe
if view_img:
####video_name is a path to my video
img = cv2.VideoCapture(video_name)
while img.isOpened():
ret_video, frame_video = img.read()
if not ret_video: ######so that the video can be played in a loop
break
###here I look for the bounding boxes and superimpose the video
hsv = cv2.cvtColor(im0, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 120, 120), (10, 255, 255))#(110, 120, 120), (130, 255, 255))#<- blue # RED: (0, 120, 120), (10, 255, 255))
thresh = cv2.dilate(mask, None, iterations=2)
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#contours = contours[0]
contours = imutils.grab_contours(contours)
#frame_counter = 0
for contour in contours:
if cv2.contourArea(contour) < 750:
continue
(x, y, w, h) = cv2.boundingRect(contour)
height = 480
width = 640
if y + h < height and x + w < width:
logo = cv2.resize(frame_video, (w, h))###frame_video is the frame from the video which I superimpose
img2gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
_, logo_mask = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)
roi = im0[y:y+h, x:x+w]
roi[np.where(logo_mask)] = 0
roi += logo
cv2.imshow(str(p), im0)# im0 is the webcam frame
cv2.waitKey(25)
Btw, If you are using OpenCV4.5.5. You may have to add this:
ret,contours = cv2.findContours
I'm thinking if I can take pictures while the motion detection is using my laptop webcam, so I used the code below from Pyimagesearch website. What I'm trying to do is take a picture when the motion detector detected something using webcam, while the frame is different from the previous one, then take a photo and save it to a file, I tried 'ret, frame = cap.read()' but it didn't work well, I may put it the wrong way, could somebody solve it this for me, please?
The code is here:
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils #pip install imutils on terminal
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file") #video file is optional, if video file equals None, then opencv will use webcam
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size") #500 pixels, no need to process large raw images through webcam
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get('video', None) is None:
vs = VideoStream(src=0).start()
time.sleep(2.0)
# otherwise, we are reading from a video file
else:
vs = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
while True:
frame = vs.read()
frame = frame if args.get('video', None) is None else frame[1]
text = 'Unoccupied'
if frame is None:
break
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
continue
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
if cv2.contourArea(c) < args["min_area"]:
continue
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = 'Occupied'
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
cv2.imshow('Motion Detector', frame)
cv2.imshow('Thresh', thresh)
cv2.imshow('Frame Delta', frameDelta)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
vs.stop()if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
If I understand your question correctly, you want to take images if motion is detected. An approach I would suggest, is to do not try and extract every frame but to extract one frame every let's say 1 second. So a 1-minute video will give 60 frames(images) and you won't end up with thousands of images(Webcam frame rate * seconds).
count = 0 # initialize counter (outside while loop)
On motion, take a picture every 1 sec(I would place it inside this if statement:
if cv2.contourArea(c) < args["min_area"]:
vs.set(cv2.CAP_PROP_POS_MSEC,(count*1000)) # wait 1 sec between each capture
cv2.imwrite('Motion_det'+str(count)+'.jpg',frame) # save frame as JPEG file
count+=1
The adjust bellow will save image and video:
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils #pip install imutils on terminal
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file") #video file is optional, if video file equals None, then opencv will use webcam
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size") #500 pixels, no need to process large raw images through webcam
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get('video', None) is None:
vs = VideoStream(src=0).start()
time.sleep(2.0)
# otherwise, we are reading from a video file
else:
vs = cv2.VideoCapture(args["video"])
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
sta = 0
while True:
frame = vs.read()
frame = frame if args.get('video', None) is None else frame[1]
text = 'Unoccupied'
if frame is None:
break
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
continue
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
record = "No"
for c in cnts:
if cv2.contourArea(c) < args["min_area"]:
record = "No"
continue
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = 'Occupied'
record = "Yes"
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
cv2.imshow('Motion Detector', frame)
cv2.imshow('Thresh', thresh)
cv2.imshow('Frame Delta', frameDelta)
if record = "Yes":
try:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime("%Y.%m.%d_%H.%M")
if sta != st:
filename = 'video-' + st + '.mp4'
out = cv2.VideoWriter(filename, fourcc, float(fps), (1280,720))
frame_name = filename.replace('.mp4','.png').format(frame_index)
cv2.imwrite(frame_name,frame)
sta = st
frame_record = cv2.resize(frame,(1280,720),fx=0,fy=0, interpolation = cv2.INTER_CUBIC)
out.write(frame_record)
except Exception as e:
print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
vs.stop()
vs.release()
cv2.destroyAllWindows()
I have a problem with my code. I don't know where I could put cv.resize() in order to get requirement resolution. I want to change it because the file which I upload is full HD resolution and I want to get smaller resolution. I will be glad of solution and explanation.
Below I show my code:
import cv2
import numpy as np
cap = cv2.VideoCapture('DJI_0037.MP4')
while cap.isOpened():
ret, frame = cap.read()
if ret == True:
frame_resize = cv2.resize(frame, (640, 480), interpolation=cv2.INTER_CUBIC)
else:
break
ret, frame_resize1 = cap.read(frame_resize)
ret, frame_resize2 = cap.read(frame_resize)
diff = cv2.absdiff(frame_resize1, frame_resize2)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
if cv2.contourArea(contour) < 2000:
continue
cv2.rectangle(frame_resize1, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(frame_resize1, "Status: {}".format('Movement'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),3)
cv2.imshow("feed", frame_resize1)
frame_resize1 = frame_resize2
ret, frame_resize2 = cap.read()
if cv2.waitKey(40) == 27:
break
cap.release()
cv2.destroyAllWindows()
I was following this tutorial and I tried to save the video to an avi file, but every time I tried the file was corrupted. I was able to save the frames individually using cv2.imwrite(), but stitching together the individual frames was a lot of work, and would lag the entire program. Here is my code:
from imutils.video import VideoStream
import imutils
import time
import cv2
MINIMUM_AREA = 500
# For Video Recording Purposes
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('vibralert_test001.avi', fourcc, 20, (640, 480))
vs = VideoStream(src=0).start()
print("Setting up feed.")
time.sleep(2)
print("Live")
firstFrame = None
while True:
frame = vs.read()
text = "No Movement Detected"
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
continue
delta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
if cv2.contourArea(c) < MINIMUM_AREA:
continue
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Movement Detected"
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.imshow("VibrAlert v0.1", frame)
out.write(frame)
key = cv2.waitKey(1) & 0xFF
if key == 27:
break
vs.stop()
out.release()
cv2.destroyAllWindows()
print('End Feed')
The size of the frame images need to be the same as the frameSize given to the VideoWriter, (640,480). The frame is being resized to have a width of 500, while the videoWriter is expecting a width of 640.
However, changing the resize to frame = imutils.resize(frame, width=640) probably won't work either. imutils.resize will return images with the same aspect ratio as the original image, so the height won't necessarily be 480. I would suggest replacing that line with the opencv.resize:
frame = cv2.resize(frame, (640,480))