problem
I want to detect moving objects in a video, but the capture does not seem to read the video. I use conda with Jupyter on macOS and have installed ffmpeg with brew. Still, I am unable to capture the video.
code:
import numpy as np
import cv2
import time
cap = cv2.VideoCapture('test.avi')
time.sleep(2)
fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=True)
while(1):
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    cv2.imshow('original', frame)
    cv2.imshow('fg', fgmask)
    k = cv2.waitKey(30) & 0xff

    # Assumed step: dilate the mask and find contours of the moving regions
    # (`contours` and `dilated` were used but not defined in the original snippet)
    dilated = cv2.dilate(fgmask, None, iterations=2)
    contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        # Get the bounding-box coordinates of the contour
        x, y, w, h = cv2.boundingRect(c)
        # Compute the contour area
        area = cv2.contourArea(c)
        if 500 < area < 3000:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("detection", frame)
    cv2.imshow("back", dilated)
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
error
Couldn't read video stream from file "test.avi"
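A quick check worth running first (my suggestion, not part of the original post) is to confirm that the relative path resolves from the notebook's working directory and whether this OpenCV build can open the file at all:

import os
import cv2

path = 'test.avi'
print(os.path.exists(path))      # False: the notebook's working directory is not where the file lives
cap = cv2.VideoCapture(path)
print(cap.isOpened())            # False: OpenCV could not open or decode the file
# Shows whether this OpenCV build was compiled with the FFMPEG video backend (look for YES/NO)
print([line for line in cv2.getBuildInformation().splitlines() if 'FFMPEG' in line])

If isOpened() stays False even with a correct absolute path, the conda OpenCV package may have been built without FFMPEG support; installing ffmpeg with brew does not change what the conda package was compiled against.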
Related
I'm currently practicing OpenCV and tried the face recognition code, and it's working fine. I'd like to be notified if I'm away from the screen for more than 2 minutes. I'm trying to play an audio file while I'm away and stop it when I'm back.
import cv2 as cv
import sys
import time
import pygame

cascPath = sys.argv[1]
faceCascade = cv.CascadeClassifier(cascPath)
video_capture = cv.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Display the resulting frame
    cv.imshow('Video', frame)
    # check if the tuple faces is empty
    if len(faces) == 0:
        start_time = time.time()
        while len(faces) == 0:
            print('person is away for ', time.time() - start_time)
            d_time = time.time()
            if d_time - start_time > 120:
                pygame.mixer.init()
                sound = pygame.mixer.Sound("Recording.mp3")
                sound.play(5)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv.destroyAllWindows()
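There is no accepted answer here, but as a rough sketch of one way to structure the timing (my own sketch, using pygame and the Recording.mp3 name from the snippet above, and a bundled Haar cascade instead of sys.argv): keep a "last seen" timestamp in the main loop rather than spinning in the inner `while len(faces) == 0:` loop, which never re-reads the camera.

import time
import cv2 as cv
import pygame

pygame.mixer.init()
alarm = pygame.mixer.Sound("Recording.mp3")   # file name taken from the question

faceCascade = cv.CascadeClassifier(cv.data.haarcascades + "haarcascade_frontalface_default.xml")
video_capture = cv.VideoCapture(0)

last_seen = time.time()
alarm_playing = False

while True:
    ret, frame = video_capture.read()
    if not ret:
        break
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

    if len(faces) > 0:
        last_seen = time.time()
        if alarm_playing:
            alarm.stop()           # person is back: stop the audio
            alarm_playing = False
    elif time.time() - last_seen > 120 and not alarm_playing:
        alarm.play(-1)             # away for more than 2 minutes: loop the audio
        alarm_playing = True

    cv.imshow('Video', frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv.destroyAllWindows()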
I am trying to run my Python program on a Linux server, but there is a problem. I am using OpenCV to build a fall detector, and it runs on my local computer, but when I put the Python script on the Linux server it doesn't run... Here is my code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs, sys , cgi
import cv2
import time
import os
sys.stdout = codecs.getwriter('utf8')(sys.stdout.buffer)
# Start html
print('Content-type: text/html\r\n')
# Video clip
cap = cv2.VideoCapture('Testvideo.mkv')
# Webcam camera
#cap = cv2.VideoCapture(0)
print("Start Fall detection")
#time.sleep(2)
fgbg = cv2.createBackgroundSubtractorMOG2()
f = 0
j = 0
while(True):
    ret, frame = cap.read()
    # Convert each frame to gray scale and subtract the background
    try:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        fgmask = fgbg.apply(gray)
        # Find contours
        contours, _ = cv2.findContours(
            fgmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            # List to hold all areas
            areas = []
            for contour in contours:
                ar = cv2.contourArea(contour)
                areas.append(ar)
            max_area = max(areas, default=0)
            max_area_index = areas.index(max_area)
            cnt = contours[max_area_index]
            M = cv2.moments(cnt)
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.drawContours(fgmask, [cnt], 0, (255, 255, 255), 3, maxLevel=0)
            if h < w:
                j += 1
            if j > 25:
                print(f"FALL !! ==> {f+1}")
                f += 1
                if f == 5:
                    print("Danger!!!")
                    #os.system("python send.py")
                    #os.system("python sms.py")
                #cv2.putText(fgmask, 'FALL Detect', (x, y), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255,255,255), 2)
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
            if h > w:
                j = 0
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.imshow('video', frame)
        if cv2.waitKey(33) == 27:
            break
    except Exception as e:
        break

cv2.destroyAllWindows()
And it only displays
Start Fall detection
on the server. Please give me some help.
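One likely culprit (my note, not part of the original question): cv2.imshow needs a GUI/display, so on a headless Linux server it usually raises an error, which the bare `except` above swallows before breaking out of the loop, leaving only "Start Fall detection" printed. A minimal sketch of guarding the GUI calls, assuming an X11-style DISPLAY check:

import os
import cv2

HAS_DISPLAY = bool(os.environ.get("DISPLAY"))  # assumption: no DISPLAY means headless

cap = cv2.VideoCapture('Testvideo.mkv')
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    # ... fall-detection logic from the script above goes here ...
    if HAS_DISPLAY:
        cv2.imshow('video', frame)
        if cv2.waitKey(33) == 27:
            break

cap.release()
if HAS_DISPLAY:
    cv2.destroyAllWindows()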
The error I get while debugging in IDLE (Python 3.7) is:
cv2.error: OpenCV(3.4.3) (some directory files)
- error : (-215:Assertion failed) !_src.empty() in function 'cvtColor'
The program itself is taken from a website:
import cv2
import numpy as np
face_cascade = cv2.CascadeClassifier(r'C:\Program Files\Python38\Lib\site-packages\cv2\data\haarcascade_frontalface_alt2.xml')
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    for (x, y, w, h) in faces:
        print(x, y, w, h)
        roi_gray = gray[y:y + h, x:x + w]  # (ycord_start, ycord_end)
        roi_color = frame[y:y + h, x:x + w]
        img_item = 'my-image.png'
        cv2.imwrite(img_item, roi_gray)
        cv2.imshow('frame', frame)
        color = (0, 0, 255)
        stroke = 2
        width = x + w
        height = y + h
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
So I would like to know what is causing this error.
The problem is in
cap = cv2.VideoCapture(0)
In fact, 0 is the id of the opened video capturing device (i.e. a camera index). I imagine that the program can't detect any camera and thus throws this error. You can check whether video capturing has been initialized by printing cap.isOpened() before the loop; if it is False, then you have a problem initializing the video capture.
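For example, a minimal check (index 0 is just the default camera and is an assumption about your setup):

import cv2

cap = cv2.VideoCapture(0)
print(cap.isOpened())  # False means no camera could be opened at index 0

if not cap.isOpened():
    raise RuntimeError("Cannot open camera 0; try another index or check permissions")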
Regards
I was trying to capture an image from the webcam and extract the text information in it using Python.
Here is the code:
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pytesseract
from PIL import Image
from pytesseract import image_to_string
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
def main():
    # Use the attached camera to capture images
    # 0 stands for the first one
    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        ret, frame = cap.read()
        print(ret)
        print(frame)
    else:
        ret = False
    img1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # img = Image.open('image.jpg')
    text = pytesseract.image_to_string(img1)
    print(text)
    # plt.imshow(img1)
    # plt.title('Color Image RGB')
    # plt.xticks([])
    # plt.yticks([])
    # plt.show()
    cap.release()

if __name__ == "__main__":
    main()
The code didn't work. I watched a couple of videos on YouTube, and I saw that people typically use Image.open("image.jpg") to open an image located on the computer. But I need to capture the image from the webcam and extract the information from it, so that method won't work in my situation. Is there a way to combine these two methods, like capturing the image using cv2 and extracting the information using pytesseract.image_to_string()?
Can you please try replacing the below line of code,
text = pytesseract.image_to_string(img1)
with this code,
text = pytesseract.image_to_string(Image.fromarray(img1))
Or here is a working code snippet (I copied your code and updated it a little):
def main():
    # Use the attached camera to capture images
    # 0 stands for the first one
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ret, frame = cap.read()
        img1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        text = pytesseract.image_to_string(Image.fromarray(img1))
        cv2.imshow('frame', img1)
        if cv2.waitKey(0) & 0xFF == ord('q'):
            return None
        print("Extracted Text: ", text)
    cap.release()
Hope this will help you.
I used a while loop because with an if condition I did not get a result; I'm still trying to figure it out.
import cv2
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
framewidth = 640
frameheight = 480
cap = cv2.VideoCapture(0)
cap.set(3, framewidth)
cap.set(4, frameheight)
while True:
    success, img = cap.read()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # print(pytesseract.image_to_string(img))

    ## detecting characters
    # hImg, wImg, _ = img.shape
    # boxes = pytesseract.image_to_boxes(img)
    # for b in boxes.splitlines():
    #     # print(b)
    #     b = b.split(' ')
    #     print(b)
    #     x, y, w, h = int(b[1]), int(b[2]), int(b[3]), int(b[4])
    #     cv2.rectangle(img, (x, hImg-y), (w, hImg-h), (0, 0, 255), 3)
    #     cv2.putText(img, b[0], (x, hImg-y+25), cv2.FONT_HERSHEY_COMPLEX, 1, (50, 100, 255), 2)

    ## detecting words
    hImg, wImg, _ = img.shape
    boxes = pytesseract.image_to_data(img)
    for x, b in enumerate(boxes.splitlines()):
        if x != 0:
            b = b.split()
            print(b)
            if len(b) == 12:
                x, y, w, h = int(b[6]), int(b[7]), int(b[8]), int(b[9])
                cv2.rectangle(img, (x, y), (w+x, h+y), (0, 0, 255), 3)
                cv2.putText(img, b[11], (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (50, 100, 255), 2)

    ## detecting digits
    # hImg, wImg, _ = img.shape
    # cong = r'--oem 3 --psm 6 outputbase digits'
    # boxes = pytesseract.image_to_data(img, config=cong)
    # for x, b in enumerate(boxes.splitlines()):
    #     if x != 0:
    #         b = b.split()
    #         print(b)
    #         if len(b) == 12:
    #             x, y, w, h = int(b[6]), int(b[7]), int(b[8]), int(b[9])
    #             cv2.rectangle(img, (x, y), (w + x, h + y), (0, 0, 255), 3)
    #             cv2.putText(img, b[11], (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (50, 100, 255), 2)

    # reading text, don't delete it
    # print(pytesseract.image_to_boxes(img))

    cv2.imshow("video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# I don't know how to give an answer, but I have my code and it's working fine. Enjoy.
I have an image containing my object, and the object template itself. When I execute my code:
import cv2
import numpy as np
cap = cv2.VideoCapture("c.mp4")
while(True):
    ret, img_rgb = cap.read()
    img_rgb = img_rgb[400:1200, 10:1000]
    img_rgb = cv2.imread('ccc.jpg')
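    # Note: the line above replaces the captured video frame with the static image 'ccc.jpg',
    # so the template match below always runs on that image rather than on the video frames.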
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread('job/ff2.jpg', 0)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    loc = np.where(res >= threshold)
    for pt in zip(*loc[::-1]):
        print("yes")
        cv2.rectangle(img_gray, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
    cv2.imshow('res.png', img_gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
My object is detected
but when I use the video it does not find my object in the video: https://youtu.be/O1IB0dnDrWw
Any help, please.