how to stop printing repeated values in this code - python

import numpy as np
import cv2
from pyzbar import pyzbar
def read_barcodes(frame):
processed =[]
barcodes = pyzbar.decode(frame)
for barcode in barcodes:
x, y , w, h = barcode.rect
barcode_info = barcode.data.decode('utf-8')
cv2.rectangle(frame, (x, y),(x+w, y+h), (0, 255, 0), 2)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, barcode_info, (x + 6, y - 6), font, 2.0, (255, 255, 255), 1)
#my_list = [barcode_info]
#myFinalList = np.unique(my_list).tolist()
#print(myFinalList)
#print(barcode_info)
with open("barcode_result.txt", mode ='w') as file:
file.write("Recognized Barcode:" + barcode_info)
return frame
def main():
camera = cv2.VideoCapture(0)
ret, frame = camera.read()
while ret:
ret, frame = camera.read()
frame = read_barcodes(frame)
cv2.imshow('Barcode/QR code reader', frame)
if cv2.waitKey(1) & 0xFF == 27:
break
camera.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
how to break the loop from printing the repeating values

Related

cv2.findcontours returning none in Motion Detector App

I followed a video online about motion detection using openCV however I came across the problem that the findContours function is not returning a value. Any help is appreceated.
Here is the code:
import cv2
import time
import datetime
import imutils
def motion_detection():
video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
time.sleep(2)
first_frame = None
while True:
frame = video_capture.read()[1]
text = 'Unoccupied'
greyscale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gaussian_frame = cv2.GaussianBlur(greyscale_frame, (21, 21), 0)
blur_frame = cv2.blur(gaussian_frame, (5, 5))
greyscale_image = blur_frame
if first_frame is None:
first_frame = greyscale_image
else:
pass
frame = imutils.resize(frame, width=500)
frame_delta = cv2.absdiff(first_frame, greyscale_image)
# edit the ** thresh ** depending on the light/dark in room,
# change the 100(anything pixel value over 100 will become 255(white)
thresh = cv2.threshold(frame_delta, 100, 255, cv2.THRESH_BINARY)[1]
dilate_image = cv2.dilate(thresh, None, iterations=2)
cnt = cv2.findContours(dilate_image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1]
for c in cnt:
if cv2.contourArea(c) > 800:
(x, y, w, h) = cv2.boundingRect(
c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = 'Occupied'
# text that appears when there is motion in video feed
else:
pass
''' now draw text and timestamp on security feed '''
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, '{+} Room Status: %s' % text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime('%A %d %B %Y %I:%M:%S%p'),
(10, frame.shape[0] - 10), font, 0.35, (0, 0, 255), 1)
cv2.imshow('Security Feed', frame)
cv2.imshow('Threshold(foreground mask)', dilate_image)
cv2.imshow('Frame_delta', frame_delta)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
cv2.destroyAllWindows()
break
if __name__ == '__main__':
motion_detection()
I have tried to debug and find the problem the code is exactly what the video said to write and I have had no luck.

DNN OpenCV Python using RSTP always crash after few minutes

Description:
I want to create a people counter using DNN. The model I'm using is MobileNetSSD. The camera I use is IPCam from Hikvision. Python communicates with IPCam using the RSTP protocol.
The program that I made is good and there are no bugs, when running the sample video the program does its job well. But when I replaced it with IPcam there was an unknown error.
Error:
Sometimes the error is:
[h264 # 000001949f7adfc0] error while decoding MB 13 4, bytestream -6
[h264 # 000001949f825ac0] left block unavailable for requested intra4x4 mode -1
[h264 # 000001949f825ac0] error while decoding MB 0 17, bytestream 762
Sometimes the error does not appear and the program is killed.
Update Error
After revising the code, I caught the error. The error found is
[h264 # 0000019289b3fa80] error while decoding MB 4 5, bytestream -25
Now I don't know what to do, because the error is not in Google.
Source Code:
Old Code
This is my very earliest code before getting suggestions from the comments field.
import time
import cv2
import numpy as np
import math
import threading
print("Load MobileNeteSSD model")
prototxt = "MobileNetSSD_deploy.prototxt"
model = "MobileNetSSD_deploy.caffemodel"
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
net = cv2.dnn.readNetFromCaffe(prototxt, model)
pos_line = 0
offset = 50
car = 0
detected = False
check = 0
prev_frame_time = 0
def detect():
global check, car, detected
check = 0
if(detected == False):
car += 1
detected = True
def center_object(x, y, w, h):
cx = x + int(w / 2)
cy = y + int(h / 2)
return cx, cy
def process_frame_MobileNetSSD(next_frame):
global car, check, detected
rgb = cv2.cvtColor(next_frame, cv2.COLOR_BGR2RGB)
(H, W) = next_frame.shape[:2]
blob = cv2.dnn.blobFromImage(next_frame, size=(300, 300), ddepth=cv2.CV_8U)
net.setInput(blob, scalefactor=1.0/127.5, mean=[127.5, 127.5, 127.5])
detections = net.forward()
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > 0.5:
idx = int(detections[0, 0, i, 1])
if CLASSES[idx] != "person":
continue
label = CLASSES[idx]
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")
center_ob = center_object(startX, startY, endX-startX, endY-startY)
cv2.circle(next_frame, center_ob, 4, (0, 0, 255), -1)
if center_ob[0] < (pos_line+offset) and center_ob[0] > (pos_line-offset):
# car+=1
detect()
else:
check += 1
if(check >= 5):
detected = False
cv2.putText(next_frame, label+' '+str(round(confidence, 2)),
(startX, startY-10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.rectangle(next_frame, (startX, startY),
(endX, endY), (0, 255, 0), 3)
return next_frame
def PersonDetection_UsingMobileNetSSD():
cap = cv2.VideoCapture()
cap.open("rtsp://admin:Admin12345#192.168.100.20:554/Streaming/channels/2/")
global car,pos_line,prev_frame_time
frame_count = 0
while True:
try:
time.sleep(0.1)
new_frame_time = time.time()
fps = int(1/(new_frame_time-prev_frame_time))
prev_frame_time = new_frame_time
ret, next_frame = cap.read()
w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
pos_line = int(h_video/2)-50
if ret == False: break
frame_count += 1
cv2.line(next_frame, (int(h_video/2), 0),
(int(h_video/2), int(h_video)), (255, 127, 0), 3)
next_frame = process_frame_MobileNetSSD(next_frame)
cv2.rectangle(next_frame, (248,22), (342,8), (0,0,0), -1)
cv2.putText(next_frame, "Counter : "+str(car), (250, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.putText(next_frame, "FPS : "+str(fps), (0, int(h_video)-10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.imshow("Video Original", next_frame)
# print(car)
except Exception as e:
print(str(e))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print("/MobileNetSSD Person Detector")
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
t1 = threading.Thread(PersonDetection_UsingMobileNetSSD())
t1.start()
New Code
I have revised my code and the program still stops taking frames. I just revised the PersonDetection_UsingMobileNetSSD() function. I've also removed the multithreading I was using. The code has been running for about 30 minutes but after a broken frame, the code will never re-execute the program block if ret == True.
def PersonDetection_UsingMobileNetSSD():
cap = cv2.VideoCapture()
cap.open("rtsp://admin:Admin12345#192.168.100.20:554/Streaming/channels/2/")
global car,pos_line,prev_frame_time
frame_count = 0
while True:
try:
if cap.isOpened():
ret, next_frame = cap.read()
if ret:
new_frame_time = time.time()
fps = int(1/(new_frame_time-prev_frame_time))
prev_frame_time = new_frame_time
w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
pos_line = int(h_video/2)-50
# next_frame = cv2.resize(next_frame,(720,480),fx=0,fy=0, interpolation = cv2.INTER_CUBIC)
if ret == False: break
frame_count += 1
cv2.line(next_frame, (int(h_video/2), 0),
(int(h_video/2), int(h_video)), (255, 127, 0), 3)
next_frame = process_frame_MobileNetSSD(next_frame)
cv2.rectangle(next_frame, (248,22), (342,8), (0,0,0), -1)
cv2.putText(next_frame, "Counter : "+str(car), (250, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.putText(next_frame, "FPS : "+str(fps), (0, int(h_video)-10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.imshow("Video Original", next_frame)
# print(car)
else:
print("Crashed Frame")
else:
print("Cap is not open")
except Exception as e:
print(str(e))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print("/MobileNetSSD Person Detector")
cap.release()
cv2.destroyAllWindows()
Requirement:
Hardware : Intel i5-1035G1, RAM 8 GB, NVIDIA GeForce MX330
Software : Python 3.6.2 , OpenCV 4.5.1, Numpy 1.16.0
Question:
What should i do for fixing this error?
What causes this to happen?
Best Regards,
Thanks
The main problem here is that RSTP always has some corrupted frames in it. The solution is to run video capture on thread 1 and video processing on thread 2.
As an example:
import cv2
import threading
import queue
q=queue.Queue()
def this_receive(q):
cap = cv2.VideoCapture("rtsp://admin:Admin12345#192.168.10.20:554/Streaming/channels/2/")
cap.set(cv2.CAP_PROP_FPS, 5)
ret, next_frame = cap.read()
q.put(next_frame)
while ret:
ret, next_frame = cap.read()
w_video = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h_video = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
q.put(next_frame)
def main_program(q):
while True:
try:
if q.empty() != True:
next_frame=q.get()
except Exception as e:
print(str(e))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == "__main__":
print("Main Program")
p2 = threading.Thread(target=this_receive,args=((q),))
p2.start()
p1 = threading.Thread(target=main_program,args=((q),))
p1.start()
This example will work according to the case you are experiencing. Damage to the frame will not affect the quality of data processing. It's just that this method can cause delays in processing. Time on video and real time have a delay of up to 10 minutes. Want to know what kind of delay? Just try it!

Python CV2 video writer doesn't save video

I have a simple python code using OpenCV and Keras that performs some detections on frames (follow-up from my previous question here). But when I want to record and save the frames as a video using video_writer, the generated video is empty.
What is wrong in the video_writer?
#........some code
# start the webcam feed
cap = cv2.VideoCapture(1)
canvasImageOriginal = cv2.imread("fg2.png")
canvasImage = cv2.imread("fg2.png")
canvasHappy = cv2.imread("fg2happy.png")
canvasSad = cv2.imread("fg2sad.png")
x0, x1 = 330, 1290
y0, y1 = 155, 700
#=========
w=960#int(cap.get(cv2.CV_CAP_PROP_FRAME_WIDTH ))
h=540#int(cap.get(cv2.CV_CAP_PROP_FRAME_HEIGHT ))
# video recorder
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter('output.avi', fourcc, 25.0, (w, h))
#=========
prediction_history = []
LOOKBACK = 5 # how far you want to look back
counter = 0
while True:
# Find haar cascade to draw bounding box around face
ret, frame = cap.read()
frame=cv2.flip(frame,3)
if not ret:
break
facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
prediction = model.predict(cropped_img)
maxindex = int(np.argmax(prediction))
text = emotion_dict[maxindex]
print(prediction[0][3])
prediction_history.append(maxindex)
most_common_index = max(set(prediction_history[-LOOKBACK:][::-1]), key = prediction_history.count)
text = emotion_dict[most_common_index]
#if ("Sad" in text) or ("Angry" in text) or ("Disgusted" in text):
# text = "Sad"
if ("Happy" in text) or ("Sad" in text) :
cv2.putText(frame, text, (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
if ("Happy" in text):
counter= counter+1
if counter == 10:
#print("Happy!")
canvasImage = canvasHappy
else:
counter = 0
canvasImage = canvasImageOriginal
dim = (800,480)
frame_shrunk = cv2.resize(frame, (x1 - x0, y1 - y0))
canvasImage[y0:y1, x0:x1] = frame_shrunk
#cv2.imshow('Video', cv2.resize(frame,dim,interpolation = cv2.INTER_CUBIC))
cv2.imshow('Demo', canvasImage)
video_writer.write(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
video_writer.release()
cv2.destroyAllWindows()
As it is mentioned above, please check print(frame.shape).
When I did it, I saw (300,450,3), and I changed the resolution of videowriter as (450,300) and it worked for me. As a result, I can say that frame.shape=(y, x, maybe color) but the resolution of videowriter=(x,y).

Saving Metadata after object detection with opencv

I am fairly new to this topic but would like to safe the counts of "objects" found within my video, simply the raw counts of objects in each frame into a text file. I have the following code that runs well, but 1) it does not safe the objects as images to my folder, which I dont understand, and 2) it would not / or where are the objects saved as metadata. Any help in this regard?
Here is the code:
import cv2
#############################################
frameWidth = 640
frameHeight = 480
nPlateCascade = cv2.CascadeClassifier("Resources/haarcascades/haarcascade_fullbody.xml")
minArea = 200
color = (255, 0, 255)
###############################################
cap = cv2.VideoCapture("Resources/nyc.mp4")
cap.set(3, frameWidth)
cap.set(4, frameHeight)
cap.set(10, 150)
count = 0
while True:
success, img = cap.read()
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
numberPlates = nPlateCascade.detectMultiScale(imgGray, 1.1, 10)
for (x, y, w, h) in numberPlates:
area = w * h
if area > minArea:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
cv2.putText(img, "Object", (x, y - 5),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 2)
imgRoi = img[y:y + h, x:x + w]
cv2.imshow("ROI", imgRoi)
cv2.imshow("Result", img)
if cv2.waitKey(1) and 0xFF == ord('s'):
cv2.imwrite("Resources/Output/" + str(count) + ".jpg", imgRoi)
cv2.rectangle(img, (0, 200), (640, 300), (0, 255, 0), cv2.FILLED)
cv2.putText(img, "Scan Saved", (150, 265), cv2.FONT_HERSHEY_DUPLEX,
2, (0, 0, 255), 2)
cv2.imshow("Result", img)
cv2.waitKey(500)
count += 1
if cv2.waitKey(1) and 0xFF == ord('s')
that's the issue right there.
and is a logical/boolean operator and has lower precedence than ==. what's written there means
cv2.waitKey(1) and (0xFF == ord('s'))
I think you don't mean that.
You probably wanted the & operator which is a bitwise operator (operates on integers) and has higher priority than ==.
cv2.waitKey(1) & 0xFF == ord('s') # is equivalent to
(cv2.waitKey(1) & 0xFF) == ord('s')

add buttons pause stop rec with opencv and gtk

Hello everyone I need your help for a problem that comes up I have the following code where my webcam is activated and starts recording without any problem.
But I need to create three bots of Record, Pause and Stop, with gtk, but I do not know how I can do it with the code
import cv2
import numpy as np
from datetime import datetime
import gtk
def show_webcam(mirror=False):
cam = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
frame_width = int(cam.get(3))
frame_height = int(cam.get(4))
filename = datetime.now().strftime("%Y-%m-%d_%H.%M.%S") + ".avi"
out = cv2.VideoWriter(filename,fourcc, 30, (frame_width,frame_height))
if event == cv2.EVENT_LBUTTONDOWN:
while True:
ret_val, img = cam.read()
if mirror:
img = cv2.flip(img, 1)
cv2.imshow('Grabacion de Audiencias', img)
out.write(img)
if cv2.waitKey(1) == 27:
break # esc to quit
elif event == cv2.EVENT_RBUTTONDOWN:
pass
cam.release()
out.release()
cv2.destroyAllWindows()
I'm waiting for your help because it's for a project and nothing has been achieved
I managed to use the keyboard to pause the video and continue the recording, in addition to informing the user with visual cues, but I have not managed to create these actions by means of bonuses, using gtk so that the end user can record without problem. help
import cv2
import numpy as np
from datetime import datetime
import gtk
import keyboard
flagrecord=True
def show_webcam(flagrecord):
cam = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
frame_width = int(cam.get(3))
frame_height = int(cam.get(4))
FONT = cv2.FONT_HERSHEY_PLAIN
filename = datetime.now().strftime("%Y-%m-%d_%H.%M.%S") + ".avi"
out = cv2.VideoWriter(filename,fourcc, 30, (frame_width,frame_height))
while True:
ret_val, img = cam.read()
title = datetime.now().strftime("%Y-%m-%d*%H:%M:%S")
if flagrecord:
img = cv2.flip(img,1)
cv2.putText(img, "REC", (40,40), FONT, 3 , (0,0,255), 2)
cv2.circle(img, (20,20), 10 , (0,0,255), -1)
cv2.rectangle(img, (30,430),(600,480),(0,0,0), -1)
cv2.putText(img, title, (40,470), FONT, 3 , (255,255,255), 2)
cv2.imshow('Grabacion de Audiencias', img)
out.write(img)
else:
img = cv2.flip(img,1)
cv2.putText(img, "PAUSE", (40,40), FONT, 3 , (255,0,0), 2)
cv2.circle(img, (20,20), 10 , (255,0,0), -1)
cv2.rectangle(img, (50,430),(570,480),(0,0,0), -1)
cv2.putText(img, "Audiencias En Pausa", (60,470), FONT, 3 , (255,255,255), 2)
cv2.imshow('Grabacion de Audiencias', img)
if cv2.waitKey(1) == 27:
break
if keyboard.is_pressed('p'):
flagrecord=False
if keyboard.is_pressed('c'):
flagrecord=True
if keyboard.is_pressed('q'):
break
cam.release()
out.release()
cv2.destroyAllWindows()
def main():
show_webcam(mirror=True)
if __name__ == '__main__':
main()
Use cv2.waitKey instead Keyboard lib. You can do this changes:
q = cv2.waitKey(1)
if q == 27:
break
if q == ord('p'):
flagrecord=False
if q == ord('c'):
flagrecord=True
if q == ord('q'):
break

Categories

Resources