Python OpenCV crashes when trying to read a video

When I call the .read() method, my program crashes (with the .avi format). The program runs with other formats (.ogv, .mkv), but in those cases last_frame is None. Is there any solution to fix these errors?
import cv2
import numpy as np

in_file = r'C:\Users\Johnny\Desktop\zola\animation.avi'
print(in_file)
cap = cv2.VideoCapture(in_file)
print(cap)
ret, last_frame = cap.read()  # the program works without this line
try:
    if not cap.isOpened(): raise("damn!")
except TypeError as err:
    print(err)
Output:
C:\Users\Johnny\Desktop\zola\animation.ogv
<VideoCapture 02843110>
exceptions must be old-style classes or derived from BaseException, not str
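One note on the traceback shown above: raise requires an exception class or instance, not a plain string, which is exactly what produces the "exceptions must be ... not str" TypeError. A minimal corrected sketch is below; the codec-related .avi crash itself happens inside OpenCV's native code, so a Python try/except cannot catch it, but checking isOpened() and ret before using the frame avoids the None case:

import cv2

in_file = r'C:\Users\Johnny\Desktop\zola\animation.avi'
cap = cv2.VideoCapture(in_file)
if not cap.isOpened():
    # raise a real exception instance instead of a bare string
    raise IOError("could not open {}".format(in_file))
ret, last_frame = cap.read()
if not ret or last_frame is None:
    # .read() signals failure via ret rather than raising
    raise IOError("could not read a frame (missing codec?)")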

Related

wx.Image constructor shows popup instead of raising an exception

When I run this Python code:
import io, wx

with open("imgs/ad.png", "rb") as fid:
    img_stream = io.BytesIO(fid.read())
try:
    img = wx.Image(img_stream, type=wx.BITMAP_TYPE_PNM)
except:
    pass
it results in a message box saying "Error: This is not a PNM file." instead of raising an exception. Is it possible to have wx raise an exception instead?
Test the result within the try ... except ... statement and raise an error manually if required.
wx.Image handles its own exceptions internally so that it can report the error in a wx message box:
import io, wx

app = wx.App()
with open("a.png", "rb") as fid:
    img_stream = io.BytesIO(fid.read())
try:
    img = wx.Image(img_stream, type=wx.BITMAP_TYPE_PNM)
    if img.IsOk():
        conv_result = True
    else:
        raise Exception("Image conversion error!")
except Exception as e:
    conv_result = False
print(conv_result)
As @Rolf-of-Saxony pointed out, the message box is due to wxPython logging. Two basic options for suppressing it are the temporary creation of a wx.LogNull() object
import io
import wx

with open("imgs/Olympic_flag.svg", "rb") as fid:
    img_stream = io.BytesIO(fid.read())
noLog = wx.LogNull()
img = wx.Image(img_stream)
if img.IsOk():
    pass
else:
    pass
del noLog
or calling wx.Log.EnableLogging()
import io
import wx

with open("imgs/Olympic_flag.svg", "rb") as fid:
    img_stream = io.BytesIO(fid.read())
wx.Log.EnableLogging(False)
img = wx.Image(img_stream)
if img.IsOk():
    pass
else:
    pass
wx.Log.EnableLogging(True)

Correctly converting base64 bytes into string and displaying in cv2.imshow

I am struggling to find a solution for this:
I'm trying to create an image streaming system where I can get all the frames and pass them through a neural network, but somehow I haven't managed to get proper base64 image strings from my functions below.
The provided code works perfectly if I just display the decoded image from the stream directly, instead of passing it through my functions that convert it to base64, read it back in memory, and have cv2 show it.
The server functions responsible for converting to and decoding base64 are described below:
Convert an image object from the stream into base64 bytes and then to one string (this is working as intended):
def convertImgBase64(image):
    try:
        imgString = base64.b64encode(image).decode('utf-8')
        print('converted successfully')
        return imgString
    except os.error as err:
        print(f"Error:'{err}'")
Base64 decoder that should convert the string back to a readable, cv2-compatible frame (here is where the error begins):
def readb64(base64_string):
    storage = '/home/caio/Desktop/img/'
    try:
        sbuf = BytesIO()
        sbuf.write(base64.b64decode(str(base64_string)))
        pimg = im.open(sbuf)
        out = open('arq.jpeg', 'wb')
        out.write(sbuf.read())  # note: sbuf is positioned at its end here, so this reads b''
        out.close()
        print('read b64 string')
        return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    except os.error as err:
        print(f"Error:'{err}'")
This is the current server I am building; before proceeding I need to get the frame capture working correctly.
from io import BytesIO, StringIO
import numpy as np
import cv2
from imutils.video import FPS
import imagezmq
import base64
import darknet
import os
from PIL import Image as im
from numpy import asarray
from time import sleep

# imagezmq protocol receiver from client
image_hub = imagezmq.ImageHub()

def convertImgBase64(image):
    try:
        imgString = base64.b64encode(image).decode('utf-8')
        return imgString
    except os.error as err:
        print(f"Error:'{err}'")

def readb64(base64_string):
    try:
        sbuf = BytesIO()
        sbuf.write(base64.b64decode(str(base64_string)))
        pimg = im.open(sbuf)
        return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    except os.error as err:
        print(f"Error:'{err}'")

def capture_img():
    while True:
        camera, jpg_buffer = image_hub.recv_jpg()
        buffer = np.frombuffer(jpg_buffer, dtype='uint8')
        imagedecoder = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
        img = im.fromarray(imagedecoder)
        try:
            string = convertImgBase64(imagedecoder)
            cvimg = readb64(string)
            # cv2.imshow(camera, cvimg)  # this is the line where it's not working!
        except os.error as err:
            print(f"Error:'{err}'")
        cv2.imshow(camera, imagedecoder)
        cv2.waitKey(1)  # cv2 won't work without this
        image_hub.send_reply(b'OK')  # imageZMQ needs an acknowledgement that it's ok
Client code (raspberry pi code) is given below:
import sys
import socket
import time
import cv2
from imutils.video import VideoStream
import imagezmq
import argparse

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--server-ip", required=True,
                help="ip address of the server to which the client will connect")
args = vars(ap.parse_args())

# initialize the ImageSender object with the socket address of the server
sender = imagezmq.ImageSender(connect_to="tcp://{}:5555".format(
    args["server_ip"]))
# use either of the formats below to specify the address of the display computer
# sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.190:5555')

rpi_name = socket.gethostname()  # send RPi hostname with each image
vs = VideoStream(usePiCamera=True, resolution=(800, 600)).start()
time.sleep(2.0)  # allow camera sensor to warm up
jpeg_quality = 95  # 0 to 100, higher is better quality, 95 is cv2 default
while True:  # send images as stream until Ctrl-C
    image = vs.read()
    ret_code, jpg_buffer = cv2.imencode(
        ".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
    sender.send_jpg(rpi_name, jpg_buffer)
My error output now is like: [error screenshot not included]
I have been trying solutions from here and here.
If you know another, better way to pass an image object that I can use for processing inside the yolo/darknet neural network, that would be awesome!
Thanks!
The answers provided by @Christoph Rackwitz are correct. The design of ImageZMQ is to send and receive OpenCV images WITHOUT any base64 encoding. The ImageSender class sends OpenCV images. The ImageHub class receives OpenCV images. Optionally, ImageZMQ can send a jpg buffer (as your Raspberry Pi client code is doing).
Your Raspberry Pi client code is based on the ImageZMQ "send jpg" example.
Your server code should therefore use the matching ImageZMQ "receive jpg" example.
The essence of the ImageZMQ "receive jpg" example code is:
import numpy as np
import cv2
import imagezmq

image_hub = imagezmq.ImageHub()
while True:  # show streamed images until Ctrl-C
    rpi_name, jpg_buffer = image_hub.recv_jpg()
    image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
    # see opencv docs for info on -1 parameter
    cv2.imshow(rpi_name, image)  # 1 window for each RPi
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')
No base64 decoding required. The variable image already contains an OpenCV image. (FYI, I am the author of ImageZMQ)
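For reference, ImageZMQ also supports sending OpenCV images without any jpg compression at all, via its documented send_image/recv_image pair. A minimal sketch of that variant (the connect address is just an example):

# client: send uncompressed OpenCV images
import socket
import imagezmq
from imutils.video import VideoStream

sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.190:5555')
rpi_name = socket.gethostname()
vs = VideoStream(usePiCamera=True).start()
while True:
    sender.send_image(rpi_name, vs.read())

# server: receive uncompressed OpenCV images
import cv2
import imagezmq

image_hub = imagezmq.ImageHub()
while True:
    rpi_name, image = image_hub.recv_image()
    cv2.imshow(rpi_name, image)
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')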

How to pass video stream from one Python script to another?

In my previous post, we found a way to pass an image file from one Python script to another:
pass video data from one python script to another
I am now trying to pass a video (successive images):
write.py
import sys
import numpy as np
import cv2
from PIL import Image
import io
import time

while True:
    img = cv2.imread('cat.jpg')
    bimg = cv2.imencode('.jpg', img)[1]
    sys.stdout.buffer.write(bimg)
    sys.stdout.flush()
    time.sleep(1)
read.py:
import sys
from PIL import Image
import io
import cv2
import numpy as np
from io import BytesIO

while True:
    data = sys.stdin.buffer.read()
    img_np = cv2.imdecode(np.frombuffer(BytesIO(data).read(), np.uint8), cv2.IMREAD_UNCHANGED)
    cv2.imshow('image', img_np)
    cv2.waitKey(0)
If I output the write.py data to the terminal, it prints. If I manually hand data to read.py, it gets read. But put them together (python3 write.py | python3 read.py) and it just hangs: write.py only writes once, and read.py never seems to get anything.
My guess is that the read code is waiting for the write code to "end" before it wraps up the data package and calls it an image. Though if that were the case, I would think that doing a flush would fix it.
I think I figured it out. In read.py, sys.stdin.buffer.read() reads and waits until the stdin pipe is closed, but write.py never actually closes its stdout because of the while True loop. This simplified proof-of-concept example demonstrates it:
write.py
import sys
import time

sys.stdout.buffer.write(b"Hello world")
sys.stdout.buffer.flush()
# Note: if we comment out the code below, it works again
while True:
    # Keep this alive but don't have `while True: pass`
    # because my computer might crash :D
    time.sleep(10)
and read.py
import sys

with open("output.txt", "w") as file:
    file.write(sys.stdin.read())
This will also hang and never actually write anything to "output.txt". If we remove the while True loop from write.py, the code will no longer hang and "Hello World" will be written to "output.txt", because when write.py finishes writing it will close its process, and that will close the pipe. To fix this issue I recommend changing read.py to something like this:
import sys

while True:
    with open("output.txt", "a") as file:
        file.write(sys.stdin.read(1))
Solution:
write.py
import sys
import time

MAX_FILE_SIZE = 16  # bytes

msg = b"Hello world"
# Tell `reader.py` that it needs to read x number of bytes.
length = len(msg)
# We also need to tell `read.py` how many bytes it needs to read.
# This means that we have reached the same problem as before.
# To fix that issue we are always going to send the number of bytes but
# we are going to pad it with `0`s at the start.
# https://stackoverflow.com/a/339013/11106801
length = str(length).zfill(MAX_FILE_SIZE)
sys.stdout.buffer.write(length.encode())
sys.stdout.buffer.write(msg)
sys.stdout.buffer.flush()

# We also need to tell `read.py` that it was the last file that we sent.
# Sending `1` means that the file has ended.
sys.stdout.buffer.write(b"1")
sys.stdout.buffer.flush()

# Note: if we comment out the code below, it works again
while True:
    # Keep this alive but don't have `while True: pass`
    # because my computer might crash :D
    time.sleep(10)
and read.py
import sys
import time

MAX_FILE_SIZE = 16  # bytes

while True:
    time.sleep(1)  # Make sure `write.py` has sent the data
    # Read `MAX_FILE_SIZE` number of bytes and convert it to an int
    # so that we know the size of the file coming in
    length = int(sys.stdin.buffer.read(MAX_FILE_SIZE))
    time.sleep(1)  # Make sure `write.py` has sent the data
    # Here you can switch to a different file every time `writer.py`
    # sends a new file
    with open("output.txt", "wb") as file:
        file.write(sys.stdin.buffer.read(length))
    file_ended = sys.stdin.buffer.read(1)
    if file_ended == b"1":
        # File has ended
        break
    else:
        # We are going to start reading again for the next file
        pass
Edit:
The solution works like this:
1. Send the size of the file.
2. Send the actual file data.
3. Send a byte that tells read.py whether it should expect another file or not.
For part 1, we just encode the length of the file as a string padded with 0s at the front. Note: make sure that MAX_FILE_SIZE is larger than the size of the largest file (very large numbers will slightly decrease performance). For part 3, sending a "1" means that there are no more files to be sent; otherwise reader.py will wait and accept the next file. So write.py becomes:
from math import log
import time
import sys
import cv2

MAX_FILE_SIZE = 62914560  # bytes
MAX_FILE_SIZE = int(log(MAX_FILE_SIZE, 2) + 1)

def write_file(buffer, data, last_file=False):
    # Tell `reader.py` that it needs to read x number of bytes.
    length = len(data)
    # We also need to tell `read.py` how many bytes it needs to read.
    # This means that we have reached the same problem as before.
    # To fix that issue we are always going to send the number of bytes but
    # we are going to pad it with `0`s at the start.
    # https://stackoverflow.com/a/339013/11106801
    length = str(length).zfill(MAX_FILE_SIZE)
    with open("output.txt", "w") as file:
        file.write(length)
    buffer.write(length.encode())
    # Write the actual data
    buffer.write(data)
    # We also need to tell `read.py` that it was the last file that we sent.
    # Sending `1` means that the file has ended.
    buffer.write(str(int(last_file)).encode())
    buffer.flush()

while True:
    img = cv2.imread("img.jpg")
    bimg = cv2.imencode(".jpg", img)[1]
    # Call write_file
    write_file(sys.stdout.buffer, bimg, last_file=False)
    # time.sleep(1)  # Don't need this
and read.py will become:
from io import BytesIO
from math import log
import numpy as np
import time
import cv2
import sys

MAX_FILE_SIZE = 62914560  # bytes
MAX_FILE_SIZE = int(log(MAX_FILE_SIZE, 2) + 1)

def read(buffer, number_of_bytes):
    output = b""
    while len(output) < number_of_bytes:
        output += buffer.read(number_of_bytes - len(output))
    assert len(output) == number_of_bytes, "An error occurred."
    return output

def read_file(buffer):
    # Read `MAX_FILE_SIZE` number of bytes and convert it to an int
    # so that we know the size of the file coming in
    length = int(read(buffer, MAX_FILE_SIZE))
    # Here you can switch to a different file every time `writer.py`
    # sends a new file
    data = read(buffer, length)
    # Read a byte so that we know if it is the last file
    file_ended = read(buffer, 1)
    return data, (file_ended == b"1")

while True:
    print("Reading file")
    data, last_file = read_file(sys.stdin.buffer)
    img_np = cv2.imdecode(np.frombuffer(BytesIO(data).read(), np.uint8),
                          cv2.IMREAD_UNCHANGED)
    cv2.imshow("image", img_np)
    cv2.waitKey(0)
    if last_file:
        break
You have mentioned that your image to send is not a consistent size, but I have to assume that if it's coming from the same camera (for a given video stream) the raw image size does not change; rather, just the compressed image size changes. You likely have plenty of RAM to store at least one uncompressed frame in memory at a time, and you're just introducing processing overhead with all the compression and decompression.
Given that, I would create a shared buffer using multiprocessing.shared_memory which can share frames between the two processes (you can even create a circular buffer of a couple of frames if you want to get real fancy and prevent screen tearing, but it wasn't a big problem in my test).
Given that cv2.VideoCapture().read() can read straight into an existing array, and that you can create a numpy array which uses the shared memory as its buffer, you can read the data into the shared memory with zero extra copying. Using this I was able to read nearly 700 frames per second from a video file encoded with H.264 at 1280x688 resolution.
from multiprocessing.shared_memory import SharedMemory
import cv2
from time import sleep
import numpy as np

vid_device = r"D:\Videos\movies\GhostintheShell.mp4"  # a great movie

# get the first frame to calculate size
cap = cv2.VideoCapture(vid_device)
success, frame = cap.read()
if not success:
    raise Exception("error reading from video")

# create a shared memory for sending the frame shape
frame_shape_shm = SharedMemory(name="frame_shape", create=True,
                               size=frame.ndim*4)  # 4 bytes per dim as long as int32 is big enough
frame_shape = np.ndarray(3, buffer=frame_shape_shm.buf, dtype='i4')
frame_shape[:] = frame.shape

# create the shared memory for the frame buffer
frame_buffer_shm = SharedMemory(name="frame_buffer", create=True, size=frame.nbytes)
frame_buffer = np.ndarray(frame_shape, buffer=frame_buffer_shm.buf, dtype=frame.dtype)

input("writer is ready: press enter once reader is ready")
try:  # use KeyboardInterrupt to quit
    while True:
        cap.read(frame_buffer)  # read data into the frame buffer
        # sleep(1/24)  # limit framerate-ish (hitting the actual framerate is more complicated than 1 line)
except KeyboardInterrupt:
    pass

# cleanup: IMPORTANT, close this one first so the reader doesn't unlink() the
# shm's before this file has exited. (less important on windows)
cap.release()
frame_buffer_shm.close()
frame_shape_shm.close()
The reader process looks very similar, but instead of creating a video device and reading frames, we just construct the shared array and imshow a bunch. The GUI isn't quite as fast as just dumping the data, so we don't get quite 700 fps, but up to 500s isn't bad...
from multiprocessing.shared_memory import SharedMemory
import cv2
import numpy as np

# create a shared memory for reading the frame shape
frame_shape_shm = SharedMemory(name="frame_shape")
frame_shape = np.ndarray([3], buffer=frame_shape_shm.buf, dtype='i4')

# create the shared memory for the frame buffer
frame_buffer_shm = SharedMemory(name="frame_buffer")
# create the framebuffer using the shm's memory
frame_buffer = np.ndarray(frame_shape, buffer=frame_buffer_shm.buf, dtype='u1')

try:
    while True:
        cv2.imshow('frame', frame_buffer)
        cv2.waitKey(1)  # this is needed for cv2 to update the gui
except KeyboardInterrupt:
    pass

# cleanup: IMPORTANT, the writer process should close before this one, so nothing
# tries to access the shm after unlink() is called. (less important on windows)
frame_buffer_shm.close()
frame_buffer_shm.unlink()
frame_shape_shm.close()
frame_shape_shm.unlink()
EDIT: the user's other questions suggested that a version of Python earlier than 3.8 may be a requirement (or even working across versions), so here's an example of using posix_ipc in place of multiprocessing.shared_memory to create the frame buffer (and how to clean it up):
import mmap
import posix_ipc
import numpy as np

# creation
shm = posix_ipc.SharedMemory(name="frame_buf",
                             flags=posix_ipc.O_CREX,  # if this fails, cleanup didn't happen properly last time
                             size=frame.nbytes)
shm_map = mmap.mmap(shm.fd, shm.size)
buf = memoryview(shm_map)

# create the frame buffer
frame_buffer = np.ndarray(frame.shape, buffer=buf, dtype=frame.dtype)
frame_buffer[:] = frame[:]  # copy the first frame into the frame buffer

# cleanup
shm.close_fd()  # can happen right after opening the mmap
buf.release()  # must happen after frame_buffer is no longer needed and before closing the mmap
shm_map.close()
shm.unlink()  # must only be called from one of the two processes; unlink tells the os to reclaim the space once all handles are closed
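For completeness, the reader side would attach to the same segment rather than create it. A minimal sketch, under the assumption that the writer has already created "frame_buf" and that frame_shape and frame_dtype are communicated to the reader out of band (e.g. via a second segment, as in the multiprocessing.shared_memory example above):

import mmap
import posix_ipc
import numpy as np

# attach to the existing segment (no O_CREX flag; size comes from the segment)
shm = posix_ipc.SharedMemory(name="frame_buf")
shm_map = mmap.mmap(shm.fd, shm.size)
buf = memoryview(shm_map)
shm.close_fd()

# frame_shape and frame_dtype are assumed known to the reader
frame_buffer = np.ndarray(frame_shape, buffer=buf, dtype=frame_dtype)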
Two Solutions: ZeroMQ | DiskCache
It is quite easy to send frames from one Python file to another using ZeroMQ.
ZeroMQ
Install via PyPI: pip install -U pyzmq. There are multiple ways to send frames.
This is an example using a PUBLISHER and a SUBSCRIBER:
# writer | publisher
import base64
import time
import zmq
import cv2

# Prepare our context and publisher
context = zmq.Context()
publisher = context.socket(zmq.PUB)
publisher.bind("tcp://*:5563")

CAM_INDEX_OR_URI = 0
capture = cv2.VideoCapture(CAM_INDEX_OR_URI)
assert capture.isOpened(), "Cannot open camera"

while True:
    # Write two messages, each with an envelope and content
    # capture frame-by-frame
    ret, frame = capture.read()
    if not ret:
        print("[+] No frame received. Stream ended.")
        break
    # resize the frame
    frame = cv2.resize(frame, (640, 480))
    encoded, buffer = cv2.imencode(".jpg", frame)
    # all is good
    # cv2.imshow("Frames", frame)
    # stop with Esc key (27)
    if cv2.waitKey(1) == 27:
        break
    sent_frame = base64.b64encode(buffer)
    publisher.send_multipart([b"camera_A", sent_frame])
    time.sleep(0.01)

# We never get here but clean up anyhow
publisher.close()
context.term()
capture.release()
cv2.destroyAllWindows()
# reader.py | subscriber
import numpy as np
import base64
import zmq
import cv2

# Prepare our context and subscriber
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.connect("tcp://localhost:5563")
subscriber.setsockopt_string(zmq.SUBSCRIBE, "camera_A")

while True:
    # Read envelope with address
    [address, contents] = subscriber.recv_multipart()
    receive_frame = base64.b64decode(contents)
    frame = np.frombuffer(receive_frame, dtype=np.uint8)
    frame = cv2.imdecode(frame, 1)
    cv2.namedWindow("Frames", cv2.WINDOW_NORMAL)
    cv2.imshow("Frames", frame)
    # stop with Esc key (27)
    if cv2.waitKey(1) == 27:
        break

subscriber.close()
context.term()
cv2.destroyAllWindows()
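As the ImageZMQ answer above points out, the base64 step is not actually required here: ZeroMQ messages are binary-safe, so the raw jpg buffer can be sent directly. A minimal sketch of the change, assuming the same sockets as above:

# publisher side: send the jpg bytes as-is
encoded, buffer = cv2.imencode(".jpg", frame)
publisher.send_multipart([b"camera_A", buffer.tobytes()])

# subscriber side: decode straight from the received bytes
[address, contents] = subscriber.recv_multipart()
frame = cv2.imdecode(np.frombuffer(contents, dtype=np.uint8), 1)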
DiskCache
You could also consider using diskcache. It allows passing Python objects through memory. It is like Redis, but it is all Python and does not require a server. NB: pip install --upgrade diskcache. You can tweak it to send live frames from a camera or video.
# writer.py
import time
from pathlib import Path

import diskcache as dc
import cv2

tmp = Path("/tmp/stream")
with dc.Cache(tmp) as cache:
    print(f"[+] Ready to push data to {tmp}.")
    while True:
        img = cv2.imread("cat.jpg")
        cache.push(img, expire=5)
        time.sleep(10)
# reader.py
import time
from pathlib import Path

import diskcache as dc
import cv2

tmp = Path("/tmp/stream")
with dc.Cache(tmp) as cache:
    print(f"[+] Ready to pull data from {tmp}")
    while True:
        (key, value), _ = cache.pull(expire_time=True)
        if key:
            cv2.imshow("cat", value)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        time.sleep(0.1)
I would go in these directions rather than sys.stdout/stdin, because you have total control over the stream data. See the diskcache documentation.
What about using ROS publishers and subscribers? It would be simple to implement and easy to understand, as sketched below.
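A minimal sketch of that idea, assuming ROS1 with rospy and cv_bridge installed and a roscore running (the /frames topic name is arbitrary):

# publisher.py
import rospy
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

rospy.init_node('frame_writer')
pub = rospy.Publisher('/frames', Image, queue_size=10)
bridge = CvBridge()
cap = cv2.VideoCapture(0)
rate = rospy.Rate(30)  # publish at roughly 30 Hz
while not rospy.is_shutdown():
    ret, frame = cap.read()
    if ret:
        pub.publish(bridge.cv2_to_imgmsg(frame, encoding='bgr8'))
    rate.sleep()

# subscriber.py
import rospy
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

bridge = CvBridge()

def on_frame(msg):
    # convert the ROS Image message back to an OpenCV array
    frame = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    cv2.imshow('frames', frame)
    cv2.waitKey(1)

rospy.init_node('frame_reader')
rospy.Subscriber('/frames', Image, on_frame)
rospy.spin()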

Python exits at random times without throwing exception when running OpenCV

First time asking; I haven't found any answers to this after some days of looking. Also, I'm absolutely not a programming pro, so I might have missed something obvious.
I am running code that goes through a large library of images (some 2M) and extracts information from them. The code runs fine for several iterations (20-10000ish), but then the program just stops without any errors reported.
The main code exits (terminates?) most of the time at an OpenCV edge detection or HoughLines call, but not all the time; about 10% of the time it is at some other point in the code.
Using:
OpenCV 4.1.1
Python 3.7.1
Windows 10
Image at: https://www.dropbox.com/s/5lfzkw6sqmu73eb/Image_00_00_00_00.bmp?dl=0
I have tried to get an exception from the OpenCV code in several ways, but nothing pops up. The latest attempt is in the code below.
I have tried to use the trace and log modules, but I can't really get them to do anything useful.
import numpy as np
import cv2
import traceback
import sys

def cropping_points2(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # gray image needed for edge detection
    try:
        edges = cv2.Canny(gray, 20, 70, apertureSize=3)
    except Exception as e:
        print(traceback.format_exc())
        print(sys.exc_info())
        print(e, 'there was actually an exception!')
        exit()
    lines = []
    try:
        lines = cv2.HoughLines(edges, 3, np.pi/180, 65, 50, 10, min_theta=1.4, max_theta=1.75)  # analyse image to find lines - theta setting looks for horizontal
    except Exception as e:
        print(traceback.format_exc())
        print(sys.exc_info())
        print(e, 'there was actually an exception!')
        exit()
    if lines is None:  # all-black images return a NoneType image
        print('no lines!')  # capture NoneType without crashing
        rect = []
        has_lines = False
        return rect, has_lines
    has_lines = True
    return lines[0][0], has_lines

if __name__ == "__main__":
    img = cv2.imread('Image_00_00_00_00.bmp')
    for index in range(10000):
        rect, has_lines = cropping_points2(img)
        print(rect)
        print(index)
I expect the program to give me a hint of why it stops running =|
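If the process is dying in native code (e.g. a segfault inside an OpenCV routine), a Python-level try/except will never see it; the standard-library faulthandler module can at least print a traceback showing where the crash happened, as the OpenALPR question below demonstrates. A minimal sketch of enabling it, with the dump redirected to a file so it survives the console closing (the crash.log name is just an example):

import faulthandler

# write native-crash tracebacks to a file instead of stderr
log_file = open('crash.log', 'w')
faulthandler.enable(file=log_file)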

Segmentation fault in recognize_ndarray OpenALPR

I'm trying to detect plates using openalpr + Python with an IP cam, but I'm getting the following error (the openalpr version is the open-source one; I've already tried the recognize_file function before, unsuccessfully):
Fatal Python error: Segmentation fault

Current thread 0x00007fa8c2fee740 <python> (most recent call first):
  File "/usr/lib/python2.7/dist-packages/openalpr/openalpr.py", line 184 in recognize_ndarray
  File "main9.py", line 45 in main
  File "main9.py", line 59 in <module>
Below is the code:
import numpy as np
import cv2
from openalpr import Alpr
import sys
import faulthandler; faulthandler.enable()

RTSP_SOURCE = 'rtsp://user:pass@ip:port/cam/realmonitor?channel=1&subtype=0'
WINDOW_NAME = 'openalpr'
FRAME_SKIP = 15

def main():
    alpr = Alpr("us", "/etc/openalpr/openalpr.conf", "/home/alan/openalpr/runtime_data")
    if not alpr.is_loaded():
        print('Error loading OpenALPR')
        sys.exit(1)
    alpr.set_top_n(3)
    alpr.set_default_region('pa')

    cap = cv2.VideoCapture(RTSP_SOURCE)
    cv2.namedWindow('op', cv2.WINDOW_NORMAL)
    if not cap.isOpened():
        alpr.unload()
        sys.exit('Failed to open video file!')
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
    cv2.setWindowTitle(WINDOW_NAME, 'OpenALPR video test')

    _frame_number = 0
    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            print('VideoCapture.read() failed. Exiting...')
            break
        _frame_number += 1
        if _frame_number % FRAME_SKIP != 0:
            continue
        cv2.imshow(WINDOW_NAME, frame)
        results = alpr.recognize_ndarray(frame)
        for i, plate in enumerate(results['results']):
            best_candidate = plate['candidates'][0]
            print('Plate #{}: {:7s} ({:.2f}%)'.format(i, best_candidate['plate'].upper(), best_candidate['confidence']))
        if cv2.waitKey(1) == 27:
            break

    cv2.destroyAllWindows()
    cap.release()
    alpr.unload()

if __name__ == "__main__":
    main()
Has anybody faced this error before?
I know this is a very old post, but I've recently been working on a very similar project and came across this very same issue. Experimenting with the code led me to discover that if you include the following lines of code inside a function, Python will throw a segmentation fault:
alpr =Alpr("eu","/etc/openalpr/openalpr.conf","/usr/share/openalpr/runtime_data")
alpr.unload()
Luckily, however, you only need to run these lines once in a Python script to be able to use openalpr, so run the first line at the start of your code, before the function is called, and run the second line only after you've finished using the function.
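In other words, the structure would look roughly like this (a sketch of the workaround described above, using the paths from this answer; recognize() is just an illustrative helper name):

from openalpr import Alpr

# create the Alpr instance once, at module level, not inside a function
alpr = Alpr("eu", "/etc/openalpr/openalpr.conf", "/usr/share/openalpr/runtime_data")

def recognize(frame):
    # the function only uses the already-created instance
    return alpr.recognize_ndarray(frame)

# ... call recognize() as often as needed ...

# unload once, at the very end of the script
alpr.unload()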
