Given below is the code written for getting a live stream from an IP camera.
from cv2 import *
from cv2 import cv
import urllib
import numpy as np

k = 0
capture = cv.CaptureFromFile("http://IPADDRESS of the camera/axis-cgi/mjpg/video.cgi")
namedWindow("Display", 1)

while True:
    frame = cv.QueryFrame(capture)
    if frame is None:
        print 'Cam not found'
        break
    else:
        cv.ShowImage("Display", frame)
    if k == 0x1b:
        print 'Esc. Exiting'
        break
On running the code, the output that I get is:
Cam not found
Where am I going wrong? Also, why is frame None here? Is there some problem with the conversion?
import cv2
import urllib
import numpy as np

stream = urllib.urlopen('http://localhost:8080/frame.mjpg')
bytes = ''
while True:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')  # JPEG start-of-image marker
    b = bytes.find('\xff\xd9')  # JPEG end-of-image marker
    if a != -1 and b != -1:
        jpg = bytes[a:b+2]
        bytes = bytes[b+2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
        cv2.imshow('i', i)
        if cv2.waitKey(1) == 27:  # Esc to quit
            exit(0)
edit (explanation)
I just saw that you mention you have C++ code that is working; if that is the case, your camera may work in Python as well. The code above manually parses the MJPEG stream without relying on OpenCV, since in some of my projects the URL would not be opened by OpenCV no matter what I did (C++ or Python).
MJPEG over HTTP is multipart/x-mixed-replace with boundary frame info, and the JPEG data is just sent in binary, so you don't really need to care about the HTTP protocol headers. All JPEG frames start with the marker 0xff 0xd8 and end with 0xff 0xd9. The code above extracts such frames from the HTTP stream and decodes them one by one, like below.
...(http)
0xff 0xd8 --|
[jpeg data] |--this part is extracted and decoded
0xff 0xd9 --|
...(http)
0xff 0xd8 --|
[jpeg data] |--this part is extracted and decoded
0xff 0xd9 --|
...(http)
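To see this structure for yourself, a quick sketch like the following (the URL is a placeholder for your own stream) prints the first few raw lines of the stream, where the boundary line and the part headers appear before the binary JPEG data:

import urllib

stream = urllib.urlopen('http://localhost:8080/frame.mjpg')
for _ in range(10):
    # each readline() returns a boundary line, a header such as
    # Content-Type / Content-Length, or the start of the binary JPEG data
    print repr(stream.readline())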
edit 2 (reading from mjpg file)
Regarding your question of saving the file: yes, the file can be directly saved and reopened using the same method with a very small modification. For example, you would do curl http://IPCAM > output.mjpg
and then change the line stream = urllib.urlopen('http://localhost:8080/frame.mjpg') so that the code becomes this:
import cv2
import urllib
import numpy as np

stream = open('output.mjpg', 'rb')
bytes = ''
while True:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b+2]
        bytes = bytes[b+2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
        cv2.imshow('i', i)
        if cv2.waitKey(1) == 27:
            exit(0)
Of course you are saving a lot of redundant HTTP headers, which you might want to strip away. Or, if you have extra CPU power, you could just encode to h264 first. But if the camera is adding metadata to the HTTP header frames, such as channel or timestamp, it may be useful to keep it.
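As a sketch of the stripping idea (using the OpenCV 3 names that appear later in this thread; the 25 fps frame rate is an assumption), you could extract the JPEG frames from the saved file and repack them into a plain video container without the HTTP headers:

import cv2
import numpy as np

data = open('output.mjpg', 'rb').read()
writer = None
start = data.find(b'\xff\xd8')
while start != -1:
    end = data.find(b'\xff\xd9', start)
    if end == -1:
        break
    frame = cv2.imdecode(np.frombuffer(data[start:end + 2], dtype=np.uint8), cv2.IMREAD_COLOR)
    start = data.find(b'\xff\xd8', end + 2)
    if frame is None:  # skip anything that did not decode as a JPEG
        continue
    if writer is None:
        h, w = frame.shape[:2]
        # MJPG keeps the frames as JPEG inside an AVI; pick another fourcc to re-encode
        writer = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'), 25, (w, h))
    writer.write(frame)
if writer is not None:
    writer.release()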
edit 3 (tkinter interfacing)
import cv2
import urllib
import numpy as np
import Tkinter
from PIL import Image, ImageTk
import threading

root = Tkinter.Tk()
image_label = Tkinter.Label(root)
image_label.pack()

def cvloop():
    stream = open('output.mjpg', 'rb')
    bytes = ''
    while True:
        bytes += stream.read(1024)
        a = bytes.find('\xff\xd8')
        b = bytes.find('\xff\xd9')
        if a != -1 and b != -1:
            jpg = bytes[a:b+2]
            bytes = bytes[b+2:]
            i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
            tki = ImageTk.PhotoImage(Image.fromarray(cv2.cvtColor(i, cv2.COLOR_BGR2RGB)))
            image_label.configure(image=tki)
            image_label._backbuffer_ = tki  # avoid flicker caused by premature gc
            cv2.imshow('i', i)
            if cv2.waitKey(1) == 27:
                exit(0)

thread = threading.Thread(target=cvloop)
thread.start()
root.mainloop()
First of all, please be aware that you should try simply using OpenCV's video capture functions directly, e.g. cv2.VideoCapture('http://localhost:8080/frame.mjpg')!
This works just fine for me:
import cv2

cap = cv2.VideoCapture('http://localhost:8080/frame.mjpg')
while True:
    ret, frame = cap.read()
    if not ret:  # stream ended or could not be read
        break
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) == 27:
        exit(0)
Anyway, here is Zaw Lin's solution ported to OpenCV 3 (the only change is cv2.CV_LOAD_IMAGE_COLOR to cv2.IMREAD_COLOR) and Python 3 (string vs. byte handling changed, plus urllib):
import cv2
import urllib.request
import numpy as np

stream = urllib.request.urlopen('http://localhost:8080/frame.mjpg')
bytes = bytes()
while True:
    bytes += stream.read(1024)
    a = bytes.find(b'\xff\xd8')
    b = bytes.find(b'\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b+2]
        bytes = bytes[b+2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('i', i)
        if cv2.waitKey(1) == 27:
            exit(0)
Here is an answer using the Python 3 requests module instead of urllib.
The reason for not using urllib is that it cannot correctly interpret a URL like http://user:pass@ipaddress:port.
Adding authentication parameters is also more complex in urllib than in the requests module.
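For comparison, here is a sketch of what basic auth looks like with Python 3's urllib (the URL is a placeholder):

import urllib.request

mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
mgr.add_password(None, 'http://192.168.1.xx/', 'user', 'password')
opener = urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(mgr))
stream = opener.open('http://192.168.1.xx/mjpeg.cgi')  # then parse the stream as before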
Here is a nice, concise solution using the requests module:
import cv2
import requests
import numpy as np

r = requests.get('http://192.168.1.xx/mjpeg.cgi', auth=('user', 'password'), stream=True)
if r.status_code == 200:
    bytes = bytes()
    for chunk in r.iter_content(chunk_size=1024):
        bytes += chunk
        a = bytes.find(b'\xff\xd8')
        b = bytes.find(b'\xff\xd9')
        if a != -1 and b != -1:
            jpg = bytes[a:b+2]
            bytes = bytes[b+2:]
            i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
            cv2.imshow('i', i)
            if cv2.waitKey(1) == 27:
                exit(0)
else:
    print("Received unexpected status code {}".format(r.status_code))
I don't think the first answer works with other image formats, e.g. PNG.
So I wrote the following code, which can handle other types of images:
"""
MJPEG format
Content-Type: multipart/x-mixed-replace; boundary=--BoundaryString
--BoundaryString
Content-type: image/jpg
Content-Length: 12390
... image-data here ...
--BoundaryString
Content-type: image/jpg
Content-Length: 12390
... image-data here ...
"""
import io
import requests
import cv2
import numpy as np
class MjpegReader():
def __init__(self, url: str):
self._url = url
def iter_content(self):
"""
Raises:
RuntimeError
"""
r = requests.get(self._url, stream=True)
# parse boundary
content_type = r.headers['content-type']
index = content_type.rfind("boundary=")
assert index != 1
boundary = content_type[index+len("boundary="):] + "\r\n"
boundary = boundary.encode('utf-8')
rd = io.BufferedReader(r.raw)
while True:
self._skip_to_boundary(rd, boundary)
length = self._parse_length(rd)
yield rd.read(length)
def _parse_length(self, rd) -> int:
length = 0
while True:
line = rd.readline()
if line == b'\r\n':
return length
if line.startswith(b"Content-Length"):
length = int(line.decode('utf-8').split(": ")[1])
assert length > 0
def _skip_to_boundary(self, rd, boundary: bytes):
for _ in range(10):
if boundary in rd.readline():
break
else:
raise RuntimeError("Boundary not detected:", boundary)
mr = MjpegReader("http://127.0.0.1/mjpeg.cgi")
for content in mr.iter_content():
i = cv2.imdecode(np.frombuffer(content, dtype=np.uint8), cv2.IMREAD_COLOR)
cv2.imshow('i', i)
if cv2.waitKey(1) == 27:
break
I had the same problem.
The solution, without requests or urllib: just add the user and password in the camera address, using VideoCapture, like this:
E.g.
cv2.VideoCapture('http://user:password@XXX.XXX.XXX.XXX/video')
using IP Webcam for Android.
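A minimal sketch of the full loop with that approach (the address and credentials are placeholders for your own camera):

import cv2

cap = cv2.VideoCapture('http://user:password@192.168.1.10:8080/video')
while True:
    ret, frame = cap.read()
    if not ret:  # stream ended or could not be read
        break
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break
cap.release()
cv2.destroyAllWindows()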
I'm trying to get frames from my home security camera (Provision-ISR).
When I open the web client, I can see that the video frames are sent over a WebSocket.
I copied one of the frames and tried to save it to a file, but it's not working.
import numpy as np
import cv2

# frame holds one binary message copied from the WebSocket
frame_buffer = np.frombuffer(bytearray(frame), np.int16, int(len(frame) / 2))
cv2.imwrite("image.jpg", frame_buffer)
This is an example of the frame in a hex editor.
Solved!
av.open(rawData, format="h264", mode='r') does the decoding.
import io

import av  # PyAV
import cv2

count = 0

def save_batch_of_frames(rawData):
    global count
    rawData.seek(0)
    container = av.open(rawData, format="h264", mode='r')
    for packet in container.demux():
        if packet.size == 0:
            continue
        for frame in packet.decode():
            cv2.imwrite(f"frames/file{count}.jpg", frame.to_ndarray(format="bgr24"))
            count += 1

def check_is_keyframe(frame):
    frameData = io.BytesIO()
    frameData.write(frame)
    frameData.seek(0)
    container = av.open(frameData, format="h264", mode='r')
    for packet in container.demux():
        if packet.is_keyframe:
            return True
    return False

# get_frame_from_response and video_socket come from the WebSocket client
data = get_frame_from_response(video_socket)
while True:
    rawData = io.BytesIO()
    is_keyframe = False
    while not is_keyframe:
        rawData.write(data)
        data = get_frame_from_response(video_socket)
        is_keyframe = check_is_keyframe(data)
    save_batch_of_frames(rawData)
As I saw in the Python documentation
(https://docs.python.org/3/library/mmap.html),
Python on Linux fully supports memory-mapped files. However, when I try to apply this idea to my application, I cannot get the sample to run.
My application sends frames from one Python file (the client) to another Python file (the server).
Client Code
import mmap
import time
import os
import cv2 as cv

print("Opening camera...")
cap = cv.VideoCapture('/home/hunglv/Downloads/IMG_8442.MOV')

mm = None
try:
    while True:
        ret, img = cap.read()
        if not ret:
            break
        if mm is None:
            mm = mmap.mmap(-1, img.size, mmap.MAP_SHARED, mmap.PROT_WRITE)
        # write image
        start = time.time()
        buf = img.tobytes()
        mm.seek(0)
        mm.write(buf)
        mm.flush()
        stop = time.time()
        print("Writing Duration:", (stop - start) * 1000, "ms")
except KeyboardInterrupt:
    pass

print("Closing resources")
cap.release()
mm.close()
Server Code
import mmap
import time
import os
import cv2 as cv
import numpy as np

shape = (1080, 1920, 3)
n = np.prod(shape)

mm = mmap.mmap(-1, n)
while True:
    # read image
    print(mm)
    start = time.perf_counter()
    mm.seek(0)
    buf = mm.read(n)
    img = np.frombuffer(buf, dtype=np.uint8).reshape(shape)
    stop = time.perf_counter()
    print("Reading Duration:", (stop - start) * 1000, "ms")

    cv.imshow("img", img)
    key = cv.waitKey(1) & 0xFF
    key = chr(key)
    if key.lower() == "q":
        break

cv.destroyAllWindows()
mm.close()
On the server side, I set the memory index to 0 and try to read the bytes from memory. However, it seems that the server cannot correctly read the data from the client.
[Updated]
I've tried to read out the first 12 bytes on the server side. The value is constant, never changing:
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Besides, the first 12 bytes of a random frame are
b'\xf5\xff\xff\xf0\xfa\xfe\xdf\xe9\xed\xd2\xdc\xe0'
First I found an example which probably works, but it uses tagName (the same for client and server), which means it is Windows-only:
python-mmap-ipc
Next I found code which works on Linux:
Sharing Python data between processes using mmap.
It creates a real file on disk, resizes it to the size of the image, and then uses its fd in mmap().
I use a web camera for the test.
Server
import mmap
import time
import os
import cv2

print("Opening camera...")
cap = cv2.VideoCapture(0)

#print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))   # 640
#print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # 480

shape = (480, 640, 3)
n = (480*640*3)

fd = os.open('/tmp/mmaptest', os.O_CREAT | os.O_TRUNC | os.O_RDWR)
#os.write(fd, b'\x00' * n)  # resize file
os.truncate(fd, n)  # resize file

mm = None
try:
    while True:
        ret, img = cap.read()
        if not ret:
            break
        if mm is None:
            mm = mmap.mmap(fd, n, mmap.MAP_SHARED, mmap.PROT_WRITE)  # it has to be only for writing
        # write image
        start = time.perf_counter()
        buf = img.tobytes()
        mm.seek(0)
        mm.write(buf)
        mm.flush()
        stop = time.perf_counter()
        print("Writing Duration:", (stop - start) * 1000, "ms")
except KeyboardInterrupt:
    pass

print("Closing resources")
cap.release()
mm.close()
Client
import mmap
import time
import os
import cv2
import numpy as np

shape = (480, 640, 3)
n = (480*640*3)

fd = os.open('/tmp/mmaptest', os.O_RDONLY)

mm = mmap.mmap(fd, n, mmap.MAP_SHARED, mmap.PROT_READ)  # it has to be only for reading

while True:
    # read image
    start = time.perf_counter()
    mm.seek(0)
    buf = mm.read(n)
    img = np.frombuffer(buf, dtype=np.uint8).reshape(shape)
    stop = time.perf_counter()
    print("Reading Duration:", (stop - start) * 1000, "ms")

    cv2.imshow("img", img)
    key = cv2.waitKey(1) & 0xFF
    key = chr(key)
    if key.lower() == "q":
        break

cv2.destroyAllWindows()
mm.close()
BTW: mmap() with -1 (without creating a file on disk) could probably work between threads (or forks) because they share the same memory.
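A minimal sketch of that idea, with a writer thread and the main thread sharing one anonymous mapping in a single process (the all-white frame stands in for a camera image):

import mmap
import threading
import numpy as np

shape = (480, 640, 3)
n = 480 * 640 * 3
mm = mmap.mmap(-1, n)  # anonymous mapping, shared within this process

def writer():
    img = np.full(shape, 255, dtype=np.uint8)  # dummy all-white frame
    mm.seek(0)
    mm.write(img.tobytes())

t = threading.Thread(target=writer)
t.start()
t.join()

mm.seek(0)
img = np.frombuffer(mm.read(n), dtype=np.uint8).reshape(shape)
print(img.mean())  # 255.0, so the writer thread's frame was seen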
import numpy as np
import cv2
from hikvisionapi import Client

cap = cv2.VideoCapture()
#cap.open("rtsp://admin:DocoutBolivia@192.168.1.64:554/h264/ch0/sub")
cap.open("rtsp://admin:DocoutBolivia@192.168.1.64:554/Streaming/Channels/102/")

#cam = Client('http://192.168.1.64', 'admin', 'DocoutBolivia')
#rtsp://admin:password@192.168.1.64/h264/ch1/sub/
#response = cam.System.deviceInfo(method='get')

ret, frame = cap.read()
cv2.imwrite("holo.jpg", frame)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here

    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
I have this code and it connects and displays fine, but it's really slow. Is there another way of doing this with a bit less delay? I want to do face recognition with my HikVision IP camera.
Trying to load the stream directly with Python will not get you anywhere.
The only way to get extremely low latency is to make use of the .dll or .so files from the SDK provided by HikVision, and use ctypes to call their internal functions.
Below is a simple example I made before to access NET_DVR_PTZControl_Other. It is a lot of work if you want to develop your own application with their SDK; I'd suggest requesting a sample Python application from your vendor.
For example,
import os, ctypes
import cv2

def add_dll(path, dll_list):
    files = os.listdir(path)
    for file in files:
        if not os.path.isdir(path + file):
            if file.endswith(".dll"):
                dll_list.append(path + file)
        else:
            add_dll(path + file + "/", dll_list)

def callCpp(func_name, *args):
    # so_list holds the paths of the SDK libraries, collected beforehand (e.g. with add_dll)
    for so_lib in so_list:
        try:
            lib = ctypes.cdll.LoadLibrary(so_lib)
            try:
                value = eval("lib.%s" % func_name)(*args)
                print("Success:" + str(value))
                return value
            except:
                continue
        except:
            print("Fail:" + so_lib)
            continue
    return False

def NET_DVR_PTZControl_Other(lUserID, lChannel, dwPTZCommand, dwStop):
    res = callCpp("NET_DVR_PTZControl_Other", lUserID, lChannel, dwPTZCommand, dwStop)
    if res:
        print("Control Success")
    else:
        print("Control Fail: " + str(callCpp("NET_DVR_GetLastError")))
Get Stream Example
class NET_DVR_JPEGPARA(ctypes.Structure):
    _fields_ = [
        ("wPicSize", ctypes.c_ushort),     # WORD
        ("wPicQuality", ctypes.c_ushort)]  # WORD

def NET_DVR_CaptureJPEGPicture():
    sJpegPicFileName = bytes("pytest.jpg", "ascii")
    lpJpegPara = NET_DVR_JPEGPARA()
    lpJpegPara.wPicSize = 2
    lpJpegPara.wPicQuality = 1
    res = callCpp("NET_DVR_CaptureJPEGPicture", lUserID, lChannel, ctypes.byref(lpJpegPara), sJpegPicFileName)
    if res == False:
        error_info = callCpp("NET_DVR_GetLastError")
        print("Grab stream fail: " + str(error_info))
    else:
        print("Grab stream success")
I'm trying to stream video from my Raspberry Pi using a Flask API in Python, so that I can process individual frames on my workstation. It is working fine as far as data delivery is concerned. However, on the client side the process of reading frames introduces a lag of 1-3 seconds that is undesirable in a real-time application. I can view the video playback in my web browser without any latency, which proves that my Raspberry Pi and network are innocent. The problem is with the method of reading individual frames from the byte stream. Any thoughts on eliminating latency in such an application? Below is my code for the client-side application. The complete source of a sample application can be found here: https://github.com/shehzi-khan/video-streaming
import cv2
import urllib
import numpy as np

stream = urllib.urlopen('http://192.168.100.128:5000/video_feed')
bytes = ''
while True:
    bytes += stream.read(1024)
    a = bytes.find(b'\xff\xd8')
    b = bytes.find(b'\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b+2]
        bytes = bytes[b+2:]
        img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('Video', img)
        if cv2.waitKey(1) == 27:
            exit(0)
Main suggestions:
Search for the end-mark first and then for the start-mark.
Read more data at a time (e.g. 64 KB).
Drop the older frames and show only the last one.
I can't test it, but here is the general code:
import cv2
import urllib
import numpy as np

stream = urllib.urlopen('http://192.168.100.128:5000/video_feed')
bytes = ''
while True:
    buff = stream.read(64 * 1024)
    bytes += buff
    if buff.rfind(b'\xff\xd9') != -1:  # buff is smaller than bytes
        endmark = bytes.rfind(b'\xff\xd9') + 2
        startmark = bytes[:endmark - 2].rfind(b'\xff\xd8')
        jpg = bytes[startmark:endmark]  # please, check indexes! I could mess up with them.
        bytes = bytes[endmark:]
        img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('Video', img)
        if cv2.waitKey(1) == 27:
            exit(0)
I can't find how stream.read behaves. If it waits until the buffer is full, then you need to decrease the buffer size. If it just reads up to N bytes or until the end of the stream, then it will work.
I'm using OpenCV and Python to take images. However, currently I can only take one picture at a time. I would like to have OpenCV take multiple pictures. This is my current code:
import cv2.cv as cv
import time

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)

while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    cv.SaveImage('pic.jpg', img)
    if cv.WaitKey(10) == 27:
        break
Your code overwrites the same file. Save to a different file each time.
For example:
import cv2.cv as cv
import time

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)

i = 0
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    cv.SaveImage('pic{:>05}.jpg'.format(i), img)
    if cv.WaitKey(10) == 27:
        break
    i += 1
A minimal example of what you'd like to do, based on the C++-bound interface:
import sys
import cv2

cpt = 0
maxFrames = 5  # if you want 5 frames only.

try:
    vidStream = cv2.VideoCapture(0)  # index of your camera
except:
    print "problem opening input stream"
    sys.exit(1)

while cpt < maxFrames:
    ret, frame = vidStream.read()  # read frame and return code.
    if not ret:  # if return code is bad, abort.
        sys.exit(0)
    cv2.imshow("test window", frame)  # show image in window
    cv2.imwrite("image%04i.jpg" % cpt, frame)
    cpt += 1
    cv2.waitKey(1)  # needed so the window actually refreshes
A full example script, able to read from a camera index or a file. It includes some failsafes and some information about the input device.
usage: record.py [source] [target folder]
#!/usr/bin/env python

import cv2
import sys
import os

cpt = 0
maxFrames = 30

try:
    targetDir = sys.argv[2]
except:
    targetDir = ""  # if no argument, then use current directory

try:  # read input. eval is used to transform a video index to int
    vidStream = cv2.VideoCapture(eval(sys.argv[1]))
except:
    print "problem opening input stream"
    sys.exit(1)

if not vidStream.isOpened():
    print "capture stream not open"
    sys.exit(1)

# information in case the input is a video file.
nFrames = vidStream.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
print "frame number: %s" % nFrames
fps = vidStream.get(cv2.cv.CV_CAP_PROP_FPS)
print "FPS value: %s" % fps

# note that we could use the frame number here, or "while 1"
# so we could read from a live written file or capture device.
while cpt < maxFrames:
    ret, frame = vidStream.read()  # read frame and return code.
    if not ret:
        print "end of stream"
        sys.exit(0)
    cv2.imshow("test window", frame)  # show image in window
    cv2.imwrite(os.path.join(targetDir, "image_%04i.jpg" % cpt), frame)
    cpt += 1
    keyPressed = cv2.waitKey(1)  # time to wait between frames
    if keyPressed != -1:  # if user pressed a key, stop recording.
        sys.exit(0)
Change the name of the image to be saved to "[image name][a number which increases after every loop]".
By doing this, your image will be stored with a new name after every loop; otherwise all the images will overwrite the same name!
import cv2.cv as cv
import time

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)

num = 0
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    cv.SaveImage('pic' + str(num) + '.jpg', img)
    if cv.WaitKey(10) == 27:
        break
    num += 1
Now your images will be saved as pic0.jpg, pic1.jpg, pic2.jpg, and so on.
I think this will be helpful:
import cv2

vid = cv2.VideoCapture("video.mp4")
d = 0
while True:
    ret, frame = vid.read()
    if not ret:  # stop when the video ends instead of writing an empty frame
        break
    filename = "images/file_%d.jpg" % d
    cv2.imwrite(filename, frame)
    d += 1
This will save every frame under a different name.