How to connect a video stream from Python to Kurento Media Server

I am working on a real-time video streaming project using the RTMP protocol. I have to capture the screen with DirectX and then stream it through Kurento Media Server.
For capturing, I am using dxcam in python:
import dxcam
import cv2

target_fps = 30
camera = dxcam.create(output_idx=0, output_color="BGR")  # DXCamera instance on the primary monitor
camera.start(target_fps=target_fps, video_mode=True)
writer = cv2.VideoWriter(
    "video.mp4", cv2.VideoWriter_fourcc(*"mp4v"), target_fps, (1920, 1080)
)
for i in range(1000):
    writer.write(camera.get_latest_frame())
camera.stop()
writer.release()
del camera
I need help getting started with Kurento Media Server to stream the captured video in real time, but I can't find any tutorial for that. Can someone help me?
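A common pattern is to pipe the captured frames into ffmpeg, publish them to an RTMP ingest point (nginx-rtmp, for example), and let Kurento consume that stream. Below is a minimal sketch of the publishing side only; it assumes ffmpeg is on your PATH and an RTMP server is listening at rtmp://localhost/live/screen (both assumptions about your setup). On the Kurento side you would still build a pipeline with the kurento-client library, typically a PlayerEndpoint reading that URL connected to a WebRtcEndpoint; whether PlayerEndpoint accepts RTMP depends on the GStreamer plugins installed, so treat that as something to verify.

import subprocess
import dxcam

target_fps = 30
width, height = 1920, 1080

# ffmpeg reads raw BGR frames from stdin, encodes them with x264, and
# pushes the result to the RTMP ingest point. The URL is an assumption.
ffmpeg = subprocess.Popen([
    "ffmpeg",
    "-f", "rawvideo", "-pix_fmt", "bgr24",
    "-s", f"{width}x{height}", "-r", str(target_fps),
    "-i", "-",  # read frames from stdin
    "-c:v", "libx264", "-preset", "veryfast", "-tune", "zerolatency",
    "-f", "flv", "rtmp://localhost/live/screen",
], stdin=subprocess.PIPE)

camera = dxcam.create(output_idx=0, output_color="BGR")
camera.start(target_fps=target_fps, video_mode=True)
try:
    while True:
        ffmpeg.stdin.write(camera.get_latest_frame().tobytes())
finally:
    camera.stop()
    ffmpeg.stdin.close()
    ffmpeg.wait()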

Related

Passing a video stream from Python to VB.NET through UDP

I need to pass a live video stream from a Python analytics backend to a VB.NET WPF frontend. I'm using a Python server and a VB.NET client communicating via UDP.
This is what I have so far on both ends:
Python Server:
import cv2
import socket
import numpy as np

_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_address = ('localhost', 5000)
_quality = [int(cv2.IMWRITE_JPEG_QUALITY), 80]
vcap = cv2.VideoCapture("rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov")
while True:
    ret, frame = vcap.read()
    image = frame
    # processing
    result, imgencode = cv2.imencode('.jpg', image, _quality)
    data = np.array(imgencode)
    stringData = data.tobytes()
    _s.sendto(stringData, _address)
VB.NET client (using a console app for testing):
Imports System.Net
Imports System.Net.Sockets

Module Program
    Dim Mat = New Emgu.CV.Mat(New System.Drawing.Size(640, 480), Emgu.CV.CvEnum.DepthType.Cv8U, 3)

    Sub Main(args As String())
        Using socket As UdpClient = New UdpClient(5000)
            Try
                While True
                    Dim remoteEP = New IPEndPoint(IPAddress.Any, 5000)
                    Dim data As Byte() = socket.Receive(remoteEP)
                    Emgu.CV.CvInvoke.Imdecode(data, Emgu.CV.CvEnum.ImreadModes.ReducedColor8, Mat)
                    Emgu.CV.CvInvoke.Imshow("", Mat)
                    Emgu.CV.CvInvoke.WaitKey(1) ' Imshow needs a WaitKey call to pump the window's message loop
                End While
            Catch __unusedSocketException1__ As SocketException
                Throw
            End Try
        End Using
    End Sub
End Module
The code I've got so far doesn't throw any errors, but nothing is displayed in the EmguCV Imshow() window. Any help or alternative solutions for my use case are appreciated!
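One likely culprit on the sending side: a single JPEG frame can easily exceed the roughly 65 KB UDP datagram limit, in which case sendto() raises or the datagram is dropped, so the client never receives a decodable image. Below is a minimal sketch of chunked sending for the Python server; the chunk size and the 4-byte header layout are my own assumptions, and the VB.NET client would need matching reassembly logic.

import socket

CHUNK = 60000  # stay safely below the 65,507-byte UDP payload limit

def send_frame(sock: socket.socket, payload: bytes, address) -> None:
    total = (len(payload) + CHUNK - 1) // CHUNK
    for i in range(total):
        chunk = payload[i * CHUNK:(i + 1) * CHUNK]
        # 4-byte header: 2-byte chunk index + 2-byte chunk count, so the
        # receiver can reassemble the JPEG in order.
        header = i.to_bytes(2, "big") + total.to_bytes(2, "big")
        sock.sendto(header + chunk, address)

# usage inside the capture loop: send_frame(_s, stringData, _address)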

Correctly converting base64 bytes into string and displaying in cv2.imshow

I am struggling to find a solution for this:
I'm trying to create an image streaming system where I can grab every frame and pass it through a neural network, but somehow I haven't managed to get proper base64 image strings out of my functions below.
The code works perfectly if I just display the decoded image from the stream; it fails when I pass the image through my functions that convert it to base64 and read it back in memory for cv2 to show.
My server's functions responsible for converting and decoding base64 are described below.
Converting an image object from the stream into base64 bytes and then into one string (this works as intended):
def convertImgBase64(image):
    try:
        imgString = base64.b64encode(image).decode('utf-8')
        print('converted successfully')
        return imgString
    except os.error as err:
        print(f"Error: '{err}'")
The base64 decoder that should produce a readable, cv2-compatible frame (here is where the error begins):
def readb64(base64_string):
    storage = '/home/caio/Desktop/img/'
    try:
        sbuf = BytesIO()
        sbuf.write(base64.b64decode(str(base64_string)))
        pimg = im.open(sbuf)
        sbuf.seek(0)  # rewind before re-reading, otherwise read() returns nothing
        out = open('arq.jpeg', 'wb')
        out.write(sbuf.read())
        out.close()
        print('read b64 string')
        return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    except os.error as err:
        print(f"Error: '{err}'")
This is the current server I am building; before proceeding, I need to get the frame capture working correctly.
from io import BytesIO, StringIO
import numpy as np
import cv2
from imutils.video import FPS
import imagezmq
import base64
import darknet
import os
from PIL import Image as im
from numpy import asarray
from time import sleep

# ImageZMQ receiver for frames sent by the client
image_hub = imagezmq.ImageHub()

def convertImgBase64(image):
    try:
        imgString = base64.b64encode(image).decode('utf-8')
        return imgString
    except os.error as err:
        print(f"Error: '{err}'")

def readb64(base64_string):
    try:
        sbuf = BytesIO()
        sbuf.write(base64.b64decode(str(base64_string)))
        pimg = im.open(sbuf)
        return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
    except os.error as err:
        print(f"Error: '{err}'")

def capture_img():
    while True:
        camera, jpg_buffer = image_hub.recv_jpg()
        buffer = np.frombuffer(jpg_buffer, dtype='uint8')
        imagedecoder = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
        img = im.fromarray(imagedecoder)
        try:
            string = convertImgBase64(imagedecoder)
            cvimg = readb64(string)
            # cv2.imshow(camera, cvimg)  # this is the line that isn't working!
        except os.error as err:
            print(f"Error: '{err}'")
        cv2.imshow(camera, imagedecoder)
        cv2.waitKey(1)  # cv2 won't render without this
        image_hub.send_reply(b'OK')  # ImageZMQ needs an acknowledgement
The client code (running on a Raspberry Pi) is given below:
import sys
import socket
import time
import cv2
from imutils.video import VideoStream
import imagezmq
import argparse

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--server-ip", required=True,
                help="ip address of the server to which the client will connect")
args = vars(ap.parse_args())

# initialize the ImageSender object with the socket address of the server
sender = imagezmq.ImageSender(connect_to="tcp://{}:5555".format(
    args["server_ip"]))
# use either of the formats below to specify the address of the display computer
# sender = imagezmq.ImageSender(connect_to='tcp://192.168.1.190:5555')

rpi_name = socket.gethostname()  # send RPi hostname with each image
vs = VideoStream(usePiCamera=True, resolution=(800, 600)).start()
time.sleep(2.0)  # allow camera sensor to warm up
jpeg_quality = 95  # 0 to 100, higher is better quality, 95 is cv2 default

while True:  # send images as stream until Ctrl-C
    image = vs.read()
    ret_code, jpg_buffer = cv2.imencode(
        ".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
    sender.send_jpg(rpi_name, jpg_buffer)
I have been trying the solutions from here and here.
If you know a better way to pass an image object that I can process inside the YOLO/darknet neural network, that would be awesome!
Thanks!
The answers provided by @Christoph Rackwitz are correct. ImageZMQ is designed to send and receive OpenCV images WITHOUT any base64 encoding. The ImageSender class sends OpenCV images; the ImageHub class receives them. Optionally, ImageZMQ can send a jpg buffer (as your Raspberry Pi client code is doing).
Your Raspberry Pi client code is based on the ImageZMQ "send jpg" example.
Your server code should therefore use the matching ImageZMQ "receive jpg" example.
The essence of the ImageZMQ "receive jpg" example code is:
import numpy as np
import cv2
import imagezmq

image_hub = imagezmq.ImageHub()
while True:  # show streamed images until Ctrl-C
    rpi_name, jpg_buffer = image_hub.recv_jpg()
    image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
    # see opencv docs for info on the -1 parameter
    cv2.imshow(rpi_name, image)  # 1 window for each RPi
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')
No base64 decoding is required; the variable image already contains an OpenCV image. (FYI, I am the author of ImageZMQ.)
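Since the original goal was YOLO/darknet inference, note that the decoded frame can go straight into the detector with no base64 round trip. A hedged sketch follows; detect() below is a stand-in for whatever inference entry point your darknet build exposes (for example darknet.detect_image in AlexeyAB's wrapper), not a real API.

import numpy as np
import cv2
import imagezmq

def detect(frame):
    # placeholder: replace with your darknet binding's inference call
    return []

image_hub = imagezmq.ImageHub()
while True:
    rpi_name, jpg_buffer = image_hub.recv_jpg()
    frame = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), cv2.IMREAD_COLOR)
    detections = detect(frame)  # the BGR numpy array goes straight in
    image_hub.send_reply(b'OK')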

Connecting webcam through ip gives error "Cannot capture source"

I have been trying to use my smartphone camera with YOLO v3 to get better frames, but the error "Cannot capture source" keeps appearing. I am using the IP Webcam app on my Android phone.
The IP Webcam app works by connecting devices over a local network. I tried uninstalling OpenCV and reinstalling it (pip install opencv-python), as was suggested somewhere on the internet, but nothing changed.
I am posting the small block of code that keeps showing the error. The whole file can be found at https://github.com/ayooshkathuria/pytorch-yolo-v3/blob/master/cam_demo.py
if CUDA:
    model.cuda()
model.eval()

videofile = 'video.avi'
cap = cv2.VideoCapture('http://192.168.43.1:8080/browserfs.html')
assert cap.isOpened(), 'Cannot capture source'

frames = 0
start = time.time()
while cap.isOpened():
    ret, frame = cap.read()
I want to use my smartphone camera as a webcam because my system lacks one.
ERROR:
assert cap.isOpened(), 'Cannot capture source'
AssertionError: Cannot capture source
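A minimal sketch of the usual fix, assuming the IP Webcam app's default endpoints: browserfs.html is the app's browser UI page, not a video stream, so OpenCV cannot decode it. The raw MJPEG stream is typically served at /video on the same host and port.

import cv2

cap = cv2.VideoCapture('http://192.168.43.1:8080/video')
assert cap.isOpened(), 'Cannot capture source'

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('ip-webcam', frame)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break
cap.release()
cv2.destroyAllWindows()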

How to run a Hikvision camera with IP 20.0.0.14 and username/password admin/12345 from Python code

I need to run a Hikvision camera with IP 20.0.0.14 and username/password admin/12345 from Python code.
The original camera code is:
import cv2.cv as cv
import time

cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    if cv.WaitKey(10) == 27:
        break
cv.DestroyAllWindows()
I need help, please.
Here's a solution using OpenCV 3. In your sample code you are not just missing the OpenCV 2 interface; you are using the very old cv interface (prior to OpenCV 2). So my first suggestion is to get a current install of OpenCV working.
Possible source of rtsp urls for use with hikvision cameras:
https://www.ispyconnect.com/man.aspx?n=Hikvision
import cv2

# Note: the following is the typical rtsp url for streaming from an ip cam
# source = "rtsp://user:password@ipaddress:port/<camera specific stuff>"
# Each manufacturer is different. For my alibi cameras, this would be
# a valid url to use with the info you provided.
source = "rtsp://admin:12345@20.0.0.14/Streaming/Channels/2"
cap = cv2.VideoCapture(source)

ok_flag = True
while ok_flag:
    (ok_flag, img) = cap.read()
    if not ok_flag:
        break
    cv2.imshow("some window", img)
    if cv2.waitKey(10) == 27:
        break
cv2.destroyAllWindows()
Also note that this code works the same if the source is the path to a valid video file (like an .avi) or a web camera (in which case you pass the integer index of the webcam, like 0), as illustrated below.
Another error in your post is cv.CaptureFromCAM(0), which captures from the first webcam installed on the computer, not an IP stream.
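To illustrate the point about alternative sources, the same cv2.VideoCapture call accepts a file path or a webcam index (the file name here is hypothetical):

import cv2

cap = cv2.VideoCapture("movie.avi")  # a video file on disk
cap = cv2.VideoCapture(0)            # or the first local webcam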

Stream video and audio frames from YouTube

I am trying to stream video and audio data from a YouTube video so that I can run video and audio analysis separately and superimpose the results on the frames using OpenCV. I have this working perfectly with files but want to extend it to streaming from YouTube.
At the moment I'm considering the VLC Python bindings to stream from YouTube, but I'm not sure how to extract the frames from that video.
Here is the VLC code that performs the YouTube streaming at the moment:
import vlc
import time
import numpy as np
from ctypes import *

class MyDisplay(vlc.VideoDisplayCb):
    def __doc__(o, p):
        print("fsdfs")

class MyLock(vlc.VideoLockCb):
    def __doc__():
        raise Exception("sdsds")
        return np.array(500, 500).__array_interface__['data']

class MyPlayback(vlc.AudioPlayCb):
    def from_param(self, a, b, c, d):
        print("asfds")

def callbck(a, b, c, d):
    print('aa')
    print(a)
    print(b)
    print(c)
    print(d)
    return 'a'

if __name__ == '__main__':
    url = 'https://www.youtube.com/watch?v=F82XtLmL0tU'
    i = vlc.Instance('--verbose 2'.split())
    media = i.media_new(url)
    media_list = i.media_list_new([url])
    p = i.media_player_new()
    p.set_media(media)
    lp = i.media_list_player_new()
    lp.set_media_player(p)
    lp.set_media_list(media_list)
    CMPFUNC = CFUNCTYPE(c_char, c_void_p, c_void_p, c_uint, c_long)
    lp.next()
    lock = MyLock()
    display = MyDisplay()
    playback = MyPlayback()
    p.audio_set_callbacks(CMPFUNC(callbck), None, None, None, None, None)
    p.play()
    time.sleep(5)
    r = p.video_take_snapshot(0, 'rnd.pong', 0, 0)
How could I produce a stream of frames and audio data using VLC (with the Python bindings)? Also, is there another way to do this (using ffmpeg, for example)?
Thanks
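One alternative that avoids the VLC callback machinery: resolve the direct media URL with yt-dlp and hand it to OpenCV for the frames; the same URL can then be given to ffmpeg to demux the audio track for separate analysis. A hedged sketch, assuming the yt-dlp package is installed, the format selector matches an available progressive MP4 stream, and your OpenCV build has ffmpeg support:

import cv2
import yt_dlp

url = 'https://www.youtube.com/watch?v=F82XtLmL0tU'

# ask yt-dlp for the direct googlevideo URL without downloading anything
with yt_dlp.YoutubeDL({'format': 'best[ext=mp4]'}) as ydl:
    info = ydl.extract_info(url, download=False)
    stream_url = info['url']

cap = cv2.VideoCapture(stream_url)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # per-frame video analysis goes here
    cv2.imshow('youtube', frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()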
