I am getting the following warning:
[ WARN:0#1.140] global /private/var/folders/sy/f16zz6x50xz3113nwtb9bvq00000gp/T/abs_506zufg7xt/croots/recipe/opencv-suite_1664548331847/work/modules/videoio/src/cap_gstreamer.cpp (862) isPipelinePlaying OpenCV | GStreamer warning: GStreamer: pipeline have not been created
I am attempting to run the following code on my MacBook Pro (macOS 10.15.7) to stream video from a Pi camera on my Raspberry Pi to my MacBook, in order to use it as a virtual camera:
import cv2
import numpy as np
import io
import socket
import struct
import time
from Quartz import CoreVideo
import CoreMedia

IMG_W = 1280
IMG_H = 720

server_socket = socket.socket()
server_socket.bind(('0.0.0.0', 8000))
server_socket.listen(0)
connection = server_socket.accept()[0].makefile('rb')

cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, IMG_W)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, IMG_H)

try:
    display_link = CoreVideo.CVDisplayLinkCreateWithActiveCGDisplays(None)
    while True:
        ret, frame = cam.read()
        if frame is not None:
            print("frame is not None, proceed with flip")
            flipped = cv2.flip(frame, 1)
            frame[0:IMG_H, IMG_W//2:IMG_W] = flipped[0:IMG_H, IMG_W//2:IMG_W]
        else:
            print("frame is None, skipping flip")
        # Read the length of the image as a 32-bit unsigned int.
        image_len_packed = connection.read(struct.calcsize('<L'))
        if not image_len_packed:
            break
        image_len = struct.unpack('<L', image_len_packed)[0]
        # Read the image data.
        image_stream = io.BytesIO()
        remaining = image_len
        while remaining > 0:
            chunk = connection.read(min(remaining, 4096))
            if not chunk:
                break
            image_stream.write(chunk)
            remaining -= len(chunk)
        # Rewind the stream.
        image_stream.seek(0)
        image = np.array(bytearray(image_stream.read()), dtype=np.uint8)
        frame = cv2.imdecode(image, cv2.IMREAD_COLOR)
        # Resize the frame to the desired size for the virtual camera.
        if frame is not None and frame.shape == (IMG_H, IMG_W, 3):
            print("frame is not None and has correct shape")
            frame = cv2.resize(frame, (IMG_W, IMG_H))
        else:
            print("frame is None or has incorrect shape, skipping resize")
        # Convert the frame to a format suitable for macOS's AVFoundation framework.
        if frame is not None and frame.shape[0] > 0 and frame.shape[1] > 0:
            print("frame is not None and has dimensions")
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        else:
            print("frame is None or has no dimensions, skipping cvtColor")
        # Create a `CVPixelBuffer` from the image data.
        if frame is not None:
            print("frame is not None, creating pixel buffer")
            pixel_buffer = CoreVideo.CVPixelBufferCreateWithBytes(
                IMG_W, IMG_H, CoreVideo.kCVPixelFormatType_32BGRA,
                frame.tobytes(), frame.strides[0],
                None, None, None, None, None,
            )
            # Create a `CMSampleBuffer` from the pixel buffer.
            sample_buffer = CoreMedia.CMSampleBufferCreateForImageBuffer(
                None, pixel_buffer, True, None, None,
                CoreMedia.kCMSampleAttachmentKey_DisplayImmediately,
            )
            # Send the sample buffer to the virtual camera.
            CoreVideo.CVDisplayLinkStart(display_link)
            CoreVideo.CVPixelBufferLockBaseAddress(pixel_buffer, 0)
            CoreVideo.CVDisplayLinkRender(display_link, sample_buffer)
            CoreVideo.CVPixelBufferUnlockBaseAddress(pixel_buffer, 0)
            CoreVideo.CVDisplayLinkStop(display_link)
        else:
            print("frame is None, skipping")
except Exception as e:
    print(e)
finally:
    cam.release()
    CoreVideo.CVDisplayLinkStop(display_link)
    CoreVideo.CVDisplayLinkRelease(display_link)
And here is my corresponding code that I am running on my Raspberry Pi to transmit the stream:
import io
import socket
import struct
import time
import picamera

# Create a socket connection between the Raspberry Pi and the computer.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('10.37.68.171', 8000))
connection = client_socket.makefile('wb')
try:
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        # Start a stream to the socket.
        camera.start_recording(connection, format='h264')
        while True:
            camera.wait_recording(1)
finally:
    camera.stop_recording()
    connection.close()
    client_socket.close()
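Note that the reading loop on the Mac side expects each frame as a 4-byte little-endian length prefix followed by JPEG data, while start_recording(connection, format='h264') writes a raw H.264 stream with no framing at all. For reference, here is a minimal Pi-side sketch that matches the length-prefixed JPEG framing, adapted from the capture_continuous recipe in the picamera docs (untested here):

import io
import socket
import struct
import picamera

client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('10.37.68.171', 8000))
connection = client_socket.makefile('wb')
try:
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        stream = io.BytesIO()
        for _ in camera.capture_continuous(stream, 'jpeg', use_video_port=True):
            # Length prefix first, then the JPEG payload.
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            stream.seek(0)
            connection.write(stream.read())
            # Reset the stream for the next frame.
            stream.seek(0)
            stream.truncate()
finally:
    connection.close()
    client_socket.close()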
First: How can I fix the warning? Or is it even necessary to fix it?
Second: Is there anything in general that I should be doing differently with my code in order to be able to use the stream as a virtual camera on my MacBook?
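For reference on the first question: the warning comes from OpenCV probing its GStreamer backend, and VideoCapture accepts an explicit apiPreference argument to pin a backend instead. A minimal sketch (cv2.CAP_AVFOUNDATION is a standard OpenCV constant, though I haven't verified that this silences the warning on this exact build):

import cv2

# Ask OpenCV for the AVFoundation backend directly so it does not
# probe the GStreamer backend (which emits the pipeline warning).
cam = cv2.VideoCapture(0, cv2.CAP_AVFOUNDATION)
if cam.isOpened():
    print(cam.getBackendName())  # expected to report AVFOUNDATION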
I have to make a project that shares the screen and webcam video of multiple clients at the same time. Screen sharing and webcam sharing each work on their own, but I can't combine them so that they run at the same time. I copied and pasted the code and then made changes to it, so I don't understand all of it that well.
Server:
import socket, cv2, pickle, struct
import imutils
import threading
import pyshine as ps

server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
print('HOST IP:', host_ip)
port = 9999
socket_address = (host_ip, port)
server_socket.bind(socket_address)
server_socket.listen()
print("Listening at", socket_address)

def show_client(addr, client_socket):
    try:
        print('CLIENT {} CONNECTED!'.format(addr))
        if client_socket:  # if a client socket exists
            data = b""
            payload_size = struct.calcsize("Q")
            while True:
                while len(data) < payload_size:
                    packet = client_socket.recv(4 * 1024)  # 4K
                    if not packet:
                        break
                    data += packet
                packed_msg_size = data[:payload_size]
                data = data[payload_size:]
                msg_size = struct.unpack("Q", packed_msg_size)[0]
                while len(data) < msg_size:
                    data += client_socket.recv(4 * 1024)
                frame_data = data[:msg_size]
                data = data[msg_size:]
                frame = pickle.loads(frame_data)
                text = f"CLIENT: {addr}"
                frame = ps.putBText(frame, text, 10, 10, vspace=10, hspace=1, font_scale=0.7,
                                    background_RGB=(255, 0, 0), text_RGB=(255, 250, 250))
                cv2.imshow(f"FROM {addr}", frame)
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
            client_socket.close()
    except Exception as e:
        print(f"CLIENT {addr} DISCONNECTED")

def run_show_client():
    while True:
        client_socket, addr = server_socket.accept()
        thread = threading.Thread(target=show_client, args=(addr, client_socket))
        thread.start()
        print("TOTAL CLIENTS", threading.active_count() - 1)
Screen sharing client:
import socket, cv2, pickle, struct
import imutils
import pyautogui
import numpy as np

def student_screen_show():
    camera = True
    if camera == True:
        vid = cv2.VideoCapture(0)
    else:
        vid = cv2.VideoCapture('videos/mario.mp4')
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host_ip = ' '
    port = 9999
    client_socket.connect((host_ip, port))
    resolution = (1920, 1080)
    codec = cv2.VideoWriter_fourcc(*"XVID")
    filename = "Recording.avi"
    fps = 60.0
    out = cv2.VideoWriter(filename, codec, fps, resolution)
    if client_socket:
        while vid.isOpened():
            try:
                img = pyautogui.screenshot()
                frame = np.array(img)
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                out.write(frame)
                frame = imutils.resize(frame, width=380)
                a = pickle.dumps(frame)
                message = struct.pack("Q", len(a)) + a
                client_socket.sendall(message)
                cv2.imshow(f"TO: {host_ip}", frame)
                key = cv2.waitKey(1) & 0xFF
                if key == ord("q"):
                    client_socket.close()
            except:
                print('VIDEO FINISHED!')
                break
Webcam sharing client:
import socket, cv2, pickle, struct
import imutils

def student_show():
    camera = True
    if camera == True:
        vid = cv2.VideoCapture(0)
    else:
        vid = cv2.VideoCapture('videos/mario.mp4')
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host_ip = ' '
    port = 9999
    client_socket.connect((host_ip, port))
    if client_socket:
        while vid.isOpened():
            try:
                img, frame = vid.read()
                frame = imutils.resize(frame, width=380)
                a = pickle.dumps(frame)
                message = struct.pack("Q", len(a)) + a
                client_socket.sendall(message)
                cv2.imshow(f"TO: {host_ip}", frame)
                key = cv2.waitKey(1) & 0xFF
                if key == ord("q"):
                    client_socket.close()
            except:
                print('VIDEO FINISHED!')
                break
So what I have to do is merge the code so that, instead of running only screen sharing or only webcam sharing, a single client shares both at the same time. I didn't include the IP address in the code shown here (host_ip = ' '), but I do use my real IP address in my own code; I just felt weird sharing it.
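Since both clients already use the same 8-byte length-prefixed pickle protocol, one way to merge them is to interleave both sources over a single connection and put a one-byte source tag in front of each message. The sketch below is untested; the tag scheme, the send_frame helper, and the 'YOUR.HOST.IP' placeholder are inventions for illustration, and show_client on the server would need a matching change to read the tag first and route each frame to its own window:

import socket, cv2, pickle, struct
import imutils
import numpy as np
import pyautogui

SCREEN_TAG, WEBCAM_TAG = b'S', b'W'

client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('YOUR.HOST.IP', 9999))
vid = cv2.VideoCapture(0)

def send_frame(tag, frame):
    # Same framing as the existing clients, plus a one-byte source tag.
    payload = pickle.dumps(frame)
    client_socket.sendall(tag + struct.pack("Q", len(payload)) + payload)

while vid.isOpened():
    ret, cam_frame = vid.read()
    if not ret:
        break
    screen = cv2.cvtColor(np.array(pyautogui.screenshot()), cv2.COLOR_RGB2BGR)
    # Interleave both sources on one connection.
    send_frame(WEBCAM_TAG, imutils.resize(cam_frame, width=380))
    send_frame(SCREEN_TAG, imutils.resize(screen, width=380))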
As I saw in the Python documentation (https://docs.python.org/3/library/mmap.html), Python on Linux fully supports memory-mapped files. However, when I try to apply this idea to my application, I cannot get the sample to run.
My application sends frames from one Python file (the client) to another Python file (the server).
Client Code
import mmap
import time
import os
import cv2 as cv

print("Opening camera...")
cap = cv.VideoCapture('/home/hunglv/Downloads/IMG_8442.MOV')
mm = None
try:
    while True:
        ret, img = cap.read()
        if not ret:
            break
        if mm is None:
            mm = mmap.mmap(-1, img.size, mmap.MAP_SHARED, mmap.PROT_WRITE)
        # write image
        start = time.time()
        buf = img.tobytes()
        mm.seek(0)
        mm.write(buf)
        mm.flush()
        stop = time.time()
        print("Writing Duration:", (stop - start) * 1000, "ms")
except KeyboardInterrupt:
    pass
print("Closing resources")
cap.release()
mm.close()
Server Code
import mmap
import time
import os
import cv2 as cv
import numpy as np

shape = (1080, 1920, 3)
n = np.prod(shape)
mm = mmap.mmap(-1, n)
while True:
    # read image
    print(mm)
    start = time.perf_counter()
    mm.seek(0)
    buf = mm.read(n)  # read the full frame, matching the reshape below
    img = np.frombuffer(buf, dtype=np.uint8).reshape(shape)
    stop = time.perf_counter()
    print("Reading Duration:", (stop - start) * 1000, "ms")
    cv.imshow("img", img)
    key = cv.waitKey(1) & 0xFF
    key = chr(key)
    if key.lower() == "q":
        break
cv.destroyAllWindows()
mm.close()
On the server side, I set the memory index to 0 and try to read the bytes from memory. However, it seems that the server cannot read the data from the client correctly.
[Updated]
I tried reading out the first 12 bytes on the server side. The value is constant and never changes:
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Besides, the first 12 bytes of a random frame are:
b'\xf5\xff\xff\xf0\xfa\xfe\xdf\xe9\xed\xd2\xdc\xe0'
First I found an example which probably works, but it uses tagName (the same for client and server), which means it is Windows-only:
python-mmap-ipc
Next I found code which works on Linux:
Sharing Python data between processes using mmap.
It creates a real file on disk, resizes it to the size of the image, and then uses its file descriptor in mmap().
I use a web camera for the test.
Server
import mmap
import time
import os
import cv2

print("Opening camera...")
cap = cv2.VideoCapture(0)
#print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))   # 640
#print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # 480

shape = (480, 640, 3)
n = (480*640*3)

fd = os.open('/tmp/mmaptest', os.O_CREAT | os.O_TRUNC | os.O_RDWR)
#os.write(fd, b'\x00' * n)  # resize file
os.truncate(fd, n)  # resize file

mm = None
try:
    while True:
        ret, img = cap.read()
        if not ret:
            break
        if mm is None:
            mm = mmap.mmap(fd, n, mmap.MAP_SHARED, mmap.PROT_WRITE)  # it has to be only for writing
        # write image
        start = time.perf_counter()
        buf = img.tobytes()
        mm.seek(0)
        mm.write(buf)
        mm.flush()
        stop = time.perf_counter()
        print("Writing Duration:", (stop - start) * 1000, "ms")
except KeyboardInterrupt:
    pass
print("Closing resources")
cap.release()
mm.close()
Client
import mmap
import time
import os
import cv2
import numpy as np

shape = (480, 640, 3)
n = (480*640*3)

fd = os.open('/tmp/mmaptest', os.O_RDONLY)
mm = mmap.mmap(fd, n, mmap.MAP_SHARED, mmap.PROT_READ)  # it has to be only for reading

while True:
    # read image
    start = time.perf_counter()
    mm.seek(0)
    buf = mm.read(n)
    img = np.frombuffer(buf, dtype=np.uint8).reshape(shape)
    stop = time.perf_counter()
    print("Reading Duration:", (stop - start) * 1000, "ms")
    cv2.imshow("img", img)
    key = cv2.waitKey(1) & 0xFF
    key = chr(key)
    if key.lower() == "q":
        break

cv2.destroyAllWindows()
mm.close()
BTW: mmap() with -1 (without creating a file on disk) would probably work with threads (or forks), because they share the same memory.
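A minimal sketch of that idea (Linux-only, since it uses os.fork(); the solid-gray test frame is just for illustration). The anonymous mapping is created before the fork, so parent and child share the same pages:

import mmap
import os
import numpy as np

shape = (480, 640, 3)
n = int(np.prod(shape))

# Create the anonymous shared mapping BEFORE forking, so both
# processes refer to the same physical pages.
mm = mmap.mmap(-1, n, mmap.MAP_SHARED)

if os.fork() == 0:
    # Child: write one solid-gray test frame into shared memory.
    frame = np.full(shape, 128, dtype=np.uint8)
    mm.seek(0)
    mm.write(frame.tobytes())
    mm.flush()
    os._exit(0)
else:
    os.wait()  # wait until the child has written the frame
    mm.seek(0)
    img = np.frombuffer(mm.read(n), dtype=np.uint8).reshape(shape)
    print(img[0, 0])  # prints [128 128 128]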
Is it possible to write a webcam streaming script in Python for mobile networks? The biggest requirement is that it be as close to realtime as possible, without major lag and delay. I have tried some standard UDP examples I found on Google. On my private WiFi it works perfectly at 320x240 resolution.
But as soon as I switch to my LTE surf stick, where I have about 3-4 Mbit/s of upload, the picture lags extremely, with a big delay and lots of frame drops.
I wonder why, because 3 Mbit/s should be enough...
So my guess is that I need some kind of compression? Or am I missing something essential here, and is it not even possible without a lot of buffering, which would make realtime impossible?
Here is the code I use for the Raspberry Pi:
import socket
import cv2 as cv

addr = ('myserver.xx', 1331)
buf = 512
width = 320
height = 240
cap = cv.VideoCapture(0)
cap.set(3, width)
cap.set(4, height)
cap.set(cv.CAP_PROP_FPS, 25)
cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter.fourcc('M', 'J', 'P', 'G'))
code = 'start'
code = ('start' + (buf - len(code)) * 'a').encode('utf-8')

if __name__ == '__main__':
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    while cap.isOpened():
        ret, frame = cap.read()
        #frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        if ret:
            s.sendto(code, addr)
            data = frame.tobytes()
            for i in range(0, len(data), buf):
                s.sendto(data[i:i+buf], addr)
            # cv.imshow('send', frame)
            # if cv.waitKey(1) & 0xFF == ord('q'):
            #     break
        else:
            break
    # s.close()
    # cap.release()
    # cv.destroyAllWindows()
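For a sense of scale: a raw 320x240 BGR frame is 320*240*3 = 230,400 bytes, which at 25 fps is about 46 Mbit/s, far beyond a 3-4 Mbit/s uplink, so some compression is indeed needed. A minimal sketch of the sending loop with per-frame JPEG compression via cv2.imencode (untested; the quality value of 80 is an arbitrary starting point to tune against the available bandwidth):

import socket
import struct
import cv2 as cv

addr = ('myserver.xx', 1331)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cap = cv.VideoCapture(0)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # Compress the frame to JPEG before sending.
    ok, jpg = cv.imencode('.jpg', frame, [cv.IMWRITE_JPEG_QUALITY, 80])
    if not ok:
        continue
    data = jpg.tobytes()
    # Prefix with the length so the receiver knows how many bytes to expect.
    s.sendto(struct.pack('<L', len(data)), addr)
    for i in range(0, len(data), 512):
        s.sendto(data[i:i+512], addr)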
I am getting the camera feed from a camera on a Raspberry Pi to the computer using a TCP socket, to use it with OpenCV. It works well with minimal delay. However, sometimes the frame freezes, and after a while the feed comes back or the OpenCV window crashes. I have looked everywhere and tried multiple things, but I just don't know what is causing it.
Server (PC):
import socket
import struct
import numpy as np
import cv2

host = "192.168.0.12"
portCar = 8010

# Camera socket
camS = socket.socket()
camS.bind((host, portCar))
camS.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

# Listen for camera
camS.listen(0)
print("Waiting for camera connection...")
camCon = camS.accept()[0]
camFile = camCon.makefile("rb")
print("Connection made with camera")

camS.settimeout(0.00001)
numOfBytes = struct.calcsize("<L")

try:
    while True:
        camS.setblocking(False)
        imageLength = struct.unpack("<L", camFile.read(numOfBytes))[0]
        if imageLength == 0:
            break
        nparr = np.frombuffer(camFile.read(imageLength), np.uint8)
        frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        cv2.imshow('RC Car Video stream', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    camFile.close()
    camS.close()
    cv2.destroyAllWindows()
    print("Server - Camera connection closed")
Client (PI):
import io
import socket
import struct
import time
import picamera

client_socket = socket.socket()
client_socket.connect(('192.168.0.12', 8010))
client_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
connection = client_socket.makefile('wb')

class SplitFrames(object):
    def __init__(self, connection):
        self.connection = connection
        self.stream = io.BytesIO()

    def write(self, buf):
        if buf.startswith(b'\xff\xd8'):
            # Start of a new JPEG frame; send the previous frame first,
            # length-prefixed, then reset the buffer.
            size = self.stream.tell()
            if size > 0:
                self.connection.write(struct.pack('<L', size))
                self.connection.flush()
                self.stream.seek(0)
                self.connection.write(self.stream.read(size))
                self.stream.seek(0)
        self.stream.write(buf)

try:
    output = SplitFrames(connection)
    with picamera.PiCamera(resolution='VGA', framerate=30) as camera:
        time.sleep(2)
        camera.rotation = 180
        camera.start_recording(output, format='mjpeg')
        camera.wait_recording(2000)
        camera.stop_recording()
    # Write the terminating 0-length to the connection to let the
    # server know we're done
    connection.write(struct.pack('<L', 0))
finally:
    connection.close()
    client_socket.close()
    print("Client - Connection closed")
Any help will be greatly appreciated.
What I am trying to do: record video and send it from one PC to another over a socket stream. For that I use the OpenCV library (cv2), NumPy, and socket.
The problem: when I send the frames, only part of each frame arrives (43776 of 921600 bytes), which causes an error afterwards when I try to display the frame on the other PC.
The Server:
#!/usr/bin/python
import socket
import cv2
import numpy as np

ip = "XXX.XXX.X.XXX"

def Test(Given_List):
    y = 0
    temp = []
    Frame_List = []
    for kappa in range(480):
        Frame_List.append([])
    for each in Given_List:
        if len(temp) < 3:
            temp.append(each)
        if len(temp) >= 3:
            Frame_List[y].append(temp)
            temp = []
            if len(Frame_List[y]) >= 640:
                y += 1
    return Frame_List

while True:
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect((ip, 50000))
    data = client_socket.recv(10240000)
    s = np.frombuffer(data, dtype=np.uint8)
    nice = np.asarray(Test(s))
    cv2.imshow('frame', nice)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
The Client:
#!/usr/bin/python
import socket
import cv2

#Receiver ip
ip = "XXX.XXX.X.XXX"
port = 50000

#Set up socket and stuff
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("XXX.XXX.X.XXX", port))
server.listen(1)

#Define the cam stuff
cap = cv2.VideoCapture(0)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

while cap.isOpened():
    s, add = server.accept()
    ret, frame = cap.read()
    n = frame.tobytes()
    if ret:
        s.sendall(n)
    else:
        break

# Release everything if job is finished
cap.release()
s.close()
It seems the amount of data exceeded what a single socket receive can deliver, so I changed the video resolution to 320x240 and now it works fine.
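Lowering the resolution only makes it more likely that a single recv() call happens to return a whole frame; TCP is a byte stream, and recv() may return any number of bytes up to the requested size. The reliable fix is the same length-prefix pattern used in the other examples above. A minimal sketch; the helper names recv_exact and recv_frame are made up for illustration:

import socket
import struct

def recv_exact(sock, n):
    # Keep reading until exactly n bytes have arrived.
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed mid-frame")
        buf += chunk
    return buf

def recv_frame(sock):
    # Read the 4-byte little-endian length, then exactly that many bytes.
    (length,) = struct.unpack('<L', recv_exact(sock, 4))
    return recv_exact(sock, length)

The sender would correspondingly prefix each frame with struct.pack('<L', len(data)) before calling sendall().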