I used Python sockets to make a server on my Raspberry Pi 3 (Raspbian) and a client on my laptop (Windows 10). The server streams images to the laptop at a rate of 10 fps, and can reach 15 fps if I push it. The problem is that when I want the laptop to send back a command based on the image, the frame rate drops sharply to 3 fps. The process is like this:
Pi send img => Laptop receive img => Quick process => Send command based on process result => Pi receive command, print it => Pi send img => ...
The processing time for each frame does not cause this (0.02 s at most per frame), so currently I am at a loss as to why the frame rate drops so much. The image is quite large, at around 200 kB, while the command is only a short string at 3 B. The image is in matrix form and is pickled before sending, while the command is sent as-is.
Can someone please explain to me why sending back such a short command would make the frame rate drop so much? And if possible, a solution for this problem. I tried making 2 servers, one dedicated to sending images and one for receiving command, but the result is the same.
Server:
import socket
import pickle
import time
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
from SendFrameInOO import PiImageServer
def main():
    """Stream camera frames to the laptop and print each command it returns.

    Opens two PiImageServer endpoints (port 50009 for frames, 50002 for
    commands), then loops: capture -> pickle -> send frame -> wait for a
    3-byte command.  A 'BYE' command ends the session.
    """
    # Initialize the servers and a time stamp.
    ImageServer = PiImageServer()
    ImageServer2 = PiImageServer()
    ImageServer.openServer('192.168.0.89', 50009)
    ImageServer2.openServer('192.168.0.89', 50002)

    # Initialize the camera object.
    camera = PiCamera()
    camera.resolution = (320, 240)
    camera.framerate = 10  # it seems this cannot go higher than 10
                           # unless special measures are taken, which may
                           # reduce image quality
    camera.exposure_mode = 'sports'  # reduce motion blur
    rawCapture = PiRGBArray(camera)

    # Allow the camera to warm up.
    time.sleep(1)

    # Capture frames from the camera.
    print('<INFO> Preparing to stream video...')
    timeStart = time.time()
    try:
        for frame in camera.capture_continuous(rawCapture, format="bgr",
                                               use_video_port=True):
            # Grab the raw NumPy array representing the image.
            image = frame.array
            imageData = pickle.dumps(image)
            ImageServer.sendFrame(imageData)  # send the frame data

            # Receive command from laptop and print it.
            command = ImageServer2.recvCommand()
            if command == 'BYE':
                print('BYE received, ending stream session...')
                break
            print(command)

            # Clear the stream in preparation for the next frame.
            rawCapture.truncate(0)
    finally:
        print('<INFO> Video stream ended')
        # BUG FIX: close BOTH servers (the original leaked ImageServer2)
        # and release the camera even if the loop raises.
        ImageServer.closeServer()
        ImageServer2.closeServer()
        camera.close()

    elapsedTime = time.time() - timeStart
    print('<INFO> Total elapsed time is: ', elapsedTime)

if __name__ == '__main__': main()
Client:
from SupFunctions.ServerClientFunc import PiImageClient
import time
import pickle
import cv2
def main():
    """Receive frames, display them, and send a command back for each one.

    Connects to the Pi on two sockets: 50009 for frames, 50002 for
    commands.  Pressing 'q' sends 'BYE' and ends the session.
    """
    # Initialize.
    result = 'STP'  # placeholder command sent after each processed frame
    ImageClient = PiImageClient()
    ImageClient2 = PiImageClient()

    # Connect to server (frames on 50009, commands on 50002).
    ImageClient.connectClient('192.168.0.89', 50009)
    ImageClient2.connectClient('192.168.0.89', 50002)
    print('<INFO> Connection established, preparing to receive frames...')
    timeStart = time.time()

    # Receiving and processing frames.
    try:
        while True:
            # Receive and unload a frame.
            imageData = ImageClient.receiveFrame()
            image = pickle.loads(imageData)
            cv2.imshow('Frame', image)
            key = cv2.waitKey(1) & 0xFF

            # Exit when q is pressed.
            if key == ord('q'):
                # BUG FIX: the server reads commands from the SECOND
                # socket, so 'BYE' must go through ImageClient2 (the
                # original sent it on the image socket, where the server
                # never looks, leaving it hanging in recvCommand()).
                ImageClient2.sendCommand('BYE')
                break
            ImageClient2.sendCommand(result)
    finally:
        # BUG FIX: close BOTH sockets (the original leaked ImageClient2).
        ImageClient.closeClient()
        ImageClient2.closeClient()

    elapsedTime = time.time() - timeStart
    print('<INFO> Total elapsed time is: ', elapsedTime)
    print('Press any key to exit the program')
    cv2.waitKey(0)

if __name__ == '__main__': main()
PiImageServer and PiImageClient:
import io
import pickle
import socket
import time
class PiImageClient:
    """TCP client that receives length-prefixed pickled frames and sends
    short (3-byte) command strings.

    Wire format (must match PiImageServer): each frame is preceded by
    pickle.dumps(len(frameData)).  That prefix is *variable length*
    (5-8 bytes depending on the int), so it is parsed with an Unpickler
    to find exactly where the payload starts.
    """

    def __init__(self):
        self.s = None       # connected socket
        self.counter = 0    # number of frames received so far

    def connectClient(self, serverIP, serverPort):
        """Open a TCP connection to the server."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((serverIP, serverPort))

    def closeClient(self):
        """Close the connection."""
        self.s.close()

    def _recvExact(self, n):
        """Receive exactly n bytes; raise ConnectionError on EOF.

        BUG FIX: recv() returning b'' means the peer closed the
        connection; the original loop would spin forever in that case.
        """
        data = b''
        while len(data) < n:
            chunk = self.s.recv(min(4096, n - len(data)))
            if not chunk:
                raise ConnectionError('Connection closed while receiving data')
            data += chunk
        return data

    def _recvFrameData(self):
        """Read one length-prefixed frame; return (length, payload bytes).

        BUG FIX: the original recv(8) could consume the first payload
        bytes when the pickled prefix was shorter than 8 bytes, silently
        corrupting the frame.  Here the prefix is parsed with an
        Unpickler and any payload bytes read past it are kept.
        """
        header = io.BytesIO(self._recvExact(8))
        length = pickle.Unpickler(header).load()
        imageData = header.read()  # payload bytes read past the prefix
        imageData += self._recvExact(length - len(imageData))
        return length, imageData

    def receiveOneImage(self):
        """Receive a single image and return its pickled bytes."""
        length, imageData = self._recvFrameData()
        print('Data length is:', length)
        return imageData

    def receiveFrame(self):
        """Receive one frame of a stream and return its pickled bytes."""
        length, imageData = self._recvFrameData()
        print('Data length is:', length)
        self.counter += 1
        print('Successfully received frame {}'.format(self.counter))
        return imageData

    def sendCommand(self, command):
        """Send a short command string (expected to be exactly 3 chars)."""
        if len(command) != 3:
            print('<WARNING> Length of command string is different from 3')
        # BUG FIX: sendall() guarantees the whole command goes out
        # (send() may transmit only part of the buffer).
        self.s.sendall(command.encode())
        print('Command {} sent'.format(command))
class PiImageServer:
    """Single-client TCP server that sends length-prefixed pickled frames
    and receives short (3-byte) command strings.

    Wire format (must match PiImageClient): each frame is preceded by
    pickle.dumps(len(frameData)).
    """

    def __init__(self):
        self.s = None      # listening socket
        self.conn = None   # accepted connection
        self.addr = None   # client address
        # Human-readable timestamp; refreshed when the server closes.
        self.currentTime = time.asctime(time.localtime(time.time()))
        self.counter = 0   # frames sent so far

    def openServer(self, serverIP, serverPort):
        """Bind, listen, and block until one client connects."""
        print('<INFO> Opening image server at {}:{}'.format(serverIP,
                                                            serverPort))
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind((serverIP, serverPort))
        self.s.listen(1)
        print('Waiting for client...')
        self.conn, self.addr = self.s.accept()
        print('Connected by', self.addr)

    def closeServer(self):
        """Close the accepted connection and the listening socket."""
        print('<INFO> Closing server...')
        self.conn.close()
        self.s.close()
        self.currentTime = time.asctime(time.localtime(time.time()))
        print('Server closed at', self.currentTime)

    def sendOneImage(self, imageData):
        """Send a single image, preceded by its pickled length."""
        print('<INFO> Sending only one image...')
        imageDataLen = len(imageData)
        lenData = pickle.dumps(imageDataLen)
        print('Sending image length')
        # BUG FIX: sendall() instead of send() -- send() may transmit
        # only part of a ~200 kB buffer, silently corrupting the stream.
        self.conn.sendall(lenData)
        print('Sending image data')
        self.conn.sendall(imageData)

    def sendFrame(self, frameData):
        """Send one frame of a stream, preceded by its pickled length."""
        self.counter += 1
        print('Sending frame ', self.counter)
        lenData = pickle.dumps(len(frameData))
        # Prefix and payload in one sendall(): no partial writes, and
        # fewer tiny packets on the wire.
        self.conn.sendall(lenData + frameData)

    def recvCommand(self):
        """Receive a command of up to 3 bytes and return it as a string.

        Returns '' if the client closed the connection.
        """
        commandData = self.conn.recv(3)
        return commandData.decode()
I believe the problem is two-fold. First, you are serializing all activity: The server is sending a complete image, then instead of continuing on to send the next image (which would better fit the definition of "streaming"), it is stopping, waiting for all bytes of the previous image to make themselves across the network to the client, then for the client to receive all bytes of the image, unpickle it, send a response and for the response to then make its way across the wire to the server.
Is there a reason you need them to be in lockstep like this? If not, try to parallelize the two sides. Have your server create a separate thread to listen for commands coming back (or simply use select to determine when the command socket has something to receive).
Second, you are likely being bitten by Nagle's algorithm (https://en.wikipedia.org/wiki/Nagle%27s_algorithm), which is intended to prevent sending numerous packets with small payloads (but lots of overhead) across the network. So, your client-side kernel has gotten your three-bytes of command data and has buffered it, waiting for you to provide more data before it sends the data to the server (it will eventually send it anyway, after a delay). To change that, you would want to use the TCP_NODELAY socket option on the client side (see https://stackoverflow.com/a/31827588/1076479).
Related
I need help figuring out what I'm doing wrong with my python sockets.
I have made a pair of server/client programmes where the client sends a live feed from the camera of the Raspberry Pi it is running on to the server, which is running on a Linux laptop on the same network. This part works.
My problem is that I now want to be able to send a string back to the Raspberry Pi to control the zoom level of the image. Unlike the camera-feed, this communication does not need to be continuously happening, and it would in fact be preferable for it to only occur as needed.
Unfortunately, I have almost no experience with python sockets. I only managed to get the video-feed working thanks to a helpful YouTube video, which I then modified to work in a Tkinter window with some help from the Stack Overflow community. (The video in question is: https://www.youtube.com/watch?v=bWwZF_zVf00)
My current attempts have consisted of trying to send the string data back along the same socket connection that I am using to get the images, but I suspect that this method is probably flawed.
So my question is this: How can I get the string data back to the client that's running on the Raspberry Pi?
From what I've read, I suspect that the answer involves making a second socket connection between the two devices using threads, but when I tried putting the function that runs the image socket into a thread, the picture began to flicker wildly. FAIR WARNING TO EPILEPTICS!
I stripped down the programme I was working on to make this simplified version to illustrate the problem. If it were working correctly, the Z+ button would zoom in the image and print "Received String = ZOOM|0.5" to the console on the client side, while the Z- button would zoom out the image and print "Received String = ZOOM|1.0".
I'll put what I think are the relevant functions from the server and client first, and then include a full copy of the simplified version in case anyone needs to see more or wants to use this themselves, since the video-feed works quite well (if you comment out the feedback functions).
I realise that at the moment I am trying to send the string on every frame of video. This is because I had a few ideas for improvements as I was writing this post.
Sorry if this post comes across as long-winded, but this IS me trying to be concise.
Relevant functions from the server (rearranged so that the feedback sending function is at the top of its section):
#=====================================================================================
def Vid_Button(self):
    """Start the video session unless one is already running.

    NOTE(review): Listen_For_Data() blocks, so the Tkinter event loop is
    frozen while the feed runs; the commented-out Thread was an attempt
    to avoid that but reportedly made the image strobe.
    """
    if (self.myvar_vid_on == False):
        self.myvar_vid_on = True
        # comUnicate = Thread(target = self.Listen_For_Data)
        # comUnicate.start() # Threading causes image to strobe.
        self.Listen_For_Data()
#=====================================================================================
## Functions Relating To The Camera Live-Feed From The Robot:
# (LFD = Listen For Data)
# (These functions support the main function of this group, Listen_For_Data() )
# Send zoom level:
def LFD_Send_Feedback(self):
    """Send the current zoom level to the robot as 'ZOOM|<level>'.

    NOTE(review): defects visible here --
    1. self.connection_Image was created with makefile('rb') (a read-only
       binary file object); file objects have no send(), so this line
       raises AttributeError.  Feedback must go through the socket
       returned by accept() (e.g. sock.sendall(...)).
    2. Sockets transmit bytes, not str: the string must be .encode()d.
    3. The zoom buttons assign the global 'pos_Zoom' (different
       capitalisation), so 'pos_ZOOM' read here never changes.
    """
    global pos_ZOOM
    print("pos_ZOOM = " + str(pos_ZOOM))
    # Prepare the string to be sent:
    send_String = "ZOOM|" + str(pos_ZOOM)
    print("send_String = " + send_String)
    # Send the string: # I HAVE NO IDEA HOW TO SEND THE INFORMATION!!
    self.connection_Image.send(send_String) # I think I'm trying to send the wrong datatype here.
# Create the server for the robot to send images to:
def LFD_Image_Server_Create(self):
    """Create the listening socket the robot will connect to."""
    global HOST
    global PORT
    self.server_socket = socket.socket()
    self.server_socket.bind((HOST, PORT))
    self.server_socket.listen(0)
# Display the image from the camera live-feed in the window:
def LFD_Display_Image(self):
    """Show the most recent frame (self.img) in the image label."""
    # Change the image being displayed.  Keeping a reference on the
    # label stops Tkinter from garbage-collecting the PhotoImage.
    self.label__Image.image = self.img
    self.label__Image.configure(image = self.img)
    # Refresh the window so that the image will actually be displayed:
    self.label__Image.update()
# Image acquisition loop:
def LFD_Loop_Get_Image(self):
    """Receive length-prefixed JPEG frames until a zero length arrives.

    Protocol: each frame is preceded by its byte length packed as a
    little-endian 32-bit unsigned int ('<L'); a length of 0 means the
    sender is done.
    """
    while(programme_Open):
        global pos_Zoom  # NOTE(review): declared but never used in this
                         # method; also differs in case from the
                         # 'pos_ZOOM' read by LFD_Send_Feedback().
        # Raise active communication flag:
        self.myvar_comms_active = True
        # Read the length of the image as a 32-bit unsigned int:
        self.image_len = struct.unpack('<L', self.connection_Image.read(struct.calcsize('<L')))[0]
        # If the length is zero (i.e. no image), quit the loop:
        if not self.image_len:
            self.myvar_comms_active = False # Lower active communication flag.
            break
        # Construct a stream to hold the image data and read the image data from the connection:
        self.image_stream = io.BytesIO()
        self.image_stream.write(self.connection_Image.read(self.image_len))
        # Extract image data:
        self.image_stream.seek(0) # Rewind the stream.
        self.live_feed = Image.open(self.image_stream) # Open the stream as an image with PIL.
        self.img = ImageTk.PhotoImage(self.live_feed) # Convert the image into a format that Tkinter can use.
        # Display image in the window:
        self.LFD_Display_Image()
        # Send zoom level to the robot:
        self.LFD_Send_Feedback()
    # Lower active communication flag:
    self.myvar_comms_active = False
# Used to get a video feed from the Raspberry Pi's camera:
def Listen_For_Data(self):
    """Accept one connection from the robot and run the image loop.

    NOTE(review): blocks for the whole session, so the Tkinter mainloop
    is frozen while the feed runs.  Only the read-only makefile('rb')
    wrapper is kept -- the accepted socket itself is discarded, leaving
    no handle on which to send feedback back to the robot.
    """
    global programme_Open
    try:
        # Create server:
        self.LFD_Image_Server_Create()
        # Accept connection from the Raspberry Pi for sending images: (Accept a single connection and make a file-like object out of it)
        self.connection_Image = self.server_socket.accept()[0].makefile('rb')
    except:  # NOTE(review): bare except hides the real error; prefer OSError.
        print("Failed To Establish Connection")
    else:
        # Indicate that the connection was established:
        print("Established Connection")
        self.Status_Label_Connected()
        # Get images from the Raspberry Pi:
        try:
            self.LFD_Loop_Get_Image()
        except struct.error:
            print("Connection Broken")
        finally:
            try:
                # This try statement fixes an error that occurs when quitting programme while the video connection is still running.
                # Without the try statement, the program would try to alter a label that no longer exists, which results in an error.
                self.Status_Label_Not_Connected()
            except:
                pass
            # Close the connection with the Raspberry Pi:
            try:
                self.connection_Image.close()
            except AttributeError:
                print("Can't close a connection that doesn't exist!")
            finally:
                self.server_socket.close()
            # Lower "Video On" flag:
            self.myvar_vid_on = False
#=====================================================================================
Relevant functions from the client:
# Create the socket connection to the control device:
def MF_Connection_Create():
    """Connect to the control device and wrap the socket in a buffered
    write-only file object (stored in global 'connection')."""
    global HOST
    global PORT
    global client_socket
    global connection
    client_socket = socket.socket() # Create a socket.
    client_socket.connect((HOST, PORT)) # Tell the socket to talk to the control device.
    connection = client_socket.makefile('wb') # Make a file-like object out of the connection.
# Get feedback data:
def MF_Get_Feedback():
    """Receive a 'ZOOM|<level>' string from the controller and apply it.

    NOTE(review): defects visible here --
    1. 'connection' is the makefile('wb') file object: write-only, and
       file objects have no recv().  Read from 'client_socket' instead
       (e.g. client_socket.recv(2048)).
    2. recv() returns bytes; decode() before split("|") (or split on
       b"|"), otherwise a str/bytes TypeError follows.
    3. This call blocks until data arrives, stalling the video loop.
    """
    global connection
    global client_socket
    global level_ZOOM
    # Get the string:
    value_received = connection.recv(2048)
    # Display the received string:
    print("Received String = " + str(value_received))
    # Split the string into keyword and value:
    value_split = value_received.split("|")
    # Update the desired setting:
    if (value_split[0] == "ZOOM"):
        level_ZOOM = float(value_split[1])
        print("ZOOM = " + str(level_ZOOM))
        camera.zoom = (0.0, 0.0, level_ZOOM, level_ZOOM)
# Stream images to the control device:
def MF_Stream_Images():
    """Capture JPEG frames continuously and send them length-prefixed.

    Each frame: 4-byte little-endian length ('<L'), then the JPEG bytes.
    A final length of 0 tells the receiver the stream has ended.
    """
    global connection
    global camera
    global imag_Format
    # Construct a stream to hold image data temporarily:
    stream = io.BytesIO()
    # Continuously capture images in video mode:
    for foo in camera.capture_continuous(stream, format = imag_Format, use_video_port = True):
        try:
            # Write the length of the capture to the stream and flush to ensure it actually gets sent:
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            # Rewind the stream and send the image data over the WiFi link:
            stream.seek(0)
            connection.write(stream.read())
            # Get feedback values: (Under Development)
            # NOTE(review): currently raises (see MF_Get_Feedback) and
            # would block once per frame even when fixed.
            MF_Get_Feedback()
        except (BrokenPipeError, ConnectionResetError):
            print("Connection Broken")
            break
        else:
            # Reset the stream for the next capture
            stream.seek(0)
            stream.truncate()
    # If communications are cut, write a length of zero to the stream to signal that we're done:
    connection.write(struct.pack('<L', 0))
Full copy of the simplified server:
##############################################################################################
## IMPORT LIBRARIES:
# GUI Libraries:
import tkinter as tk
from PIL import Image, ImageTk
# Communication Libraries:
import io
import socket
import struct
# Threading Libraries:
import _thread
from threading import Thread
##############################################################################################
## DEFINE CONSTANTS:
# WiFi connection settings:
# WiFi connection settings:
ROBO = 'IP Address' # Network IP address of the robot (Raspberry Pi). # NOTE(review): unused in this script.
HOST = 'IP Address' # Network IP address of the laptop.
PORT = 8000 # Port to connect to.
# Define state colours for the connection label:
conctLab_NO = '#ff8888'  # light red: no connection
conctLab_YES = '#88ff88' # light green: connected
# Image size settings:
pixels_x = 640 # Width of the images, in pixels. # NOTE(review): unused; only pixels_y is referenced.
pixels_y = 480 # Height of the images, in pixels
##############################################################################################
## DEFINE VARIABLES:
programme_Open = True # Flag used to help close the programme
comms_Connected = False # Flag indicating whether there is a connection between the control device and the robot. # NOTE(review): never updated anywhere.
pos_ZOOM = 1.0 # Zoom level of the image. # NOTE(review): the zoom buttons assign 'pos_Zoom' (different case), so this never changes.
##############################################################################################
class Application(tk.Frame):
    """Tkinter window that displays the robot's camera feed and offers
    Start/Quit plus zoom-in/zoom-out buttons."""
    #=====================================================================================
    def __init__(self, master = None):
        #-------------------------------------------------------------------------
        tk.Frame.__init__(self, master) # super().__init__(master)
        #-------------------------------------------------------------------------
        self.myvar_vid_on = False        # is a video session running?
        self.myvar_comms_active = False  # is a transfer in progress?
        #-------------------------------------------------------------------------
        self.grid( sticky = tk.N + tk.S + tk.E + tk.W ) # Resize window contents when window is resized.
        self.Create_Widgets()
        #-------------------------------------------------------------------------
    #=====================================================================================
    def Create_Widgets(self):
        """Build all widgets and lay them out in a 2-column grid."""
        #-------------------------------------------------------------------------
        ## Create Window:
        top = self.winfo_toplevel()
        top.rowconfigure( 0, weight = 1 )
        top.columnconfigure( 0, weight = 1 )
        #-------------------------------------------------------------------------
        ## Create Widgets:
        # Image display:
        self.label__Image = tk.Label( self ) # Displays video feed.
        # Connection status label:
        self.label__Status = tk.Label( self, text = "Not Connected", bg = conctLab_NO, fg = "black" ) # Indicates if the video link is working.
        # Programme control buttons:
        self.button_Start = tk.Button( self, text = "Start Connection", fg = "black", command = self.Vid_Button ) # Connect to camera.
        self.button_Quit = tk.Button( self, text = "QUIT", fg = "black", command = self.Applicat_Quit ) # Quit programme.
        # Zoom buttons:
        self.button_C_Zoom_In = tk.Button( self, text = "Z+", fg = "black", command = self.PanTilt_Zoom_In ) # Zoom in
        self.button_C_Zoom_Out = tk.Button( self, text = "Z-", fg = "black", command = self.PanTilt_Zoom_Out ) # Zoom out
        #-------------------------------------------------------------------------
        ## Define Layouts:
        # Set relative column widths within the window:
        self.columnconfigure( 0, weight = 1 )
        self.columnconfigure( 1, weight = 1 )
        # Set relative row heights within the window:
        self.rowconfigure( 0, minsize = pixels_y)
        self.rowconfigure( 1, weight = 1 )
        self.rowconfigure( 2, weight = 1 )
        self.rowconfigure( 3, weight = 1 )
        #-------------------------------------------------------------------------
        ## Organise Widgets:
        self.label__Image.grid( column = 0, row = 0, columnspan = 2, sticky = tk.N + tk.E + tk.S + tk.W )
        self.label__Status.grid( column = 0, row = 1, columnspan = 2, sticky = tk.N + tk.E + tk.S + tk.W )
        self.button_Start.grid( column = 0, row = 2, sticky = tk.N + tk.E + tk.S + tk.W )
        self.button_Quit.grid( column = 0, row = 3, sticky = tk.N + tk.E + tk.S + tk.W )
        self.button_C_Zoom_In.grid( column = 1, row = 2, sticky = tk.N + tk.E + tk.S + tk.W )
        self.button_C_Zoom_Out.grid( column = 1, row = 3, sticky = tk.N + tk.E + tk.S + tk.W )
        #-------------------------------------------------------------------------
    #=====================================================================================
    ## Pan/Tilt Control Functions:
    # NOTE(review): these assign the global 'pos_Zoom', but the feedback
    # sender reads 'pos_ZOOM' (different capitalisation), so the zoom
    # level sent to the robot never changes.
    def PanTilt_Zoom_In(self):
        print("Zoom In")
        global pos_Zoom
        pos_Zoom = 0.5
    def PanTilt_Zoom_Out(self):
        print("Zoom Out")
        global pos_Zoom
        pos_Zoom = 1.0
    #=====================================================================================
    ## Functions For Updating The Label Which Displays The Video Connection Status:
    def Status_Label_Connected(self):
        self.label__Status.configure(text = "Connected" )
        self.label__Status.configure(bg = conctLab_YES)
    def Status_Label_Not_Connected(self):
        self.label__Status.configure(text = "Not Connected")
        self.label__Status.configure(bg = conctLab_NO )
    #=====================================================================================
    def Vid_Button(self):
        """Start the video session unless one is already running.

        NOTE(review): Listen_For_Data() blocks, freezing the GUI while
        the feed runs; the commented-out thread was an attempted fix.
        """
        if (self.myvar_vid_on == False):
            self.myvar_vid_on = True
            # comUnicate = Thread(target = self.Listen_For_Data)
            # comUnicate.start() # Threading causes image to strobe.
            self.Listen_For_Data()
    #=====================================================================================
    ## Functions Relating To The Camera Live-Feed From The Robot:
    # (LFD = Listen For Data)
    # (These functions support the main function of this group, Listen_For_Data() )
    # Create the server for the robot to send images to:
    def LFD_Image_Server_Create(self):
        """Create the listening socket the robot will connect to."""
        global HOST
        global PORT
        self.server_socket = socket.socket()
        self.server_socket.bind((HOST, PORT))
        self.server_socket.listen(0)
    # Display the image from the camera live-feed in the window:
    def LFD_Display_Image(self):
        """Show the most recent frame (self.img) in the image label."""
        # Change the image being displayed (keep a reference so the
        # PhotoImage isn't garbage-collected):
        self.label__Image.image = self.img
        self.label__Image.configure(image = self.img)
        # Refresh the window so that the image will actually be displayed:
        self.label__Image.update()
    # Image acquisition loop:
    def LFD_Loop_Get_Image(self):
        """Receive '<L'-length-prefixed JPEG frames until length 0 arrives."""
        while(programme_Open):
            global pos_Zoom  # NOTE(review): declared but never used in this method.
            # Raise active communication flag:
            self.myvar_comms_active = True
            # Read the length of the image as a 32-bit unsigned int:
            self.image_len = struct.unpack('<L', self.connection_Image.read(struct.calcsize('<L')))[0]
            # If the length is zero (i.e. no image), quit the loop:
            if not self.image_len:
                self.myvar_comms_active = False # Lower active communication flag.
                break
            # Construct a stream to hold the image data and read the image data from the connection:
            self.image_stream = io.BytesIO()
            self.image_stream.write(self.connection_Image.read(self.image_len))
            # Extract image data:
            self.image_stream.seek(0) # Rewind the stream.
            self.live_feed = Image.open(self.image_stream) # Open the stream as an image with PIL.
            self.img = ImageTk.PhotoImage(self.live_feed) # Convert the image into a format that Tkinter can use.
            # Display image in the window:
            self.LFD_Display_Image()
            # Send zoom level to the robot:
            self.LFD_Send_Feedback()
        # Lower active communication flag:
        self.myvar_comms_active = False
    # Send zoom level:
    def LFD_Send_Feedback(self):
        """Send 'ZOOM|<level>' back to the robot.

        NOTE(review): this raises AttributeError as written --
        self.connection_Image is a makefile('rb') file object (read-only,
        no send()); feedback needs the accepted socket itself, and the
        string must be encoded to bytes before sending.
        """
        global pos_ZOOM
        print("pos_ZOOM = " + str(pos_ZOOM))
        # Prepare the string to be sent:
        send_String = "ZOOM|" + str(pos_ZOOM)
        print("send_String = " + send_String)
        # Send the string: # I HAVE NO IDEA HOW TO DO THIS!!
        self.connection_Image.send(send_String) # I think I'm trying to send the wrong datatype here.
    # Used to get a video feed from the Raspberry Pi's camera:
    def Listen_For_Data(self):
        """Accept one connection from the robot and run the image loop."""
        global programme_Open
        try:
            # Create server:
            self.LFD_Image_Server_Create()
            # Accept connection from the Raspberry Pi for sending images: (Accept a single connection and make a file-like object out of it)
            self.connection_Image = self.server_socket.accept()[0].makefile('rb')
        except:  # NOTE(review): bare except -- prefer catching OSError.
            print("Failed To Establish Connection")
        else:
            # Indicate that the connection was established:
            print("Established Connection")
            self.Status_Label_Connected()
            # Get images from the Raspberry Pi:
            try:
                self.LFD_Loop_Get_Image()
            except struct.error:
                print("Connection Broken")
            finally:
                try:
                    # This try statement fixes an error that occurs when quitting programme while the video connection is still running.
                    # Without the try statement, the program would try to alter a label that no longer exists, which results in an error.
                    self.Status_Label_Not_Connected()
                except:
                    pass
                # Close the connection with the Raspberry Pi:
                try:
                    self.connection_Image.close()
                except AttributeError:
                    print("Can't close a connection that doesn't exist!")
                finally:
                    self.server_socket.close()
                # Lower "Video On" flag:
                self.myvar_vid_on = False
    #=====================================================================================
    ## Used To Quit The Programme:
    def Applicat_Quit(self):
        """Signal shutdown and destroy the Tkinter window."""
        # Indicate that the programme is beginning to close:
        print(MyVar_Divider_String)
        print("Starting Closing Procedures")
        # Signal to the programme that it should start to close:
        global programme_Open
        programme_Open = False
        # Close the window:
        print("About To Quit Tkinter") # Indicate that the Tkinter window is about to be destroyed.
        self.master.destroy() # Close Tkinter window.
    #=====================================================================================
##############################################################################################
## RUN PROGRAMME:
MyVar_Divider_String = "================================"  # visual divider for console output
# Signify start of programme execution:
print(MyVar_Divider_String)
print("Programme Start")
print(MyVar_Divider_String)
print("Command Log:")
# Run the Tkinter programme:
root = tk.Tk()
root.geometry("1000x800") # Set the default size of the window.
app = Application() # Create Application instance (attaches to the root created above).
app.master.title("Window Title") # Set the title of the window.
app.mainloop() # Start Application.
# Signify end of programme execution:
print(MyVar_Divider_String)
print("Programme Ended")
print(MyVar_Divider_String)
##############################################################################################
Full copy of the simplified client:
##############################################################################################
## IMPORT LIBRARIES:
import io
import socket
import struct
import time
import picamera
##############################################################################################
## CONSTANTS:
## WiFi connection settings:
HOST = 'IP Address' # Network IP address of the laptop controlling the robot.
PORT = 8000 # Port to connect to.
PESTER = 5 # How often the robot will attempt to connect to the controller device (in seconds).
# Camera settings:
cam_res = (640, 480) # Camera resolution. # cam_res = (1280, 720) # = HD
imag_Format = 'jpeg' # Image format.
##############################################################################################
## VARIABLES:
programme_Open = True # Used to help close the programme. # NOTE(review): never set to False in this script, so the main loop below runs forever.
level_ZOOM = 1.0 # Zoom level of the camera. (1.0 = no zoom)
##############################################################################################
## FUNCTIONS: # (MF = My Function)
# Create the camera object:
def MF_Camera_Create():
    """Create and configure the PiCamera object (stored in global 'camera')."""
    global camera
    global cam_res
    global level_ZOOM
    # Create camera object:
    camera = picamera.PiCamera() # Make the camera object.
    camera.rotation = 180 # Rotate image by 180 degrees. (Camera is mounted upsidedown)
    camera.resolution = cam_res # Set the resolution of the image.
    camera.zoom = (0.0, 0.0, level_ZOOM, level_ZOOM) # Set zoom level.
# Turn on the camera:
def MF_Camera_Start():
    """Start the camera preview and give the sensor time to settle."""
    global camera
    # Start the camera:
    camera.start_preview(alpha = 200) # Start the camera & make preview see-through.
    time.sleep(2) # Let the camera warm up for 2 seconds.
# Turn off the camera:
def MF_Camera_Stop():
    """Stop the camera preview."""
    global camera
    camera.stop_preview() # Turn off the camera.
# Destroy the camera object:
def MF_Camera_Destroy():
    """Release the camera object."""
    global camera
    camera.close() # Destroy the camera object.
# Create the socket connection to the control device:
def MF_Connection_Create():
    """Connect to the control device and wrap the socket in a buffered
    write-only file object (stored in global 'connection')."""
    global HOST
    global PORT
    global client_socket
    global connection
    client_socket = socket.socket() # Create a socket.
    client_socket.connect((HOST, PORT)) # Tell the socket to talk to the control device.
    connection = client_socket.makefile('wb') # Make a file-like object out of the connection.
# Close the socket connection to the control device:
def MF_Connection_Destroy():
    """Close the buffered connection object, then the underlying socket.

    Safe to call even if the connection was never fully established or
    was already closed.
    """
    global connection
    global client_socket
    try:
        connection.close()
    except Exception:  # BUG FIX: narrowed from a bare 'except' so that
        pass           # KeyboardInterrupt/SystemExit are not swallowed.
    finally:
        client_socket.close()
# Get feedback data:
def MF_Get_Feedback():
    """Receive a 'ZOOM|<level>' string from the controller and apply it.

    NOTE(review): defects visible here --
    1. 'connection' is the makefile('wb') file object: write-only, and
       file objects have no recv().  Read from 'client_socket' instead
       (e.g. client_socket.recv(2048)).
    2. recv() returns bytes; decode() before split("|") (or split on
       b"|"), otherwise a str/bytes TypeError follows.
    3. This call blocks until data arrives, stalling the video loop.
    """
    global connection
    global client_socket
    global level_ZOOM
    # Get the string:
    value_received = connection.recv(2048)
    # Display the received string:
    print("Received String = " + str(value_received))
    # Split the string into keyword and value:
    value_split = value_received.split("|")
    # Update the desired setting:
    if (value_split[0] == "ZOOM"):
        level_ZOOM = float(value_split[1])
        print("ZOOM = " + str(level_ZOOM))
        camera.zoom = (0.0, 0.0, level_ZOOM, level_ZOOM)
# Stream images to the control device:
def MF_Stream_Images():
    """Capture JPEG frames continuously and send them length-prefixed.

    Each frame: 4-byte little-endian length ('<L'), then the JPEG bytes.
    A final length of 0 tells the receiver the stream has ended.
    """
    global connection
    global camera
    global imag_Format
    # Construct a stream to hold image data temporarily:
    stream = io.BytesIO()
    # Continuously capture images in video mode:
    for foo in camera.capture_continuous(stream, format = imag_Format, use_video_port = True):
        try:
            # Write the length of the capture to the stream and flush to ensure it actually gets sent:
            connection.write(struct.pack('<L', stream.tell()))
            connection.flush()
            # Rewind the stream and send the image data over the WiFi link:
            stream.seek(0)
            connection.write(stream.read())
            # Get feedback values: (Under Development)
            # NOTE(review): currently raises (see MF_Get_Feedback) and
            # would block once per frame even when fixed.
            MF_Get_Feedback()
        except (BrokenPipeError, ConnectionResetError):
            print("Connection Broken")
            break
        else:
            # Reset the stream for the next capture
            stream.seek(0)
            stream.truncate()
    # If communications are cut, write a length of zero to the stream to signal that we're done:
    connection.write(struct.pack('<L', 0))
# Quit application: (In future versions, this will also shutdown the robot.)
def applicat_Quit():
    """Release the camera and announce programme end."""
    MF_Camera_Stop() # Turn off the camera.
    MF_Camera_Destroy() # Destroy the camera object.
    print("End Programme")
    # Add line of code to turn off the robot (Raspberry Pi).
##############################################################################################
## RUN PROGRAMME:
print("Start Programme")
MF_Camera_Create() # Create the camera object.
MF_Camera_Start() # Turn on the camera.
# Keep trying to reach the control device until the programme closes.
# NOTE(review): programme_Open is never set to False in this script, so
# this loop runs forever; only Ctrl-C ends the programme.
while (programme_Open):
    try:
        MF_Connection_Create() # Create a socket connection to the control device.
    except ConnectionRefusedError:
        print("Could Not Connect To Control Device")
        pass
    else:
        print("Connected To Control Device")
        try:
            MF_Stream_Images() # Stream images to the control device.
        finally:
            print("Close Connection")
            MF_Connection_Destroy() # Close the socket connection.
    # Wait PESTER seconds before next attempt to connect to control device:
    time.sleep(PESTER)
# Quit application:
applicat_Quit()
##############################################################################################
I'm streaming the video from my raspberryPi using piCamera to a web socket, so that I can view it within my local network.
I want to make my own motion-detection script from scratch, so I want to get the first image from the video stream (which will be the plain background) and then, with a function, compare each subsequent frame to that first one to check whether something has changed (I have written those functions separately). I am not really worried about efficiency here.
MAIN ISSUE:
I want to get the data from those frames in a BytesIO object, then convert them to a 2D numpy array in B&W so I can perform operations. All this while keeping the stream going (I have in fact reduced the frame rate to 4 per second to make it run faster on my computer).
PROBLEM ENCOUNTERED WITH THE FOLLOWING CODE:
One part of the problem that I have identified is that the numbers are way off. In my settings I set the camera to a resolution of around 640×480 (= 307,200-element NumPy array of pixel data in B&W), whereas my computations with len() return fewer than 100k pixels.
def main():
    """Stream video over websockets while periodically grabbing still frames
    for motion analysis against a fixed background image.

    Fixes relative to the original:
    * JPEG bytes must be decoded as ``np.uint8`` -- ``np.uint64`` consumes
      8 bytes per element and hands cv2.imdecode a corrupted buffer (this is
      why len() reported far fewer "pixels" than expected).
    * ``BytesIO.read()`` immediately after ``camera.capture()`` returns b''
      because the stream position is at the end; ``getvalue()`` returns the
      whole buffer regardless of position.
    * A fresh BytesIO is created for every captured frame so stale JPEG data
      from the previous iteration is never re-decoded.
    """
    print('Initializing camera')
    base_image = io.BytesIO()
    with picamera.PiCamera() as camera:
        camera.resolution = (WIDTH, HEIGHT)
        camera.framerate = FRAMERATE
        camera.vflip = VFLIP  # flips image rightside up, as needed
        camera.hflip = HFLIP  # flips image left-right, as needed
        sleep(1)  # camera warm-up time
        print('Initializing websockets server on port %d' % WS_PORT)
        WebSocketWSGIHandler.http_version = '1.1'
        websocket_server = make_server(
            '', WS_PORT,
            server_class=WSGIServer,
            handler_class=WebSocketWSGIRequestHandler,
            app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
        websocket_server.initialize_websockets_manager()
        websocket_thread = Thread(target=websocket_server.serve_forever)
        print('Initializing HTTP server on port %d' % HTTP_PORT)
        http_server = StreamingHttpServer()
        http_thread = Thread(target=http_server.serve_forever)
        print('Initializing broadcast thread')
        output = BroadcastOutput(camera)
        broadcast_thread = BroadcastThread(output.converter, websocket_server)
        print('Starting recording')
        camera.start_recording(output, 'yuv')
        try:
            print('Starting websockets thread')
            websocket_thread.start()
            print('Starting HTTP server thread')
            http_thread.start()
            print('Starting broadcast thread')
            broadcast_thread.start()
            time.sleep(0.5)
            # Grab the static background once; decode as uint8 grayscale.
            camera.capture(base_image, use_video_port=True, format='jpeg')
            base_data = np.frombuffer(base_image.getvalue(), dtype=np.uint8)
            base_img_matrix = cv2.imdecode(base_data, cv2.IMREAD_GRAYSCALE)
            while True:
                camera.wait_recording(1)
                # New buffer per frame; otherwise the old JPEG lingers.
                image_captured = io.BytesIO()
                # insert here the code for frame analysis
                camera.capture(image_captured, use_video_port=True, format='jpeg')
                data_next = np.frombuffer(image_captured.getvalue(), dtype=np.uint8)
                next_img_matrix = cv2.imdecode(data_next, cv2.IMREAD_GRAYSCALE)
                monitor_changes(base_img_matrix, next_img_matrix)
                image_captured.close()
        except KeyboardInterrupt:
            pass
        finally:
            print('Stopping recording')
            camera.stop_recording()
            print('Waiting for broadcast thread to finish')
            broadcast_thread.join()
            print('Shutting down HTTP server')
            http_server.shutdown()
            print('Shutting down websockets server')
            websocket_server.shutdown()
            print('Waiting for HTTP server thread to finish')
            http_thread.join()
            print('Waiting for websockets thread to finish')
            websocket_thread.join()


if __name__ == '__main__':
    main()
Solved, basically the problem was all in the way I was handling data and BytesIO files. First of all I needed to use unsigned int8 as type of the file to decode id. Then I have switched to np.frombuffer to read the files in its entirety, because the base image is not going to change, hence it will read always the same thing, and the next one will be inizialized and eliminated in every while loop. Also I can replace cv2.IMREAD_GRAYSCALE with 0 in the function.
# --- Corrected capture loop (fragment of main(); the enclosing def and the
# except/finally of this try block are outside this excerpt, and the
# original indentation appears to have been lost in the paste) ---
camera.start_recording(output, 'yuv')
base_image = io.BytesIO()
try:
print('Starting websockets thread')
websocket_thread.start()
print('Starting HTTP server thread')
http_thread.start()
print('Starting broadcast thread')
broadcast_thread.start()
time.sleep(0.5)
camera.capture(base_image, use_video_port=True, format='jpeg')
# getvalue() returns the whole buffer regardless of stream position, and
# JPEG bytes must be decoded as unsigned 8-bit integers.
base_data = np.frombuffer(base_image.getvalue(), dtype=np.uint8)
# 0 is equivalent to cv2.IMREAD_GRAYSCALE.
base_img_matrix = cv2.imdecode(base_data, 0)
while True:
camera.wait_recording(0.25)
# A fresh BytesIO per iteration so stale JPEG data is never re-decoded.
image_captured = io.BytesIO()
#insert here the code for frame analysis
camera.capture(image_captured, use_video_port=True, format='jpeg')
data_next = np.frombuffer(image_captured.getvalue(), dtype=np.uint8)
next_img_matrix = cv2.imdecode(data_next, cv2.IMREAD_GRAYSCALE)
monitor_changes(base_img_matrix, next_img_matrix)
image_captured.close()
I've been working on a project where I use a raspberry pi to send a live video feed to my server. This kinda works but not how I'd like it to.
The problem mainly is the speed. Right now I can send a 640x480 video stream with a speed of around 3.5 FPS and a 1920x1080 with around 0.5 FPS, which is terrible. Since I am not a professional I thought there should be a way of improving my code.
The sender (Raspberry pi):
def send_stream():
    """Read frames from the global capture device ``cap`` and push each one
    over the global socket ``s`` as a little-endian length header ("<L")
    followed by the pickled frame. Stops on the first socket error, or
    exits the process if the camera yields no frame.
    """
    streaming = True
    while streaming:
        ok, frame = cap.read()
        if not ok:
            print("Received no frame from camera, exiting.")
            exit()
        # You might want to enable this while testing.
        # cv2.imshow('camera', frame)
        payload = pickle.dumps(frame)
        header = struct.pack("<L", len(payload))
        try:
            s.sendall(header + payload)
        except socket.error:
            print("Socket Error!")
            streaming = False
The Receiver (Server):
def recv_stream(self):
    """Receive length-prefixed pickled frames from ``self.connection``
    forever: reassemble each frame from the TCP stream, convert BGR to RGB,
    run motion detection and publish the latest frame. On a socket error or
    timeout the connection is closed, ``self.is_connected`` is cleared and
    the loop exits.
    """
    header_len = struct.calcsize("<L")
    buffer = b''
    while True:
        try:
            start_time = datetime.datetime.now()
            # Accumulate the fixed-size length header first.
            while len(buffer) < header_len:
                buffer += self.connection.recv(4096)
            frame_size = struct.unpack("<L", buffer[:header_len])[0]
            buffer = buffer[header_len:]
            # Then accumulate the frame body.
            while len(buffer) < frame_size:
                buffer += self.connection.recv(32768)
            # Split off this frame; keep any bytes of the next one.
            frame_data, buffer = buffer[:frame_size], buffer[frame_size:]
            # NOTE: unpickling network data is unsafe with untrusted peers.
            frame = cv2.cvtColor(pickle.loads(frame_data), cv2.COLOR_BGR2RGB)
            end_time = datetime.datetime.now()
            # This "fps" includes network wait time, not just decoding.
            fps = 1 / (end_time - start_time).total_seconds()
            print("Fps: ", round(fps, 2))
            self.detect_motion(frame, fps)
            self.current_frame = frame
        except (socket.error, socket.timeout) as e:
            # The timeout got reached or the client disconnected. Clean up the mess.
            print("Cleaning up: ", e)
            try:
                self.connection.close()
            except socket.error:
                pass
            self.is_connected = False
            break
One potential reason could be I/O latency when reading frames. Since cv2.VideoCapture().read() is a blocking operation, the main program is stalled until a frame is read from the camera device and returned. A method to improve performance would be to spawn another thread to handle grabbing frames in parallel instead of relying on a single thread to grab frames in sequential order. We can improve performance by creating a new thread that only polls for new frames while the main thread handles processing/graphing the most recent frame.
Your current approach (Sequential):
Thread 1: Grab frame -> Process frame -> Plot
Proposed approach (Parallel):
Thread 1: Grab frame
from threading import Thread
import time
# Thread 1 (producer): poll the camera continuously for new frames.
def get_frames():
while True:
# NOTE(review): frame is discarded here; presumably the real code stores
# it into a shared "most recent frame" slot for the processing thread.
ret, frame = cap.read()
time.sleep(.01)
# Run the grabber as a daemon thread so it dies with the main program.
# NOTE(review): `self.get_frames` suggests this snippet was lifted from a
# class; as written, only the free function `get_frames` exists here.
thread_frames = Thread(target=self.get_frames, args=())
thread_frames.daemon = True
thread_frames.start()
Thread 2: Process frame -> Plot
# Thread 2 (consumer): work on the most recent frame, decoupled from camera I/O.
def process_frames():
"""Skeleton consumer loop: repeatedly take the latest grabbed frame and
process/plot it. Body intentionally left as a placeholder."""
while True:
# Grab most recent frame
# Process/plot frame
...
By having separate threads, your program will be in parallel since there will always be a frame ready to be processed instead of having to wait for a frame to be read in before processing can be done.
Note: This method will give you a performance boost based on I/O latency reduction. This isn't a true increase of FPS as it is a dramatic reduction in latency (a frame is always available for processing; we don't need to poll the camera device and wait for the I/O to complete).
After searching the internet for ages, I found a quick solution which doubled the fps (this is still way too low: 1.1 fps @ 1080p). What I did was stop using pickle and use base64 instead; apparently pickling the image just takes a while. Anyway, this is my new code:
The sender (Raspberry pi):
def send_stream():
    """Encode the global ``last_frame`` as JPEG, base64 it, and send it over
    the global socket ``s`` prefixed with its little-endian length ("<L").
    Returns a short status string when no frame is available or when the
    socket fails (also clearing the global ``connected`` flag and closing
    the socket in that case).
    """
    global connected
    active = True
    while active:
        if last_frame is None:
            return "Received no frame from camera"
        frame = last_frame
        # You might want to uncomment these lines while testing.
        # cv2.imshow('camera', frame)
        # cv2.waitKey(1)
        # JPEG-encode instead of pickling; much smaller and faster on the wire.
        encoded, buffer = cv2.imencode('.jpg', frame)
        payload = base64.b64encode(buffer)
        size = len(payload)
        print("Frame size = ", size)
        try:
            s.sendall(struct.pack("<L", size) + payload)
        except socket.error:
            print("Socket Error!")
            active = False
            connected = False
            s.close()
            return "Socket Error"
The Receiver (Server):
def recv_stream(self):
    """Receive length-prefixed, base64-encoded JPEG frames from
    ``self.connection`` in an endless loop, decode them to RGB and hand them
    to the motion detector; on a socket error or timeout the connection is
    closed, ``self.is_connected`` is cleared and the loop exits.

    Fix: ``np.fromstring`` (deprecated since NumPy 1.14, removed in
    NumPy 2.0) is replaced by the drop-in ``np.frombuffer``, which yields
    the same uint8 array without copying.
    """
    payload_size = struct.calcsize("<L")
    data = b''
    while True:
        try:
            start_time = datetime.datetime.now()
            # keep receiving data until it gets the size of the msg.
            while len(data) < payload_size:
                data += self.connection.recv(4096)
            # Get the frame size and remove it from the data.
            frame_size = struct.unpack("<L", data[:payload_size])[0]
            data = data[payload_size:]
            # Keep receiving data until the frame size is reached.
            while len(data) < frame_size:
                data += self.connection.recv(131072)
            # Cut the frame to the beginning of the next frame.
            frame_data = data[:frame_size]
            data = data[frame_size:]
            # Decode: base64 -> raw JPEG bytes -> BGR image -> RGB.
            img = base64.b64decode(frame_data)
            npimg = np.frombuffer(img, dtype=np.uint8)  # was np.fromstring (removed in NumPy 2.0)
            frame = cv2.imdecode(npimg, 1)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            end_time = datetime.datetime.now()
            # NOTE: this "fps" includes network wait time, not just decoding.
            fps = 1/(end_time-start_time).total_seconds()
            print("Fps: ", round(fps, 2))
            self.detect_motion(frame, fps)
            self.current_frame = frame
        except (socket.error, socket.timeout) as e:
            # The timeout got reached or the client disconnected. Clean up the mess.
            print("Cleaning up: ", e)
            try:
                self.connection.close()
            except socket.error:
                pass
            self.is_connected = False
            break
I also increased the packet size which increased the fps when sending from my local machine to my local machine while testing, but this didn't change anything whatsoever when using the raspberry pi.
You can see the full code on my github: https://github.com/Ruud14/SecurityCamera
I've written a code which is supposed to receive some images and make them black & white. I'm measuring the response time for each task (response time = the time each image is received and is turned to black & white). Here is the code:
from __future__ import print_function
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from select import select
import socket
from struct import pack
from struct import unpack
#from collections import deque
import commands
from PIL import Image
import time
# Network configuration: listen on this machine's first LAN address.
# NOTE(review): the `commands` module is Python 2 only (subprocess on Python 3).
host = commands.getoutput("hostname -I")
port = 5005
backlog = 5
# Socket read size used by Receiver.get()/save().
BUFSIZE = 4096
# Shared state mutated by read_tcp() and processP():
queueList = []  # file names received and awaiting processing
start = []  # receive timestamp per image
end = []  # processing-finished timestamp per image
temp = []  # per-image response times (end - start)
def processP(q):
    """Drain queue ``q`` of image file names: convert each image to 1-bit
    black & white (threshold 128), save it as "<n>+30.jpg", and append a
    completion timestamp to the module-level ``end`` list per image.
    The queue is consumed (emptied) in the process.
    """
    index = 0
    while q:
        fname = q.pop(0)
        source = Image.open(fname)
        # Greyscale first, then threshold into pure black/white.
        mono = source.convert('L').point(lambda px: 0 if px < 128 else 255, '1')
        mono.save("%d+30.jpg" % (index + 1))
        end.append(time.time())
        #print(temp)
        index += 1
class Receiver:
    """Buffers binary data arriving on a socket connection ``conn``."""

    def __init__(self, conn):
        self.conn = conn
        self.buff = bytearray()

    def get(self, size):
        """Return up to ``size`` bytes, reading more from the connection as
        needed. Returns fewer bytes only if the peer closes the stream
        before enough data arrives."""
        while len(self.buff) < size:
            chunk = self.conn.recv(BUFSIZE)
            if not chunk:
                break  # connection closed early
            self.buff.extend(chunk)
        head = self.buff[:size]
        # Drop the returned bytes from the front of the buffer.
        del self.buff[:size]
        return bytes(head)

    def save(self, fname):
        """Write any buffered bytes plus the remainder of the stream to the
        file ``fname`` (reads until the peer closes the connection)."""
        with open(fname, 'wb') as out:
            if self.buff:
                out.write(bytes(self.buff))
            while True:
                chunk = self.conn.recv(BUFSIZE)
                if not chunk:
                    break
                out.write(chunk)
def read_tcp(s):
'''Accept one connection on listening socket s, receive a 1-byte-length-
prefixed file name followed by the file contents, save the file to disk,
and record timing bookkeeping in the module-level queueList/start/end/temp
lists. When the sentinel file "sample.jpg" arrives, process the whole
queue and print per-image response times.
NOTE(review): indentation appears lost in this paste; the block from
"if (name == ...)" onward presumably nests inside that if -- confirm.'''
conn, addr = s.accept()
print('Connected with', *addr)
# Create a buffer for this connection
receiver = Receiver(conn)
# Get the length of the file name
name_size = unpack('B', receiver.get(1))[0]
name = receiver.get(name_size).decode()
# Save the file
receiver.save(name)
conn.close()
print('saved\n')
# Book-keeping: queue the file and timestamp its arrival.
queueList.append(name)
print('name', name)
start.append(time.time())
# Sentinel file name triggers batch processing of everything queued so far.
if (name == "sample.jpg"):
print('------------ok-------------')
processP(queueList)
print("Start: ", start)
print('--------------------------')
print("End: ", end)
# Pair up start/end timestamps into response times (drains both lists).
while start:
temp.append(end.pop(0) - start.pop(0))
print('****************************')
print("Start: ", start)
print('--------------------------')
print("End: ", end)
print("Temp: ", temp)
# Report whether each response time exceeded the previous one.
i = 0
while i < len(temp)-1:
if (temp[i]<temp[i+1]):
print('yes')
else:
print('No')
i = i + 1
def read_udp(s):
    """Drain one datagram (up to 1024 bytes) from UDP socket ``s`` and log it."""
    message, sender = s.recvfrom(1024)
    print("received message:", message)
def run():
    """Listen for image uploads on TCP and short messages on UDP (same port),
    dispatching readable sockets via select() until interrupted with Ctrl-C.

    Fix: the original re-raised KeyboardInterrupt *before* its close()
    calls, so neither socket was ever released; both closes now live in a
    ``finally`` block, which runs even through the re-raise. The local
    ``input`` was also renamed to avoid shadowing the builtin.
    """
    # create tcp socket
    tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        tcp.bind((host, port))
    except socket.error as err:
        print('Bind failed', err)
        return
    tcp.listen(1)
    # create udp socket
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP
    udp.bind((host, port))
    print('***Socket now listening at***:', host, port)
    sources = [tcp, udp]
    try:
        while True:
            readable, _, _ = select(sources, [], [])
            for sock in readable:
                if sock is tcp:
                    read_tcp(sock)
                elif sock is udp:
                    read_udp(sock)
                else:
                    print("unknown socket:", sock)
    # Hit Break / Ctrl-C to exit
    except KeyboardInterrupt:
        print('\nClosing')
        raise
    finally:
        # Always release both sockets, even when re-raising.
        tcp.close()
        udp.close()


if __name__ == '__main__':
    run()
Now for some evaluation purposes, I send a single image many times. When I look at the response times I see that sometimes the response time of the 8th image, for example, is more than the response time of the 9th one.
So my question is that since the size and the time needed for processing each of the images are the same (I'm sending a single image several times), why is the response time for each image variable? Shouldn't the response time of the next image be longer than (or at least equal to) that of the previous one (for example, the response time for the 4th image > the response time for the 3rd image)?
Your list contains the actual elapsed time it took for each image processing call. This value will be influenced by many things, including the amount of load on the system at that time.
When your program is running, it does not have exclusive access to all of the resources (cpu, ram, disk) of the system it's running on. There could be dozens, hundreds or thousands of other processes being managed by the OS vying for resources. Given this, it is highly unlikely that you would ever see even the same image processed in the exact same amount of time between two runs, when you are measuring with sub-second accuracy. The amount of time it takes can (and will) go up and down with each successive call.
I have no idea how to solve this problem. Please help me :)
I would like to send sound data, recorded by one PC, to the other PC and play it. (by UDP)
The program might work correctly, but the sound contain(?) uncomfortable noise.
when I tried to record & play sound in one program sequence, it worked correctly. There was no noise.
In case of using UDP even in one PC, use IP 127.0.0.1, the noise appeared.
At first, I thought the factor is because played sound is out in the other PC and I fixed it by making buffer.
It solved little noise, but almost all the noise is still remaining.
the following code is it
Client
import pyaudio
import socket
from threading import Thread
# Shared FIFO: record() appends captured chunks, udpStream() pops and sends them.
frames = []
def udpStream():
    """Consumer thread: pop recorded audio chunks off the shared ``frames``
    list and send each one to 127.0.0.1:12345 over UDP. Loops forever."""
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    while True:
        if frames:
            udp.sendto(frames.pop(0), ("127.0.0.1", 12345))
    udp.close()  # unreachable: the loop above never terminates
def record(stream, CHUNK):
    """Producer thread: read CHUNK audio frames at a time from the input
    stream and append each buffer to the shared ``frames`` list, forever."""
    while True:
        chunk = stream.read(CHUNK)
        frames.append(chunk)
if __name__ == "__main__":
    # Audio capture parameters: 16-bit stereo at 44.1 kHz, 1024 frames/buffer.
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 2
    RATE = 44100

    p = pyaudio.PyAudio()
    stream = p.open(format = FORMAT,
                    channels = CHANNELS,
                    rate = RATE,
                    input = True,
                    frames_per_buffer = CHUNK,
                    )

    # Producer (record) and consumer (udpStream) threads.
    Tr = Thread(target = record, args = (stream, CHUNK,))
    Ts = Thread(target = udpStream)
    # Thread.setDaemon() is deprecated (removal-warned since Python 3.10);
    # assign the .daemon attribute instead.
    Tr.daemon = True
    Ts.daemon = True
    Tr.start()
    Ts.start()
    Tr.join()
    Ts.join()
Server
import pyaudio
import socket
from threading import Thread
# Shared FIFO: udpStream() appends received chunks, play() pops them for playback.
frames = []
def udpStream(CHUNK):
    """Receiver thread: collect audio datagrams into the shared ``frames`` list.

    Fix: recvfrom()'s buffer must be at least as large as the biggest
    datagram, or the excess is silently discarded. The original passed
    CHUNK (1024) bytes while each packet carries CHUNK frames * 2 channels
    * 2 bytes/sample = 4096 bytes, so ~75% of every datagram was dropped --
    the cause of the noise. 65536 covers any possible UDP payload.
    (CHUNK stays as a parameter for interface compatibility.)
    """
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp.bind(("127.0.0.1", 12345))
    while True:
        soundData, addr = udp.recvfrom(65536)
        frames.append(soundData)
    udp.close()  # unreachable: the loop above never terminates
def play(stream, CHUNK):
    """Playback thread: wait until a small jitter buffer of chunks has
    accumulated in the shared ``frames`` list, then drain it to the output
    stream; re-buffer whenever it runs dry.

    Fixes:
    * ``len(frames) == BUFFER`` could be skipped forever if the receiver
      appended more than one chunk between checks; ``>=`` cannot miss.
    * the inner loop popped unconditionally, raising IndexError as soon as
      playback outpaced the network; it now stops when the list is drained
      and goes back to buffering.
    """
    BUFFER = 10
    while True:
        if len(frames) >= BUFFER:
            while frames:
                stream.write(frames.pop(0), CHUNK)
if __name__ == "__main__":
    # Audio playback parameters: must match the sender's capture settings.
    FORMAT = pyaudio.paInt16
    CHUNK = 1024
    CHANNELS = 2
    RATE = 44100

    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels = CHANNELS,
                    rate = RATE,
                    output = True,
                    frames_per_buffer = CHUNK,
                    )

    # Receiver (udpStream) and playback (play) threads.
    Ts = Thread(target = udpStream, args=(CHUNK,))
    Tp = Thread(target = play, args=(stream, CHUNK,))
    # Thread.setDaemon() is deprecated (removal-warned since Python 3.10);
    # assign the .daemon attribute instead.
    Ts.daemon = True
    Tp.daemon = True
    Ts.start()
    Tp.start()
    Ts.join()
    Tp.join()
sorry for long source code. Feel free to play this program.
I have searched for the reason of this noise. Finally I could detect why this happened.
Actually, this program's UDP transfer did not cause packet loss.
Even if it did, the sound do not have such a serious noise.
This program sent data correctly, and there are almost no packet loss, but the "receive" method could not receive data correctly.
In server program
def udpStream(CHUNK):
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.bind(("127.0.0.1", 12345))
while True:
# BUG (explained below): recvfrom(CHUNK) truncates every datagram to CHUNK
# bytes, while each packet carries CHUNK frames * CHANNELS channels * 2
# bytes per sample -- so most of each datagram is silently dropped.
soundData, addr = udp.recvfrom(CHUNK)
frames.append(soundData)
udp.close()
This program could receive only about 25% of the data (I checked the amount of data received).
So, I tried to receive the data multiply (CHANNELS * 2)
soundData, addr = udp.recvfrom(CHUNK * CHANNELS * 2)
This results in the sound data can be received 100% completely.
Finally, the sound recorded by one PC is played in the other PC without noise.
I've run into the same problem, but your solution didn't help me. What I discovered was that using
stream.write(frames.pop(0))
instead of
stream.write(frames.pop(0), CHUNK)
Clears all the noise in the received signal.