I have a script that recognizes plates from a camera, and now I need the same script to recognize plates from another camera, so in short it needs to recognize from two cameras at once. I am using TensorFlow/Keras and YOLO object detection. Can someone suggest a solution to this? I tried with different threads but I could not start the second thread. I will post what I have tried:
import sys, os
import threading
import traceback
import time
import sqlite3
import pyodbc
import keras
import cv2
import numpy as np
import imutils
from os.path import splitext, basename
from glob import glob
from imutils.video import VideoStream
from pattern import apply_pattern
import darknet.python.darknet as dn
from darknet.python.darknet import detect
from src.label import Shape, writeShapes, dknet_label_conversion
from src.utils import nms, im2single
from src.keras_utils import load_model, detect_lp
cam_vlez ="rtsp://"
cam_izlez = "rtsp://a"
def adjust_pts(pts, lroi):
    return pts * lroi.wh().reshape((2, 1)) + lroi.tl().reshape((2, 1))
def start_vlez(cam):
    # Start the stream once, outside the loop, instead of once per frame.
    cap = VideoStream(cam).start()
    while True:
        start_time = time.time()
        sky = cap.read()
        frame = sky[100:700, 300:1800]
        w = frame.shape[0]
        h = frame.shape[1]
        ratio = float(max(frame.shape[:2])) / min(frame.shape[:2])
        side = int(ratio * 288.)
        bound_dim = min(side + (side % (2 ** 4)), 608)
        Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(frame), bound_dim, 2 ** 4, (240, 80), lp_threshold)
        cv2.imshow('detected_plate', frame)
        if len(LlpImgs):
            Ilp = LlpImgs[0]
            s = Shape(Llp[0].pts)
            for shape in [s]:
                ptsarray = shape.pts.flatten()
                try:
                    frame = cv2.rectangle(frame, (int(ptsarray[0] * h), int(ptsarray[5] * w)), (int(ptsarray[1] * h), int(ptsarray[6] * w)), (0, 255, 0), 3)
                    cv2.imshow('detected_plate', frame)
                except:
                    traceback.print_exc()
                    sys.exit(1)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_BGR2GRAY)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_GRAY2BGR)
            cv2.imwrite('%s/_lp.png' % (output_dir), Ilp * 255.)
            cv2.imshow('lp_bic', Ilp)
            R, (width, height) = detect(ocr_net, ocr_meta, 'lp_images/_lp.png', thresh=ocr_threshold, nms=None)
            if len(R):
                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)
                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])
                result = apply_pattern(lp_str)
                write_to_database(result)
                print("License Plate Detected: ", lp_str)
                print("Written in database: ", result)
                print("--- %s seconds ---" % (time.time() - start_time))
                # updateSqliteTable(lp_str)
def start_izlez(cam):
    # Start the stream once, outside the loop, instead of once per frame.
    cap = VideoStream(cam).start()
    while True:
        start_time = time.time()
        sky = cap.read()
        frame = sky[100:700, 300:1800]
        w = frame.shape[0]
        h = frame.shape[1]
        ratio = float(max(frame.shape[:2])) / min(frame.shape[:2])
        side = int(ratio * 288.)
        bound_dim = min(side + (side % (2 ** 4)), 608)
        Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(frame), bound_dim, 2 ** 4, (240, 80), lp_threshold)
        cv2.imshow('detected_plate1', frame)
        if len(LlpImgs):
            Ilp = LlpImgs[0]
            s = Shape(Llp[0].pts)
            for shape in [s]:
                ptsarray = shape.pts.flatten()
                try:
                    frame = cv2.rectangle(frame, (int(ptsarray[0] * h), int(ptsarray[5] * w)), (int(ptsarray[1] * h), int(ptsarray[6] * w)), (0, 255, 0), 3)
                    cv2.imshow('detected_plate1', frame)
                except:
                    traceback.print_exc()
                    sys.exit(1)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_BGR2GRAY)
            Ilp = cv2.cvtColor(Ilp, cv2.COLOR_GRAY2BGR)
            cv2.imwrite('%s/_lp.png' % (output_dir), Ilp * 255.)
            cv2.imshow('lp_bic', Ilp)
            R, (width, height) = detect(ocr_net, ocr_meta, 'lp_images/_lp.png', thresh=ocr_threshold, nms=None)
            if len(R):
                L = dknet_label_conversion(R, width, height)
                L = nms(L, .45)
                L.sort(key=lambda x: x.tl()[0])
                lp_str = ''.join([chr(l.cl()) for l in L])
                result = apply_pattern(lp_str)
                write_to_database(result)
                print("License Plate Detected: ", lp_str)
                print("Written in database: ", result)
                print("--- %s seconds ---" % (time.time() - start_time))
                # updateSqliteTable(lp_str)
if __name__ == '__main__':
    try:
        output_dir = 'lp_images/'
        lp_threshold = .5
        wpod_net_path = "./my-trained-model/my-trained-model1_final.json"
        wpod_net = load_model(wpod_net_path)
        ocr_threshold = .6
        ocr_weights = b'data/ocr/ocr-net.weights'
        ocr_netcfg = b'data/ocr/ocr-net.cfg'
        ocr_dataset = b'data/ocr/ocr-net.data'
        ocr_net = dn.load_net(ocr_netcfg, ocr_weights, 0)
        ocr_meta = dn.load_meta(ocr_dataset)
        t = threading.Thread(target=start_vlez(cam_izlez))
        t1 = threading.Thread(target=start_izlez(cam_vlez))
        t.start()
        t1.start()
    except:
        print("Error: unable to start thread")
target= in Thread needs the function's name without () and without arguments; the thread will later use () to call it.
Your current code doesn't run the functions in threads. It works like:

result = start_vlez(cam_izlez)
result1 = start_izlez(cam_vlez)
t = threading.Thread(target=result)
t1 = threading.Thread(target=result1)
t.start()
t1.start()

so it runs the first function in the main thread and waits for it to end, then runs the second function, also in the main thread, and waits for it to end, and only after that does it try to use Thread.
If you have arguments, then you need to use the function's name without () in target= and put the arguments in a tuple in args=:

t = threading.Thread(target=start_vlez, args=(cam_izlez,))
t1 = threading.Thread(target=start_izlez, args=(cam_vlez,))

args= needs a tuple even for a single argument, which is why there is a trailing , in (cam_izlez,) and (cam_vlez,).
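Putting it together, a minimal sketch of the corrected main-block lines (keeping the question's camera-to-function pairing, and joining the threads so the main thread keeps running):

t = threading.Thread(target=start_vlez, args=(cam_izlez,))
t1 = threading.Thread(target=start_izlez, args=(cam_vlez,))
t.start()
t1.start()
t.join()
t1.join()

One caveat: calling cv2.imshow from worker threads is not guaranteed to be safe on every platform, so if the windows misbehave, consider handing frames back to the main thread for display.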
I am trying to run this code to make images compatible with a Polaroid Z2300 camera:
import PIL
import PIL.Image
import pyexiv2
import os
import shutil
import re
import random

ok = re.compile(r'^PICT\d\d\d\d$')

def rename(f):
    while True:
        nf = "PICT%04d.JPG" % random.randint(1111, 9999)
        if not os.path.isfile(nf):
            print(f, "->", nf)
            return nf

# Fix image paths
for f in os.listdir('.'):
    if f.upper().endswith('JPG'):
        a, b = os.path.splitext(f)
        if not ok.match(a):
            shutil.move(f, rename(f))

for f in os.listdir('.'):
    if f.upper().endswith('JPG'):
        print("Checking", f)
        # Find the image size
        im = PIL.Image.open(f)
        im_size = im.size
        if im_size != (3648, 2736) and im_size != (2736, 3648):
            print("Resizing image to POLAROID dimensions")
            im = im.resize((im_size[0] * 4, im_size[1] * 4))
            if im_size[0] > im_size[1]:
                into = PIL.Image.new(im.mode, (3648, 2736))
                im.thumbnail((3648, 2736), PIL.Image.ANTIALIAS)
            else:
                into = PIL.Image.new(im.mode, (2736, 3648))
                im.thumbnail((2736, 3648), PIL.Image.ANTIALIAS)
            into.paste(im, (int((into.size[0] - im.size[0]) / 2), int((into.size[1] - im.size[1]) / 2)))
            into.save(f)
            im = into
            im_size = im.size
        del im
        source_image = pyexiv2.ImageMetadata(f)
        source_image.read()
        try:
            exif_width = int(source_image["Exif.Photo.PixelXDimension"].raw_value)
            exif_height = int(source_image["Exif.Photo.PixelYDimension"].raw_value)
        except KeyError:
            exif_width = None
            exif_height = None
        # for k, v in source_image.items():
        #     print(k, v)
        if exif_width != im_size[0] or exif_height != im_size[1]:
            print("EXIF data was bad for %s" % f)
            print("  width - ", exif_width, im_size[0])
            print("  height - ", exif_height, im_size[1])
            source_image["Exif.Photo.PixelXDimension"] = im_size[0]
            source_image["Exif.Photo.PixelYDimension"] = im_size[1]
            source_image.write()
At the beginning everything looks fine: the code changes the image names and sizes, but then the AttributeError appears and the conversion fails to finish:
source_image = pyexiv2.ImageMetadata(f)
AttributeError: module 'pyexiv2' has no attribute 'ImageMetadata'
Any idea how to fix this? Do I have to change something in the code, or do I have to install something else to make it work?
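For what it's worth, the ImageMetadata class comes from the py3exiv2 bindings (which are imported under the name pyexiv2), while the unrelated pyexiv2 package on PyPI exposes a different API, so installing the wrong one produces exactly this AttributeError. A rough sketch of the same read/modify step with the PyPI pyexiv2 package, assuming its Image class (check that package's docs before relying on this):

import pyexiv2

# Hypothetical equivalent using the PyPI pyexiv2 package, which has no ImageMetadata class.
img = pyexiv2.Image(f)
exif = img.read_exif()  # dict mapping tag names to string values
exif_width = exif.get("Exif.Photo.PixelXDimension")
img.modify_exif({"Exif.Photo.PixelXDimension": str(im_size[0])})
img.close()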
I've seen ways of using the time.sleep() function; however, it stops the rest of the code from running.
I'm making a hand recognition script and don't want the video output to be hindered by a certain value being printed every second.
This is my code:
import cv2
from cvzone.HandTrackingModule import HandDetector
import time

cap = cv2.VideoCapture(0)
detector = HandDetector(maxHands=1, detectionCon=0.7)
length = 0

while True:
    success, img = cap.read()
    hands, img = detector.findHands(img)
    if hands:
        hand1 = hands[0]
        lmlist1 = hand1["lmList"]
        bbox = hand1["bbox"]
        cp1 = hand1["center"]
        HandType = hand1["type"]
        # fingers1 = detector.fingersUp(hand1)
        # print(fingers1)
        length, info, img = detector.findDistance(lmlist1[8], lmlist1[5], img)
        print(length)
        time.sleep(1)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
The problem is that because of:
print(length)
time.sleep(1)
the frame rate of the video is reduced to about 1 fps.
Is there a way to run the printing command so that it doesn't affect the frame rate?
Thanks.
EDIT
This is the new code, and the fps is still low on my computer.
import cv2
from cvzone.HandTrackingModule import HandDetector
import time
import threading
import math

# Resolution: 720 x 1280
cap = cv2.VideoCapture(0)
detector = HandDetector(maxHands=1, detectionCon=0.7)
length = 0
fingerpos = [1, 1, 1, 1, 1]

def index(img):
    lil, info, img = detector.findDistance(lmlist1[8], lmlist1[5], img)
    lib, info, img = detector.findDistance(lmlist1[6], lmlist1[5], img)
    lit, info, img = detector.findDistance(lmlist1[8], lmlist1[6], img)
    index_angle = (lib**2 + lit**2 - lil**2) / (2 * lib * lit)
    index_angle = math.degrees(math.acos(index_angle))
    return int(index_angle)

def middle(img):
    lml, info, img = detector.findDistance(lmlist1[12], lmlist1[9], img)
    lmb, info, img = detector.findDistance(lmlist1[12], lmlist1[10], img)
    lmt, info, img = detector.findDistance(lmlist1[10], lmlist1[9], img)
    middle_angle = (lmb**2 + lmt**2 - lml**2) / (2 * lmb * lmt)
    middle_angle = math.degrees(math.acos(middle_angle))
    return int(middle_angle)

def ring(img):
    lrl, info, img = detector.findDistance(lmlist1[16], lmlist1[13], img)
    lrb, info, img = detector.findDistance(lmlist1[16], lmlist1[14], img)
    lrt, info, img = detector.findDistance(lmlist1[14], lmlist1[13], img)
    ring_angle = (lrb**2 + lrt**2 - lrl**2) / (2 * lrb * lrt)
    ring_angle = math.degrees(math.acos(ring_angle))
    return int(ring_angle)

def pinky(img):
    lpl, info, img = detector.findDistance(lmlist1[20], lmlist1[17], img)
    lpb, info, img = detector.findDistance(lmlist1[20], lmlist1[18], img)
    lpt, info, img = detector.findDistance(lmlist1[18], lmlist1[17], img)
    pinky_angle = (lpb**2 + lpt**2 - lpl**2) / (2 * lpb * lpt)
    pinky_angle = math.degrees(math.acos(pinky_angle))
    return int(pinky_angle)

def thumb(img):
    ltl, info, img = detector.findDistance(lmlist1[4], lmlist1[2], img)
    ltb, info, img = detector.findDistance(lmlist1[4], lmlist1[3], img)
    ltt, info, img = detector.findDistance(lmlist1[3], lmlist1[2], img)
    thumb_angle = (ltb**2 + ltt**2 - ltl**2) / (2 * ltb * ltt)
    thumb_angle = math.degrees(math.acos(thumb_angle))
    return int(thumb_angle)

def data(img):
    print(str(thumb(img)) + ", " + str(index(img)) + ", " + str(middle(img)) + ", " + str(ring(img)) + ", " + str(pinky(img)))
    time.sleep(0.5)

threading.Thread(target=data).start()

while True:
    success, img = cap.read()
    # img = cv2.resize(img, (640, 420))
    hands, img = detector.findHands(img)
    # print('Resolution: ' + str(img.shape[0]) + ' x ' + str(img.shape[1]))
    if hands:
        hand1 = hands[0]
        lmlist1 = hand1["lmList"]
        bbox = hand1["bbox"]
        cp1 = hand1["center"]
        HandType = hand1["type"]
        data(img)
        # print(str(thumb(img)) + ", " + str(index(img)) + ", " + str(middle(img)) + ", " + str(ring(img)) + ", " + str(pinky(img)))
    cv2.imshow("Image", img)
    cv2.waitKey(1)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
One way is to use time.time to measure how much time has passed. This will print 'hi' every 5 seconds or so; it is less precise, because if some part of the loop takes more time, it may print later than expected:
import time

start = time.time()
while True:
    # run some code
    current_time = time.time()
    if current_time - start >= 5:
        print('hi')
        start = current_time
Or use the threading module to run a loop concurrently. This will print 'hi' every 5 seconds, and it is also more precise, because the time measurement is not affected by the speed of the "main" loop (as it is with the code above):
import time
import threading

def loop():
    while True:
        time.sleep(5)
        print('hi')

threading.Thread(target=loop).start()

while True:
    # run some code
    pass  # this can be removed after you add the actual code
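Applied to the hand-tracking loop from the question, a minimal sketch of the first approach (assuming the same cap and detector setup) that prints length about once a second without ever sleeping the video loop:

import time

last_print = time.time()
while True:
    success, img = cap.read()
    hands, img = detector.findHands(img)
    if hands:
        lmlist1 = hands[0]["lmList"]
        length, info, img = detector.findDistance(lmlist1[8], lmlist1[5], img)
        # Print at most once per second; the loop itself never blocks.
        if time.time() - last_print >= 1:
            print(length)
            last_print = time.time()
    cv2.imshow("Image", img)
    if cv2.waitKey(1) == ord("q"):
        break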
I executed object detection using a deep neural network, an SSD with a MobileNetV3 backbone, on a Raspberry Pi 4B, but it fails with this error: "AttributeError: module 'torchvision.models.detection' has no attribute 'ssdlite320_mobilenet_v3_large'".
The following are the specifications of my experiment:
model = SSD with mobilenetV3
library = pyTorch
torch = 1.7.1
torchvision = 0.8.2
device = raspberry pi 4B (ArmV8)
Here is my script:
# import the necessary packages
from torchvision.models import detection, mobilenet
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import pickle
import torch
import time
import cv2
import paramiko
import psutil
import GPUtil
import datetime
import mysql.connector
import ftplib
import pysftp

mydb = mysql.connector.connect(
    host="aaa",
    user="bbb",
    password="ccc!",
    database="ddd"
)
mycursor = mydb.cursor()

labels = 'pytorch/labels/coco_classes.pickle'
# class_name = 'pytorch/labels/coco.names'
# model = 'frcnn-mobilenet'
# model = 'frcnn-resnet'
# model = 'mrcnn-resnet'
# model = 'retinanet'
model = 'ssd-mobilenet'
# model = 'ssd-vgg16'
confidence_param = 0.5
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
CLASSES = pickle.loads(open(labels, "rb").read())
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
MODELS = {
    "frcnn-resnet": detection.fasterrcnn_resnet50_fpn,
    "frcnn-mobilenet": detection.fasterrcnn_mobilenet_v3_large_320_fpn,
    "retinanet": detection.retinanet_resnet50_fpn,
    "ssd-mobilenet": detection.ssdlite320_mobilenet_v3_large,
    "ssd-vgg16": detection.ssd300_vgg16,
    "mrcnn-resnet": detection.maskrcnn_resnet50_fpn
}

# load the model and set it to evaluation mode
print("[INFO] loading model...")
model = MODELS[model](pretrained=True, progress=True, num_classes=len(CLASSES), pretrained_backbone=True).to(DEVICE)
modelsum = model.eval()
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
fps = FPS().start()
# auto stop when timeout
time.sleep(2.0)
timeout = time.time() + 10
pTime = 0
cTime = 0
current_time = datetime.datetime.now()

# loop over the frames from the video stream
while True:
    cTime = time.time()
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    orig = frame.copy()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = frame.transpose((2, 0, 1))
    frame = np.expand_dims(frame, axis=0)
    frame = frame / 255.0
    frame = torch.FloatTensor(frame)
    frame = frame.to(DEVICE)
    detections = model(frame)[0]
    # gpu configuration
    GPUs = GPUtil.getGPUs()
    gpu = GPUs[0]
    gpu1 = gpu.load * 100
    cpu2 = psutil.cpu_percent() + 10
    ram1 = psutil.virtual_memory()[2]
    cpu3 = '{:.2f}'.format(cpu2)
    gpu2 = '{:.2f}'.format(gpu1)
    print('')
    print('---------------------------------')
    print('cpu usage: ', cpu3, '%')
    print('gpu usage: ', gpu2, '%')
    ram1 = psutil.virtual_memory()[2]
    print('RAM: ', ram1, '%')
    # loop over the detections
    for i in range(0, len(detections["boxes"])):
        confidence = detections["scores"][i]
        idx = int(detections["labels"][i])
        if idx == 1 and confidence > confidence_param:
            print('---object has been detected!---')
            box = detections["boxes"][i].detach().cpu().numpy()
            (startX, startY, endX, endY) = box.astype("int")
            label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
            cv2.rectangle(orig, (startX, startY), (endX, endY), COLORS[idx], 2)
            accuracy = label
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(orig, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
            s1 = cTime - pTime
            s2 = '{:.2f}'.format(s1)
            fps1 = 1 / (cTime - pTime)
            fps2 = '{:.2f}'.format(fps1)
            pTime = cTime
            print("bounding box: ", startX, startY, endX, endY)
            startXa = int(startX)
            startYa = int(startY)
            endXa = int(endX)
            endYa = int(endY)
            try:
                xid = mycursor.lastrowid
                print('Record ID: ', xid)
                xid += 1
            except:
                pass
            print('Throughput rate: ', fps2, "fps")
            print('---------------------------------')
            print('')
            break
        else:
            print('---NO object has been detected---')
            s1 = cTime - pTime
            s2 = '{:.2f}'.format(s1)
            fps1 = 1 / (cTime - pTime)
            fps2 = '{:.2f}'.format(fps1)
            pTime = cTime
            try:
                xid = mycursor.lastrowid
                print('Record ID: ', xid)
                xid += 1
            except:
                pass
            print('Throughput rate: ', fps2, "fps")
            print('---------------------------------')
            print('')
            break
    cv2.imshow("Frame", orig)
    key = cv2.waitKey(1) & 0xFF
    if time.time() > timeout:
        break
    if key == ord("q"):
        break
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
vs.stop()
SSDLite was only added to torchvision in version 0.10.0 (see the release notes).
That means you need to update torch and torchvision on your device by running
pip3 install --upgrade torch
pip3 install --upgrade torchvision
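To confirm the upgrade worked before rerunning the detector, a quick check like this does the job:

import torch
import torchvision
from torchvision.models import detection

print(torch.__version__, torchvision.__version__)
# With torchvision >= 0.10.0 this prints True.
print(hasattr(detection, "ssdlite320_mobilenet_v3_large"))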
import time
import threading
import datetime
import cv2
import imagezmq
from flask import Flask, Response
import emotion_detection_copy, mouth_open_copy
import objectdetection
import final_recogcopy
import predict
import person_and_phone_copy, head_pose_copy
import eye_tracker_copy

image_hub = imagezmq.ImageHub()

while True:
    cam_id, frame = image_hub.recv_image()
    print("before", datetime.datetime.now())
    # predict.predict_labels(frame)  # working fine
    t1 = threading.Thread(target=final_recogcopy.recog(frame), args=())
    t2 = threading.Thread(target=objectdetection.object_detection(frame), args=())  # working fine
    t3 = threading.Thread(target=emotion_detection_copy.detect_emotion(frame), args=())  # working fine
    t4 = threading.Thread(target=person_and_phone_copy.person_and_phone_count(frame), args=())  # working fine
    t5 = threading.Thread(target=head_pose_copy.head_position(frame), args=())  # working fine cv2.waitKey(1)
    t6 = threading.Thread(target=eye_tracker_copy.gaze_detection(frame))
    final_recogcopy.recog(frame)
    t7 = threading.Thread(target=mouth_open_copy.mouth_opening_detection(frame))
    x = datetime.datetime.now()
    # print(start)
    t2.start()
    start1 = time.time()
    t3.start()
    start2 = time.time()
    t4.start()
    start3 = time.time()
    t5.start()
    start4 = time.time()
    t6.start()
    start5 = time.time()
    t1.start()
    start = time.time()
    # t7.start()
    t1.join()
    y = datetime.datetime.now()
    print("diff is", y - x)
    end = time.time()
    print(end)
    print("Total time taken T1", start - end)
    t2.join()
    end1 = time.time()
    print("Total time taken T2", start1 - end1)
    t3.join()
    end2 = time.time()
    print("Total time taken T3", start2 - end2)
    t4.join()
    end3 = time.time()
    print("Total time taken T4", start3 - end3)
    t5.join()
    end4 = time.time()
    print("Total time taken T5", start4 - end4)
    t6.join()
    end5 = time.time()
    print("Total time taken T6", start5 - end5)
    # t7.join()
    print("after", datetime.datetime.now())
    cv2.imshow(cam_id, frame)
    cv2.waitKey(1)
    image_hub.send_reply(b'OK')
I am using imagezmq to stream video in Python from my webcam using OpenCV. When I stream plain video, the receiver side looks normal, but when I add processing like object detection on the receiver side, the video is nowhere near real time; it becomes very messy and slow. Please help.
Thanks
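(A note on the thread creation above: target=some_function(frame) calls each function immediately in the main loop, the same target= mistake discussed earlier in this document, so the threads themselves run nothing. A minimal sketch of the corrected pattern for two of the tasks, assuming the same module functions:

t1 = threading.Thread(target=final_recogcopy.recog, args=(frame,))
t2 = threading.Thread(target=objectdetection.object_detection, args=(frame,))
t1.start()
t2.start()
t1.join()
t2.join()

Even with that fixed, CPython's GIL means CPU-bound detection code in threads runs one task at a time, so heavy per-frame work may need processes, or smaller and faster models, to get close to real time.)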
SYSTEM
Linux (Manjaro KDE)
Python 3.8.3
PROGRAM:
I have incoming string data on a UDP port. The main loop spools up the processes prior to using selectors to monitor the UDP port. I want the UDP data, which is constantly updated, available for each process.
TRIED:
Multiprocessing Queues with maxsize=1; that became a headache and quickly broke down.
Multiprocessing Arrays (this is where I'm at now)
I have checked, and the Array at each location I'm looking at has the same memory address (I think). For whatever reason, when I try to access the contents of the Array in the child process, the process hangs.
NOT TRIED
Pipes. I have a feeling this may be the way to go. But I'm already deep in uncharted territory; I've never used them before.
WHAT I WANT
I would like to access the UDP data from the child processes; these run the camera_view function.
Dummy UDP string
import socket
import random
import datetime
import time

conn = ('127.0.0.1', 6666)

def rand_value(f_val, t_val):
    result = round(random.uniform(f_val, t_val), 2)
    result = random.uniform(f_val, t_val)
    return result

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

while True:
    time.sleep(6)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    overlay = timestamp
    for i in range(9):
        val = rand_value(i * 10, i * 10 + 10)
        if i == 8:
            val = 'TASK: Im the real Batman'
        overlay = overlay + "," + str(val)
    print(overlay)
    sock.sendto(overlay.encode(), conn)
My Program
import datetime
import selectors
import socket
import time
import cv2  # needed by the Camera class below
from multiprocessing import Lock, Process, Queue
from multiprocessing.sharedctypes import Array
from ctypes import c_char_p

REQUIRED_CAMERAS = 1
CAMERA_CONN = {'name': ['Colour Camera'], 'ip': ['127.0.0.1'], 'port': [9000]}
OVERLAY_CONN = ('0.0.0.0', 6666)
CONTROL_CONN = ('0.0.0.0', 6667)
NUMBER_OF_ITEMS_IN_OVERLAY = 10

class Camera():
    def __init__(self, cam_name, cam_ip, cam_port):
        self.ip = cam_ip
        self.port = cam_port
        self.capture = cv2.VideoCapture(0)
        self.frame_width = int(self.capture.get(3))
        self.frame_height = int(self.capture.get(4))
        self.name = cam_name

def get_overlay(data_packet):
    data = data_packet.decode()
    data = data.split(',')
    field0 = data[0]
    field1 = 'KP: ' + str(round(float(data[1]), 3))
    field2 = 'DCC: ' + str(round(float(data[2]), 2)) + 'm'
    field3 = 'E: ' + str(round(float(data[3]), 2)) + 'm'
    field4 = 'N: ' + str(round(float(data[4]), 2)) + 'm'
    field5 = 'D: ' + str(round(float(data[5]), 2)) + 'm'
    field6 = 'H: ' + str(round(float(data[6]), 2))  # + '°'
    field7 = 'R: ' + str(round(float(data[7]), 2))  # + '°'
    field8 = 'P: ' + str(round(float(data[8]), 2))  # + '°'
    field9 = data[9]
    x = []
    for i in range(NUMBER_OF_ITEMS_IN_OVERLAY):
        x.append(eval('field' + str(i)).encode())
        # if i == 0:
        #     print(x[i])
    return x

def socket_reader(sock, mask, q, REQUIRED_CAMERAS, overlay):
    data_packet, sensor_ip = sock.recvfrom(1024)
    sensor_port = sock.getsockname()[1]
    print(f'SENSOR PORT {sensor_port} and SENSOR_IP {sensor_ip}')
    if sensor_port == OVERLAY_CONN[1]:
        x = get_overlay(data_packet)
        for i in range(len(x)):
            overlay[i] = x[i]
        print(f'Socket Reader {overlay}')

def camera_view(CAMERA_CONN, cam_name, camera, overlay_q, control_q, overlay):
    while True:
        print(f'PROCESS {camera} RUNNING FOR: {cam_name}')
        try:
            print(f'Camera View {overlay}')
            for i in range(len(overlay)):
                print(overlay[i])
        except:
            pass
        time.sleep(1)

def controller(REQUIRED_CAMERAS, CAMERA_CONN, OVERLAY_CONN, CONTROL_CONN):
    if REQUIRED_CAMERAS > len(CAMERA_CONN['name']):
        print(f'REQURIED_CAMERAS: {REQUIRED_CAMERAS} - more than connections in CAMERA_CONN ')
    else:
        # Set up a UDP connection for the overlay string and the control commands
        sock_overlay = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock_control = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock_overlay.bind(OVERLAY_CONN)
        sock_control.bind(CONTROL_CONN)
        # Set up the selector to watch over the socket
        # and trigger when data is ready for reading
        sel = selectors.DefaultSelector()
        sel.register(fileobj=sock_overlay, events=selectors.EVENT_READ, data=socket_reader)
        sel.register(fileobj=sock_control, events=selectors.EVENT_READ, data=socket_reader)
        # create shared memory
        overlay_q = Queue(maxsize=1)
        control_q = Queue(maxsize=1)
        overlay = Array(c_char_p, range(NUMBER_OF_ITEMS_IN_OVERLAY))
        print(f'Init Overlay {overlay}')
        # Generate the processes; one per camera
        processes = []
        for camera in range(REQUIRED_CAMERAS):
            processes.append(Process(target=camera_view, args=(CAMERA_CONN, CAMERA_CONN['name'][camera], camera, overlay_q, control_q, overlay)))
        for process in processes:
            process.daemon = True
            process.start()
        # Spin over the selector
        while True:
            # Only have one connnection registered, so to stop
            # the loop spinning up the CPU, I have made it blocking
            # with the timeout = 1 (sec) instead of =0.
            events = sel.select(timeout=None)
            for key, mask in events:
                # the selector callback is the data= from the register above
                callback = key.data
                # the callback gets the sock, mask and the sensor queues
                if key.fileobj == sock_overlay:
                    callback(key.fileobj, mask, overlay_q, REQUIRED_CAMERAS, overlay)
                else:
                    callback(key.fileobj, mask, control_q, REQUIRED_CAMERAS, overlay)

if __name__ == "__main__":
    controller(REQUIRED_CAMERAS, CAMERA_CONN, OVERLAY_CONN, CONTROL_CONN)
EDIT1:
from multiprocessing import Process, Array
from ctypes import c_char_p
import time

def worker(arr):
    count = 0
    while True:
        count += 1
        val = 'val' + str(count)
        arr[0] = val
        print(arr[:])
        time.sleep(2)

def main():
    arr = Array(c_char_p, 1)
    p = Process(target=worker, args=(arr,))
    p.daemon = True
    p.start()
    while True:
        print(arr[:])
        try:
            print(arr[:].decode('utf-8'))
        except:
            pass
        # try:
        #     val = arr[:]
        #     val = val.decode('utf-8')
        #     print(f'main {val}')
        # except:
        #     pass
        time.sleep(1)

if __name__ == "__main__":
    main()

'''
from multiprocessing import Process, Array
from ctypes import c_char_p
import time

def worker(arr):
    count = 0
    while True:
        count += 1
        val = 'val' + str(count)
        arr[0] = bytes(val, 'utf-8')
        print(arr[:])
        time.sleep(2)

def main():
    arr = Array(c_char_p, 1)
    p = Process(target=worker, args=(arr,))
    p.daemon = True
    p.start()
    while True:
        print(arr[:])
        try:
            print(arr[:].decode('utf-8'))
        except:
            pass
        time.sleep(1)

if __name__ == "__main__":
    main()
'''
EDIT2:
Thanks to @RolandSmith, I have persevered with Queues, and I think I have a template for how to move forward. See the code below. If I can't get this to work in the program, I'll be back here.
from multiprocessing import Process, Queue
import time
import datetime

def worker(camera, q):
    val = ''
    while True:
        if q.full() == True:
            val = q.get()
        else:
            val = val
        print(f'WORKER{camera} {val}')
        time.sleep(0.2)

def main():
    cameras = 2
    processes = []
    queues = []
    for camera in range(cameras):
        queues.append(Queue(maxsize=1))
        processes.append(Process(target=worker, args=(camera, queues[camera])))
    for process in processes:
        process.daemon = True
        process.start()
    while True:
        for q in queues:
            if not q.empty():
                try:
                    _ = q.get()
                except:
                    pass
            else:
                q.put(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        time.sleep(.5)

if __name__ == "__main__":
    main()
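One caveat with this template: Queue.empty() and Queue.full() are documented as unreliable, since another process can change the queue between the check and the get()/put(). With a single producer and consumer per queue it mostly works; a sketch using the non-blocking calls avoids the race entirely (the helper name is mine, not from the program above):

import queue  # for the Empty and Full exceptions

def replace_latest(q, item):
    # Keep only the newest item in a maxsize=1 queue.
    try:
        q.get_nowait()
    except queue.Empty:
        pass
    try:
        q.put_nowait(item)
    except queue.Full:
        pass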
In my view, using a Queue is a less error-prone solution than using an Array. In particular, an Array of c_char_p stores raw pointers, and the multiprocessing docs warn that a pointer stored in shared memory refers to an address in the address space of a specific process, which is likely why reading it from the child process hangs.
Here is your second example, converted to using a Queue:
from multiprocessing import Process, Queue
import time

def worker(q):
    count = 0
    while True:
        count += 1
        val = 'val' + str(count)
        q.put(val)
        print('worker:', val)
        time.sleep(2)

def main():
    q = Queue()
    p = Process(target=worker, args=(q, ))
    p.daemon = True
    p.start()
    while True:
        if not q.empty():
            print('main:', q.get())
        time.sleep(1)

if __name__ == "__main__":
    main()
This yields:
> python3 test3.py
worker: val1
main: val1
worker: val2
main: val2
worker: val3
main: val3
worker: val4
main: val4
worker: val5
Here is the same example using a Pipe:
from multiprocessing import Process, Pipe
import time

def worker(p):
    count = 0
    while True:
        count += 1
        val = 'val' + str(count)
        p.send(val)
        print('worker:', val)
        time.sleep(2)

def main():
    child, parent = Pipe()
    p = Process(target=worker, args=(child, ))
    p.daemon = True
    p.start()
    while True:
        if parent.poll():
            print('main:', parent.recv())
        time.sleep(1)

if __name__ == "__main__":
    main()
This produces the same result as the previous example.
Additionally, a pipe is bidirectional by default, so you could also send data back from the workers to the parent.
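For example, a minimal sketch of a request/reply exchange over the same pipe (my illustration, not part of the original program):

from multiprocessing import Process, Pipe

def worker(conn):
    # Receive a request from the parent, reply on the same pipe.
    request = conn.recv()
    conn.send('ack: ' + request)

if __name__ == "__main__":
    child, parent = Pipe()
    p = Process(target=worker, args=(child,))
    p.start()
    parent.send('hello')
    print(parent.recv())  # prints 'ack: hello'
    p.join()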