Get result from multiprocessing process - python

I want to know if there is a way to make multiprocessing work in this code. What should I change, or is there another function in multiprocessing that would allow me to do this operation?
You can call the locateOnScreen('calc7key.png') function to get the screen coordinates. The return value is a 4-integer tuple: (left, top, width, height).
I got this error:
checkNumber1 = resourceBlankLightTemp[1]
TypeError: 'Process' object does not support indexing
import pyautogui, time, os, logging, sys, random, copy
import multiprocessing as mp

BLANK_DARK = os.path.join('images', 'blankDark.png')
BLANK_LIGHT = os.path.join('images', 'blankLight.png')

def blankFirstDarkResourcesIconPosition():
    blankDarkIcon = pyautogui.locateOnScreen(BLANK_DARK)
    return blankDarkIcon

def blankFirstLightResourcesIconPosition():
    blankLightIcon = pyautogui.locateOnScreen(BLANK_LIGHT)
    return blankLightIcon

def getRegionOfResourceImage():
    global resourceIconRegion
    resourceBlankLightTemp = mp.Process(target = blankFirstLightResourcesIconPosition)
    resourceBlankDarkTemp = mp.Process(target = blankFirstDarkResourcesIconPosition)
    resourceBlankLightTemp.start()
    resourceBlankDarkTemp.start()

    if(resourceBlankLightTemp == None):
        checkNumber1 = 2000
    else:
        checkNumber1 = resourceBlankLightTemp[1]

    if(resourceBlankDarkTemp == None):
        checkNumber2 = 2000
    else:
        checkNumber2 = resourceBlankDarkTemp[1]

In general, if you just want to use multiprocessing to run existing CPU-intensive functions in parallel, it is easiest to do through a Pool, as shown in the example at the beginning of the documentation:
# ...
def call(func):
    # module-level helper so the Pool can pickle it (a lambda cannot be pickled)
    return func()

def getRegionOfResourceImage():
    global resourceIconRegion
    with mp.Pool(2) as p:
        resourceBlankLightTemp, resourceBlankDarkTemp = p.map(
            call, [blankFirstLightResourcesIconPosition,
                   blankFirstDarkResourcesIconPosition])
    if(resourceBlankLightTemp == None):
        # ...
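If the goal is only the two locateOnScreen() results, a shorter variant of the same Pool idea is to map over the image paths directly. This is an untested sketch; the locate() helper is introduced here just for illustration, and it assumes a pyautogui version where locateOnScreen() returns None when the image is not found:

import os
import multiprocessing as mp
import pyautogui

BLANK_DARK = os.path.join('images', 'blankDark.png')
BLANK_LIGHT = os.path.join('images', 'blankLight.png')

def locate(path):
    # top-level function so the Pool can pickle it
    return pyautogui.locateOnScreen(path)

def getRegionOfResourceImage():
    with mp.Pool(2) as p:
        light, dark = p.map(locate, [BLANK_LIGHT, BLANK_DARK])
    # locateOnScreen gives (left, top, width, height), or None if not found
    checkNumber1 = light[1] if light is not None else 2000
    checkNumber2 = dark[1] if dark is not None else 2000
    return checkNumber1, checkNumber2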

Related

TF2 transform can't find an actuall existing frame

In a global planner node that I wrote, I have the following init code
#!/usr/bin/env python
import rospy
import copy
import tf2_ros
import time
import numpy as np
import math
import tf
from math import sqrt, pow
from geometry_msgs.msg import Vector3, Point
from std_msgs.msg import Int32MultiArray
from std_msgs.msg import Bool
from nav_msgs.msg import OccupancyGrid, Path
from geometry_msgs.msg import PoseStamped, PointStamped
from tf2_geometry_msgs import do_transform_point
from Queue import PriorityQueue

class GlobalPlanner():
    def __init__(self):
        print("init global planner")
        self.tfBuffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tfBuffer)
        self.drone_position_sub = rospy.Subscriber('uav/sensors/gps', PoseStamped, self.get_drone_position)
        self.drone_position = []
        self.drone_map_position = []
        self.map_sub = rospy.Subscriber("/map", OccupancyGrid, self.get_map)
        self.goal_sub = rospy.Subscriber("/cell_tower/position", Point, self.getTransformedGoal)
        self.goal_position = []
        self.goal = Point()
        self.goal_map_position = []
        self.occupancy_grid = OccupancyGrid()
        self.map = []
        self.p_path = Int32MultiArray()
        self.position_pub = rospy.Publisher("/uav/input/position", Vector3, queue_size = 1)
        #next_movement in
        self.next_movement = Vector3
        self.next_movement.z = 3
        self.path_pub = rospy.Publisher('/uav/path', Int32MultiArray, queue_size=1)
        self.width = rospy.get_param('global_planner_node/map_width')
        self.height = rospy.get_param('global_planner_node/map_height')
        #Check whether there is a path plan
        self.have_plan = False
        self.path = []
        self.euc_distance_drone_goal = 100
        self.twod_distance_drone_goal = []
        self.map_distance_drone_goal = []
        self.mainLoop()
And there is a callback function called getTransformedGoal, which transforms the goal position from the "cell_tower" frame to the "world" frame. It looks like this:
def getTransformedGoal(self, msg):
    self.goal = msg
    try:
        #Lookup the tower to world transform
        transform = self.tfBuffer.lookup_transform('cell_tower', 'world', rospy.Time())
        #transform = self.tfBuffer.lookup_transform('world','cell-tower' rospy.Time())
        #Convert the goal to a PointStamped
        goal_pointStamped = PointStamped()
        goal_pointStamped.point.x = self.goal.x
        goal_pointStamped.point.y = self.goal.y
        goal_pointStamped.point.z = self.goal.z
        #Use the do_transform_point function to convert the point using the transform
        new_point = do_transform_point(goal_pointStamped, transform)
        #Convert the point back into a vector message containing integers
        transform_point = [new_point.point.x, new_point.point.y]
        #Publish the vector
        self.goal_position = transform_point
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
        print(e)
        print('global_planner tf2 exception, continuing')
The error message said that
"cell_tower" passed to lookupTransform argument target_frame does not exist.
I checked the rqt graph for both "active" and "all", which shows that when set to "active", the /tf topic is not being subscribed to by the global planner node. The first screenshot (active nodes only) shows this, and the second screenshot shows all nodes, including inactive ones.
But I have actually set up the listener, and I have another node called local planner that uses the same strategy; it works for that node, but not for the global planner.
I'm not sure why this is.
Try adding a timeout to your lookup_transform() function call, as your transformation may not be available when you need it:
transform = self.tfBuffer.lookup_transform('cell_tower', 'world', rospy.Time.now(), rospy.Duration(1.0))
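For context, a sketch of how the callback above might look with that timeout in place (untested; same frame names and exception handling as the question's code):

def getTransformedGoal(self, msg):
    self.goal = msg
    try:
        # Wait up to one second for the transform to become available
        transform = self.tfBuffer.lookup_transform('cell_tower', 'world',
                                                   rospy.Time.now(),
                                                   rospy.Duration(1.0))
        goal_pointStamped = PointStamped()
        goal_pointStamped.point.x = self.goal.x
        goal_pointStamped.point.y = self.goal.y
        goal_pointStamped.point.z = self.goal.z
        new_point = do_transform_point(goal_pointStamped, transform)
        self.goal_position = [new_point.point.x, new_point.point.y]
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException) as e:
        print(e)
        print('global_planner tf2 exception, continuing')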

Python interpreter locked/freezing while trying to run pygetwindow as a thread

I'm trying to learn how to use threading, specifically concurrent.futures.ThreadPoolExecutor, because I need to return a numpy.array from a function I want to run concurrently.
The end goal is to have one process running a video loop of an application, while another process does object detection and GUI interactions. The result() method from the concurrent.futures library allows me to do this.
The issue is that my code runs once and then seems to lock up. I'm actually unsure what happens, because when I step through it in the debugger it runs once, then the debugger goes blank; I literally cannot step through, and no error is thrown.
The code appears to lock up on the line: notepadWindow = pygetwindow.getWindowsWithTitle('Notepad')[0]
I get exactly one loop: the print statement prints, the loop restarts, and then it halts at pygetwindow.
I don't know much about the GIL, but I have tried using the max_workers=1 argument on ThreadPoolExecutor(), which doesn't make a difference either way, and I was under the impression that concurrent.futures allows me to bypass the lock.
How do I run videoLoop as a single thread making sure to return DetectionWindow every iteration?
import cv2 as cv
import numpy as np
import concurrent.futures
from PIL import ImageGrab
import pygetwindow

def videoLoop():
    notepadWindow = pygetwindow.getWindowsWithTitle('Notepad')[0]
    x1 = notepadWindow.left
    y1 = notepadWindow.top
    height = notepadWindow.height
    width = notepadWindow.width
    x2 = x1 + width
    y2 = y1 + height
    haystack_img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
    haystack_img_np = np.array(haystack_img)
    DetectionWindow = cv.cvtColor(haystack_img_np, cv.COLOR_BGR2GRAY)
    return DetectionWindow

def f1():
    with concurrent.futures.ThreadPoolExecutor() as executor:
        f1 = executor.submit(videoLoop)
        notepadWindow = f1.result()
        cv.imshow("Video Loop", notepadWindow)
        cv.waitKey(1)
        print(f1.result())

while True:
    f1()
A ThreadPoolExecutor won't help you an awful lot here, if you want a continuous stream of frames.
Here's a reworking of your code that uses a regular old threading.Thread and puts frames (and their capture timestamps, since this is asynchronous) in a queue.Queue you can then read in another (or the main) thread.
The thread has an otherwise infinite loop that can be stopped by setting the thread's exit_signal.
(I didn't test this, since I'm presently on a Mac, so there may be typos or other problems.)
import queue
import time

import cv2 as cv
import numpy as np
import threading
from PIL import ImageGrab
import pygetwindow


def do_capture():
    notepadWindow = pygetwindow.getWindowsWithTitle("Notepad")[0]
    x1 = notepadWindow.left
    y1 = notepadWindow.top
    height = notepadWindow.height
    width = notepadWindow.width
    x2 = x1 + width
    y2 = y1 + height
    haystack_img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
    return cv.cvtColor(np.array(haystack_img), cv.COLOR_BGR2GRAY)


class VideoCaptureThread(threading.Thread):
    def __init__(self, result_queue: queue.Queue) -> None:
        super().__init__()
        self.exit_signal = threading.Event()
        self.result_queue = result_queue

    def run(self) -> None:
        while not self.exit_signal.wait(0.05):
            try:
                result = do_capture()
                self.result_queue.put((time.time(), result))
            except Exception as exc:
                print(f"Failed capture: {exc}")


def process_frames(result_queue: queue.Queue):
    start_time = time.time()
    while time.time() - start_time < 5:  # Run for five seconds
        frame = result_queue.get()
        print(frame)


def main():
    result_queue = queue.Queue()
    thread = VideoCaptureThread(result_queue=result_queue)
    thread.start()
    process_frames(result_queue)
    thread.exit_signal.set()
    thread.join()


if __name__ == "__main__":
    main()
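If the goal is to actually display the frames rather than print them, a possible variation (untested, reusing cv and the queue of (timestamp, frame) tuples from the code above) is to let process_frames show each frame with OpenCV in the main thread:

def process_frames(result_queue: queue.Queue):
    # GUI calls generally belong in the main thread
    while True:
        capture_time, frame = result_queue.get()
        cv.imshow("Video Loop", frame)
        if cv.waitKey(1) & 0xFF == ord("q"):  # press 'q' to stop
            break
    cv.destroyAllWindows()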

python multiprocess problem with OpenCV returning key points

I'm using the multiprocess Python module to parallelise processing with OpenCV algorithms (e.g. the ORB detector/descriptor). The multiprocess module works fine in most cases, but when it comes to returning a list of cv2.KeyPoint objects there is a problem: all fields of each key point are set to 0 when returned to the caller process, although inside the worker process all key points are correct (as returned by OpenCV).
Here is a minimal example that can be used to reproduce the error (you will need an image file called lena.png to make it work):
import numpy as np
from cv2 import ORB_create, imread, cvtColor, COLOR_BGR2GRAY
from multiprocess import Pool

feature = ORB_create(nfeatures=4)

def proc(img):
    return feature.detect(img)

def good(feat, frames):
    return map(proc, frames)

def bad(feat, frames):
    # this starts a worker process
    # and then collects result
    # but something is lost on the way
    pool = Pool(4)
    return pool.map(proc, frames)

if __name__ == '__main__':
    # it doesn't matter how many images
    # a list of images is required to make use of
    # pool from multiprocess module
    rgb_images = map(lambda fn: imread(fn), ['lena.png'])
    grey_images = map(lambda img: cvtColor(img, COLOR_BGR2GRAY), rgb_images)

    good_kp = good(feature, grey_images)
    bad_kp = bad(feature, grey_images)

    # this will fail because elements in
    # bad_kp will all contain zeros
    for i in range(len(grey_images)):
        for x, y in zip(good_kp[i], bad_kp[i]):
            # these should be the same
            print('good: pt=%s angle=%s size=%s - bad: pt=%s angle=%s size=%s' % (x.pt, x.angle, x.size, y.pt, y.angle, y.size))
            assert x.pt == y.pt
Platforms: both CentOS 7.6 and Windows 10 x64
Versions:
Python version: 2.7.15
multiprocess: 0.70.6.1
opencv-python-headless: 3.4.5.20 and 4.0.0.21
Is there a way to work around this? Using the standard multiprocessing module is not an option because of heavy use of lambdas and callable objects, which "can't be pickled".
After some analysis it turned out that the problem is caused by the cv2.KeyPoint class itself. This is suggested in a related question and its corresponding answer. The problem is that the pickling machinery (which dill extends) cannot serialize this class correctly.
A simple solution is to avoid sending instances of cv2.KeyPoint between the worker and the main process. If this is not convenient, the data of each keypoint can be wrapped in a simple Python structure or dictionary and passed instead.
An example of such a wrapper could be:
import cv2

class KeyPoint(object):
    def __init__(self, kp):
        # type: (cv2.KeyPoint) -> None
        x, y = kp.pt
        self.pt = float(x), float(y)
        self.angle = float(kp.angle) if kp.angle is not None else None
        self.size = float(kp.size) if kp.size is not None else None
        self.response = float(kp.response) if kp.response is not None else None
        self.class_id = int(kp.class_id) if kp.class_id is not None else None
        self.octave = int(kp.octave) if kp.octave is not None else None

    def to_opencv(self):
        # type: () -> cv2.KeyPoint
        kp = cv2.KeyPoint()
        kp.pt = self.pt
        kp.angle = self.angle
        kp.size = self.size
        kp.response = self.response
        kp.octave = self.octave
        kp.class_id = self.class_id
        return kp
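With that wrapper, the worker converts the keypoints before returning them and the caller converts them back. A rough sketch (untested, reusing proc, feature, Pool, and the KeyPoint wrapper from above):

def proc(img):
    # wrap each cv2.KeyPoint so it survives the trip back to the caller
    return [KeyPoint(kp) for kp in feature.detect(img)]

def bad(feat, frames):
    pool = Pool(4)
    wrapped = pool.map(proc, frames)
    # convert back to cv2.KeyPoint objects in the main process
    return [[kp.to_opencv() for kp in kps] for kps in wrapped]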

Python multiprocessing code using pipes hangs at first .recv() method when Pipes are generated automatically

I'm writing some bogus practice code so I can implement the ideas once I have a better idea of what I'm doing. The code is designed for multiprocessing in order to reduce the runtime by splitting the stack of three arrays into several horizontal pieces that are then processed in parallel via map_async. However, the code seems to hang at the first .recv() call on a pipe, even though the Pipe() objects are fully defined, and I'm not sure why. If I manually define each Pipe() object the code works just fine, but as soon as I generate them in a loop the code hangs at sec1 = pipes[0][0].recv(). How can I fix this?
from multiprocessing import Process, Pipe
import multiprocessing as mp
import numpy as np
import math

num_sections = 4

pipes_send = [None]*num_sections
pipes_recv = [None]*num_sections
pipes = zip(pipes_recv, pipes_send)
for i in range(num_sections):
    pipes[i] = list(pipes[i])
for i in range(num_sections):
    pipes[i][0], pipes[i][1] = Pipe()

def f(sec_num):
    for plane in range(3):
        hist_sec[sec_num][plane] += rand_sec[sec_num][plane]
    if sec_num == 0:
        pipes[0][1].send(hist_sec[sec_num])
        pipes[0][1].close()
    if sec_num == 1:
        pipes[1][1].send(hist_sec[sec_num])
        pipes[1][1].close()
    if sec_num == 2:
        pipes[2][1].send(hist_sec[sec_num])
        pipes[2][1].close()
    if sec_num == 3:
        pipes[3][1].send(hist_sec[sec_num])
        pipes[3][1].close()

hist = np.zeros((3,512,512))
hist_sec = []
randmat = np.random.rand(3,512,512)
rand_sec = []
for plane in range(3):
    hist_div = np.array_split(hist[plane], num_sections)
    hist_sec.append(hist_div)
    randmatsplit = np.array_split(randmat[plane], num_sections)
    rand_sec.append(randmatsplit)
hist_sec = np.rollaxis(np.asarray(hist_sec), 1, 0)
rand_sec = np.rollaxis(np.asarray(rand_sec), 1, 0)

if __name__ == '__main__':
    pool = mp.Pool(num_sections)
    args = np.arange(num_sections)
    pool.map_async(f, args, chunksize=1)
    sec1 = pipes[0][0].recv()
    sec2 = pipes[1][0].recv()
    sec3 = pipes[2][0].recv()
    sec4 = pipes[3][0].recv()
    for plane in range(3):
        hist_plane = np.concatenate((sec1[plane], sec2[plane], sec3[plane], sec4[plane]), axis=0)
        hist_full.append(hist_plane)
    pool.close()
    pool.join()

Concurrent functions running in separate process using pygame and multiprocessing

Suppose we want to drive an autonomous car by predicting image labels from a previously collected set of images and labels (a machine learning application). For this task, the car is connected via Bluetooth serial (rfcomm) to the host computer (a PC running *NIX), and the images are streamed directly from an Android phone using IP Webcam; meanwhile, the PC runs a program that links these two functions, displaying the captured images in a drawing environment created by pygame and sending the instructions back to the car over serial.
At the moment, I've tried to implement those processes using the multiprocessing module; they seemed to work, but when I execute the client, the drawing function (under if __name__ == '__main__') only runs after the getKeyPress() function ends.
The question is: is it possible to parallelize or synchronize the drawing function enclosed within if __name__ == '__main__' with the process declared in getKeyPress(), such that the program works as two independent processes?
Here's the implemented code so far:
import urllib
import time
import os
import sys
import serial
import signal
import multiprocessing
import numpy as np
import scipy
import scipy.io as sio
import matplotlib.image as mpimg
from pygame.locals import *

PORT = '/dev/rfcomm0'
SPEED = 115200
ser = serial.Serial(PORT)
status = False
move = None
targets = []
inputs = []
tic = False

def getKeyPress():
    import pygame
    pygame.init()
    global targets
    global status
    while not status:
        pygame.event.pump()
        keys = pygame.key.get_pressed()
        targets, status = processOutputs(targets, keys)
    targets = np.array(targets)
    targets = flattenMatrix(targets)
    sio.savemat('targets.mat', {'targets':targets})

def rgb2gray(rgb):
    r, g, b = np.rollaxis(rgb[...,:3], axis = -1)
    return 0.299 * r + 0.587 * g + 0.114 * b

def processImages(inputX, inputs):
    inputX = flattenMatrix(inputX)
    if len(inputs) == 0:
        inputs = inputX
    elif inputs.shape[1] >= 1:
        inputs = np.hstack((inputs, inputX))
    return inputs

def flattenMatrix(mat):
    mat = mat.flatten(1)
    mat = mat.reshape((len(mat), 1))
    return mat

def send_command(val):
    connection = serial.Serial( PORT,
                                SPEED,
                                timeout=0,
                                stopbits=serial.STOPBITS_TWO
                                )
    connection.write(val)
    connection.close()

def processOutputs(targets, keys):
    global move
    global status
    global tic
    status = False
    keypress = ['K_p', 'K_UP', 'K_LEFT', 'K_DOWN', 'K_RIGHT']
    labels = [1, 2, 3, 4, 5]
    commands = ['p', 'w', 'r', 'j', 's']
    text = ['S', 'Up', 'Left', 'Down', 'Right']
    if keys[K_q]:
        status = True
        return targets, status
    else:
        for i, j, k, g in zip(keypress, labels, commands, text):
            cmd = compile('cond = keys['+i+']', '<string>', 'exec')
            exec cmd
            if cond:
                move = g
                targets.append(j)
                send_command(k)
                break
        send_command('p')
        return targets, status

targetProcess = multiprocessing.Process(target=getKeyPress)
targetProcess.daemon = True
targetProcess.start()

if __name__ == '__main__':
    import pygame
    pygame.init()
    w = 288
    h = 352
    size = (w, h)
    screen = pygame.display.set_mode(size)
    c = pygame.time.Clock() # create a clock object for timing
    pygame.display.set_caption('Driver')
    ubuntu = pygame.font.match_font('Ubuntu')
    font = pygame.font.Font(ubuntu, 13)
    inputs = []
    try:
        while not status:
            urllib.urlretrieve("http://192.168.0.10:8080/shot.jpg", "input.jpg")
            try:
                inputX = mpimg.imread('input.jpg')
            except IOError:
                status = True
            inputX = rgb2gray(inputX)/255
            out = inputX.copy()
            out = scipy.misc.imresize(out, (352, 288), interp='bicubic', mode=None)
            scipy.misc.imsave('input.png', out)
            inputs = processImages(inputX, inputs)
            print inputs.shape[1]
            img = pygame.image.load('input.png')
            screen.blit(img, (0, 0))
            pygame.display.flip()
            c.tick(1)
            if move != None:
                text = font.render(move, False, (255, 128, 255), (0, 0, 0))
                textRect = text.get_rect()
                textRect.centerx = 20 #screen.get_rect().centerx
                textRect.centery = 20 #screen.get_rect().centery
                screen.blit(text, textRect)
                pygame.display.update()
            if status:
                targetProcess.join()
                sio.savemat('inputs.mat', {'inputs':inputs})
    except KeyboardInterrupt:
        targetProcess.join()
        sio.savemat('inputs.mat', {'inputs':inputs})
    targetProcess.join()
    sio.savemat('inputs.mat', {'inputs':inputs})
Thanks in advance.
I would personally suggest writing this without using the multiprocessing module: it uses fork() which has unspecified effects with most complex libraries, like in this case pygame.
You should try to write this as two completely separate programs. It forces you to think about what data needs to go from one to the other, which is both a bad and a good thing (as it may clarify things). You can use some inter-process communication facility, like the stdin/stdout pipe; e.g. in one program (the "main" one) you start the other as a sub-process like this:
popen = subprocess.Popen([sys.executable, '-u', 'my_subproc.py'],
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(The -u is for unbuffered.)
Then read/write the data via popen.stdin/popen.stdout in the parent process, and via sys.stdin/sys.stdout in the subprocess. The simplest example would be if the two processes only need a synchronization signal, e.g. the parent process waits in a loop for the subprocess to say "next please". To do this the subprocess does print 'next please', and the parent process does popen.stdout.readline(). (The print goes to sys.stdout in the subprocess, which the parent sees on popen.stdout.)
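To make that handshake concrete, here is a rough sketch of both sides (untested; 'my_subproc.py' and the 'next please' message are only placeholder names, and the snippets follow the question's Python 2 style):

# main program (parent)
import subprocess, sys

popen = subprocess.Popen([sys.executable, '-u', 'my_subproc.py'],
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
line = popen.stdout.readline().strip()   # blocks until the child prints a line
if line == 'next please':
    popen.stdin.write('go\n')            # arrives on sys.stdin in the child
    popen.stdin.flush()

# my_subproc.py (child)
import sys

print 'next please'                      # arrives on popen.stdout in the parent
sys.stdout.flush()
reply = sys.stdin.readline()             # waits for the parent's 'go'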
Unrelated small note:
keypress = ['K_p', ...]
...
cmd = compile('cond = keys['+i+']', '<string>', 'exec')
exec cmd
if cond:
This looks like very heavy code to just do:
keypress = [K_p, ...] # not strings, directly the values
...
if keys[i]:
My suggestion is to use separate threads.
#At the beginning
import threading

#Instead of def getKeyPress()
class getKeyPress(threading.Thread):
    def run(self):
        import pygame
        pygame.init()
        global targets
        global status
        while not status:
            pygame.event.pump()
            keys = pygame.key.get_pressed()
            targets, status = processOutputs(targets, keys)
        targets = np.array(targets)
        targets = flattenMatrix(targets)
        sio.savemat('targets.mat', {'targets':targets})

#Instead of
#targetProcess = multiprocessing.Process(target=getKeyPress)
#targetProcess.daemon = True
#targetProcess.start()
gkp = getKeyPress()
gkp.start()
An alternative would be creating two different scripts and using sockets to handle the inter-process communication.
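For that route, a bare-bones sketch of the socket handshake (untested; the port number and the message format are made up for illustration, and the snippets use the question's Python 2 style):

import socket

HOST, PORT_NUM = 'localhost', 50007  # hypothetical address/port

# keypress script: acts as the server and pushes key labels
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT_NUM))
server.listen(1)
conn, _ = server.accept()
conn.sendall('3\n')      # e.g. the label recorded for K_LEFT
conn.close()
server.close()

# drawing script: connects as the client and reads the labels
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((HOST, PORT_NUM))
label = client.recv(1024).strip()
client.close()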
