simpy traffic light simulation - python

I am trying to simulate a circular road with a series of traffic lights in sequence. Vehicles enter the system via a Poisson process. Once in the system, they queue at each light. It takes one time unit for them to go through each queue. They exit the system when they have traveled a number of queues equal to their trip length. The queues only operate during a green phase. Cars are represented by integers.
The problem is that I keep getting the error "pop from an empty deque", even though this part of the code is only reachable via an if statement that checks whether the deque has cars in it. I am not very familiar with simpy, so I think the problem has to do with the timeout. If I move the timeout after the pop operation, the code works, but this is not quite what I want.
import simpy
from simpy.util import start_delayed
# import numpy.random
from collections import deque,namedtuple
from numpy import random
NUM_INT = 3
ARRIVAL_TIME_MEAN = 1.1
TRIP_LENGTH = 4
GREEN_TIME = 3.0
RED_TIME = 3.0
class Simulation(object):
def __init__(self,env):
self.env = env
self.intersections = [Intersection(env,i) for i in range(NUM_INT)]
for (i,intersection) in enumerate(self.intersections):
intersection.set_next_intersection(self.intersections[(i+1)%NUM_INT])
self.env.process(self.light())
self.env.process(self.arrivals())
def arrivals(self):
while True:
yield self.env.timeout(random.exponential(ARRIVAL_TIME_MEAN))
intersection = random.choice(self.intersections)
intersection.receive(TRIP_LENGTH)
def light(self):
while True:
for intersection in self.intersections:
intersection.start_departing()
yield self.env.timeout(GREEN_TIME)
for intersection in self.intersections:
intersection.turn_red()
yield env.timeout(RED_TIME)
class Intersection(object):
def __init__(self,env,index):
self.index = index
self.queue = deque()
self.env = env
self.start_departing()
def set_next_intersection(self,intersection):
self.next_intersection = intersection
def start_departing(self):
self.is_departing = True
self.action = env.process(self.departure())
def turn_red(self):
if self.is_departing:
self.is_departing = False
self.action.interrupt('red light')
def receive(self,car):
self.queue.append(car)
if not self.is_departing:
self.start_departing()
def departure(self):
while True:
try:
if len(self.queue)==0:
self.is_departing = False
self.env.exit('no more cars in %d'%self.index)
else:
yield self.env.timeout(1.0)
car = self.queue.popleft()
car = car - 1
if car > 0:
self.next_intersection.receive(car)
except simpy.Interrupt as i:
print('interrupted by',i.cause)
env = simpy.Environment()
sim = Simulation(env)
env.run(until=15.0)

not a fix as much as a workaround...
import simpy
from simpy.util import start_delayed
# import numpy.random
from collections import deque,namedtuple
from numpy import random
NUM_INT = 3
ARRIVAL_TIME_MEAN = 1.1
TRIP_LENGTH = 4
GREEN_TIME = 3.0
RED_TIME = 3.0
class Simulation(object):
def __init__(self,env):
self.env = env
self.intersections = [Intersection(env,i) for i in range(NUM_INT)]
for (i,intersection) in enumerate(self.intersections):
intersection.set_next_intersection(self.intersections[(i+1)%NUM_INT])
self.env.process(self.light())
self.env.process(self.arrivals())
def arrivals(self):
while True:
yield self.env.timeout(random.exponential(ARRIVAL_TIME_MEAN))
intersection = random.choice(self.intersections)
intersection.receive(TRIP_LENGTH)
def light(self):
while True:
for intersection in self.intersections:
intersection.start_departing()
yield self.env.timeout(GREEN_TIME)
for intersection in self.intersections:
intersection.turn_red()
yield env.timeout(RED_TIME)
class Intersection(object):
def __init__(self,env,index):
self.index = index
self.queue = deque()
self.env = env
self.start_departing()
def set_next_intersection(self,intersection):
self.next_intersection = intersection
def start_departing(self):
self.is_departing = True
self.action = env.process(self.departure())
def turn_red(self):
if self.is_departing:
self.is_departing = False
self.action.interrupt('red light')
def receive(self,car):
self.queue.append(car)
if not self.is_departing:
self.start_departing()
def departure(self):
while True:
try:
if len(self.queue)==0:
self.is_departing = False
self.env.exit('no more cars in %d'%self.index)
else:
yield self.env.timeout(1.0)
if len(self.queue)>0:
if len(self.queue)==1:
car=self.queue[0]
self.queue.clear()
else:
car = self.queue.popleft()
car = car - 1
if car > 0:
self.next_intersection.receive(car)
except simpy.Interrupt as i:
print('interrupted by',i.cause)
env = simpy.Environment()
sim = Simulation(env)
env.run(until=15.0)
tell me if this works for you

In departure, in the case where the queue is not empty, you immediately yield. That lets other processes run, and they can empty the queue before your process resumes, which raises your exception. This is a bit like cooperative multitasking, but without locks, so you have to be careful about where you place your yields.
Moving the yield to the end of that if fixes it for me.
def departure(self):
while True:
try:
if len(self.queue)==0:
self.is_departing = False
self.env.exit('no more cars in %d'%self.index)
else:
car = self.queue.popleft()
car = car - 1
if car > 0:
self.next_intersection.receive(car)
yield self.env.timeout(1.0)
except simpy.Interrupt as i:
print('interrupted by',i.cause)
yield self.env.timeout(1.0)
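For reference, here is a minimal sketch of my own (not part of either answer, and ignoring the red/green phases) that sidesteps the race entirely by using simpy.Store: its get() event only succeeds once an item is actually available, so there is never anything to pop from an empty queue.
import simpy

def service(env, store):
    # Store.get() only fires when an item exists, so the empty-pop race cannot happen
    while True:
        car = yield store.get()      # wait until a car is queued
        yield env.timeout(1.0)       # one time unit to clear the light
        print('t=%.1f: car with %d hops left served' % (env.now, car))

env = simpy.Environment()
store = simpy.Store(env)
env.process(service(env, store))
store.put(3)   # two cars arrive before the run starts
store.put(2)
env.run(until=5)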

Related

python setting state of a future

Is it bad practice to set the state of a future to pass arguments?
Specifically, using something like future.q = q to use q in the callback.
from threading import Thread
from threading import RLock
from threading import current_thread
from concurrent.futures import Future
import time
import random
class NonBlockingQueue:
def __init__(self, max_size):
self.max_size = max_size
self.q = []
self.q_waiting_puts = []
self.q_waiting_gets = []
self.lock = RLock()
def enqueue(self, item):
future = None
with self.lock:
curr_size = len(self.q)
# queue is full so create a future for a put
# request
if curr_size == self.max_size:
future = Future()
self.q_waiting_puts.append(future)
else:
self.q.append(item)
# remember to resolve a pending future for
# a get request
if len(self.q_waiting_gets) != 0:
future_get = self.q_waiting_gets.pop(0)
future_get.set_result(self.q.pop(0))
return future
def retry_enqueue(future):
print("\nCallback invoked by thread {0}".format(current_thread().getName()))
item = future.item
q = future.q
new_future = q.enqueue(item)
if new_future is not None:
new_future.item = item
new_future.q = q
new_future.add_done_callback(retry_enqueue)
else:
print("\n{0} successfully added on a retry".format(item))
### MAIN CODE
def producer_thread(q):
item = 1
while 1:
future = q.enqueue(item)
if future is not None:
future.item = item
future.q = q
future.add_done_callback(retry_enqueue)
item += 1
# slow down the producer
time.sleep(random.randint(1, 3))
It is not a good idea to pass around arguments like this.
The reason is that in a future version (no pun intended), setting custom attributes on the Future object could be disallowed, which would break your code.
A better solution is to use functools.partial or a lambda to pass extra arguments to the callback.
First, accept q as an argument in the retry_enqueue function:
def retry_enqueue(future, q): # accept 'q' argument
...
Example using functools.partial:
import functools
future.add_done_callback(functools.partial(retry_enqueue, q=q))
Example using lambda:
future.add_done_callback(lambda future: retry_enqueue(future, q))
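To make that concrete, here is a small self-contained illustration of my own (the plain list and the on_done name are just stand-ins, not part of the original code): the callback receives the future plus the extra q argument bound by functools.partial.
import functools
from concurrent.futures import Future

def on_done(future, q):
    print("result:", future.result(), "queue length:", len(q))

q = []                      # stands in for the NonBlockingQueue
f = Future()                # created by hand here purely for demonstration
f.add_done_callback(functools.partial(on_done, q=q))
f.set_result(42)            # fires the callback: result: 42 queue length: 0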

Using Python multiprocessing library inside nested objects

I'm trying to use the multiprocessing library to parallelize some expensive calculations without blocking some other, much lighter ones. They both need to interact through some variables, although they may run at different paces.
To show this, I have created the following example, that works fine:
import multiprocessing
import time
import numpy as np
class SumClass:
def __init__(self):
self.result = 0.0
self.p = None
self.return_value = None
def expensive_function(self, new_number, return_value):
# Execute expensive calculation
#######
time.sleep(np.random.random_integers(5, 10, 1))
return_value.value = self.result + new_number
#######
def execute_function(self, new_number):
print(' New number received: %f' % new_number)
self.return_value = multiprocessing.Value("f", 0.0, lock=True)
self.p = multiprocessing.Process(target=self.expensive_function, args=(new_number, self.return_value))
self.p.start()
def is_executing(self):
if self.p is not None:
if not self.p.is_alive():
self.result = self.return_value.value
self.p = None
return False
else:
return True
else:
return False
if __name__ == '__main__':
sum_obj = SumClass()
current_value = 0
while True:
if not sum_obj.is_executing():
# Randomly determine whether the function must be executed or not
if np.random.rand() < 0.25:
print('Current sum value: %f' % sum_obj.result)
new_number = np.random.rand(1)[0]
sum_obj.execute_function(new_number)
# Execute other (light) stuff
#######
print('Executing other stuff')
current_value += sum_obj.result * 0.1
print('Current value: %f' % current_value)
time.sleep(1)
#######
Basically, in the main loop a light function is executed and, depending on a random condition, some heavy work is sent to another process (if the previous one has already finished), carried out by an object that needs to store some data between executions. Although expensive_function takes some time, the light function keeps executing without being blocked.
Although the above code gets the job done, I'm wondering: is it the best/most appropriate method to do this?
Besides, let us suppose the class SumClass has an instance of another object, which also needs to store data. For example:
import multiprocessing
import time
import numpy as np
class Operator:
def __init__(self):
self.last_value = 1.0
def operate(self, value):
print(' Operation, last value: %f' % self.last_value)
self.last_value *= value
return self.last_value
class SumClass:
def __init__(self):
self.operator_obj = Operator()
self.result = 0.0
self.p = None
self.return_value = None
def expensive_function(self, new_number, return_value):
# Execute expensive calculation
#######
time.sleep(np.random.random_integers(5, 10, 1))
# Apply operation
number = self.operator_obj.operate(new_number)
# Apply other operation
return_value.value = self.result + number
#######
def execute_function(self, new_number):
print(' New number received: %f' % new_number)
self.return_value = multiprocessing.Value("f", 0.0, lock=True)
self.p = multiprocessing.Process(target=self.expensive_function, args=(new_number, self.return_value))
self.p.start()
def is_executing(self):
if self.p is not None:
if not self.p.is_alive():
self.result = self.return_value.value
self.p = None
return False
else:
return True
else:
return False
if __name__ == '__main__':
sum_obj = SumClass()
current_value = 0
while True:
if not sum_obj.is_executing():
# Randomly determine whether the function must be executed or not
if np.random.rand() < 0.25:
print('Current sum value: %f' % sum_obj.result)
new_number = np.random.rand(1)[0]
sum_obj.execute_function(new_number)
# Execute other (light) stuff
#######
print('Executing other stuff')
current_value += sum_obj.result * 0.1
print('Current value: %f' % current_value)
time.sleep(1)
#######
Now, inside expensive_function, a member function of the Operator object is used, which needs to store the number passed to it.
As expected, the member variable last_value does not change, i.e. the update made inside the child process is not kept.
Is there any way of doing this properly?
I can imagine I could arrange everything so that I only need one class level, and it would work well. However, this is a toy example; in reality there are several levels of complex objects and that would be hard.
Thank you very much in advance!
from concurrent.futures import ThreadPoolExecutor
from numba import jit
import requests
import timeit
def timer(number, repeat):
def wrapper(func):
runs = timeit.repeat(func, number=number, repeat=repeat)
print(sum(runs) / len(runs))
return wrapper
URL = "https://httpbin.org/uuid"
# @jit(nopython=True, nogil=True, cache=True)  # optional numba JIT; only useful for CPU-bound functions
def fetch(session, url):
with session.get(url) as response:
print(response.json()['uuid'])
@timer(1, 1)
def runner():
with ThreadPoolExecutor(max_workers=25) as executor:
with requests.Session() as session:
executor.map(fetch, [session] * 100, [URL] * 100)
executor.shutdown(wait=True)
executor._adjust_thread_count
Maybe this might help.
I'm using ThreadPoolExecutor for multithreading; you can also use ProcessPoolExecutor.
For your compute-expensive operation you can use numba, which compiles your function and caches the result for faster execution.
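Picking up on that suggestion, here is a minimal sketch of my own (stripped down from the question's classes; not part of the answer above) showing why a thread pool sidesteps the nested-state problem: threads share memory, so the Operator instance mutated inside the worker is the same object the main program sees afterwards, whereas a multiprocessing.Process works on a pickled copy.
from concurrent.futures import ThreadPoolExecutor
import time

class Operator:
    def __init__(self):
        self.last_value = 1.0
    def operate(self, value):
        self.last_value *= value
        return self.last_value

def expensive_function(operator, result, new_number):
    time.sleep(1)                          # stand-in for the heavy calculation
    return result + operator.operate(new_number)

op = Operator()
with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(expensive_function, op, 0.0, 3.0)
    # the light work can keep running here, polling future.done() as before
    print(future.result())                 # 3.0
print(op.last_value)                       # 3.0, the nested state survived
The caveat is the GIL: plain Python threads will not run CPU-bound work in parallel, which is why the answer points at numba's nogil=True (or at a ProcessPoolExecutor, at the cost of passing updated state back explicitly).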

Process yields a Process in Python using Simpy

I'm having a problem with my code.
I am using SimPy for Python and I'm trying to make a P2P simulator with it.
Below is the code of my peer generator. I don't know why, but I never enter the function generate(): the console never shows the print('I am here').
Does anyone know what I'm doing wrong in my code? Sorry if I'm doing something very wrong.
import simpy
import random
# PARAMETERS
RANDOM_SEED = 93817
N_NODES = 10 # 2000
RUN_TIME = 10 # 86400 # 24 hours
TIME_TO_GENERATE = 3 # at each 3 seconds
# FUNCTIONS
def peer(env, N_PEER):
print('Peer %d join at %d' % (N_PEER, env.now))
def chanceToGenerate():
value = random.random()*100
if value < 50:
return False
else:
return True
def generate(env, N_PEER):
print('I am here')
chance = chanceToGenerate()
if chance:
yield env.process(peer(env, N_PEER))
return True
else:
return False
def peerGenerator(env):
N_PEER = 0
while True:
if N_PEER < N_NODES:
generated = generate(env, N_PEER)
if generated:
N_PEER += 1
print('time: %d' % env.now)
yield env.timeout(TIME_TO_GENERATE)
# RUNNING
random.seed(RANDOM_SEED)
env = simpy.Environment()
env.process(peerGenerator(env))
env.run(until=RUN_TIME)
I solved the problem. What was my solution?
Answer: I removed the function generate() and moved the yield env.process(peer(env, N_PEER)) into the generator() function itself.
Why did I do that?
I have been reading the SimPy documentation and found out that I can't make a non-process yield another process: calling generate() only created a generator object without ever running its body, so only the peerGenerator() process can yield other processes.
Code:
import simpy
import random
# PARAMETERS
RANDOM_SEED = 93817
N_NODES = 10 # 2000
RUN_TIME = 10 # 86400 # 24 hours
TIME_TO_GENERATE = 3 # at each 3 seconds
# FUNCTIONS
class peerGenerator:
def __init__(self, env):
self.env = env
self.generator_proc = env.process(self.generator(env))
def peer(self, env, N_PEER):
print('Peer %d join at %d' % (N_PEER, env.now))
yield env.timeout(0)
def chanceToGenerate(self):
value = random.randint(0,100)
if value < 50:
print('Tried to create a peer')
return False
else:
return True
def generator(self, env):
N_PEER = 0
while True:
if N_PEER < N_NODES:
chance = self.chanceToGenerate()
if chance:
yield env.process(self.peer(env, N_PEER))
N_PEER += 1
print('time: %d' % env.now)
yield env.timeout(TIME_TO_GENERATE)
# RUNNING
env = simpy.Environment()
bootstrap = peerGenerator(env)
env.run(until=RUN_TIME)
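For completeness, a sketch of my own (not part of the answer above) showing that the original free-function layout could also have been kept: a helper generator's body only runs if something drives it, for example by delegating to it with yield from inside the process, and peer() needs at least one yield so env.process() can run it.
import random
import simpy

N_NODES = 10
TIME_TO_GENERATE = 3

def peer(env, n):
    print('Peer %d join at %d' % (n, env.now))
    yield env.timeout(0)                 # make peer a real process generator

def generate(env, n):
    if random.random() < 0.5:            # ~50% chance, as in chanceToGenerate()
        return False
    yield env.process(peer(env, n))
    return True

def peer_generator(env):
    n_peer = 0
    while True:
        if n_peer < N_NODES:
            if (yield from generate(env, n_peer)):   # actually runs generate()
                n_peer += 1
        yield env.timeout(TIME_TO_GENERATE)

env = simpy.Environment()
env.process(peer_generator(env))
env.run(until=10)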

How can I properly call a method from its callback method within a class?

I have code with two functions: 'send_thread' and 'receive_thread', which is the callback of 'send_thread'. What I want to do is run 'send_thread', which activates 'receive_thread', and once it's over repeat it all again. To do so, I have come up with the code below. It is not giving the desired results, since 'send_thread' gets called again but doesn't activate the callback anymore. Thank you in advance for your help.
I have noticed that the function gets called at the end of receive_thread and runs for the amount of time that I wait in send_thread (rospy.sleep()). It never activates the callback again after the first try, though.
import rospy
import pepper_2d_simulator
import threading
from geometry_msgs.msg import Twist    # Twist message type used by the publisher
from nav_msgs.msg import Odometry      # Odometry message type used by the subscriber
class TROS(object):
def __init__(self):
self.cmd_vel_pub = rospy.Publisher('cmd_vel',Twist)
self.event = threading.Event()
def send_thread(self):
#send commmand
self.event.set()
sequence = [[1,0,0.05],[0,0,0],[0,0,0.1292]]
for cmd in sequence:
rospy.Rate(0.5).sleep()
msg = Twist()
msg.linear.x = cmd[0]
msg.linear.y = cmd[1]
msg.angular.z = cmd[2]
t = rospy.get_rostime()
self.cmd_vel_pub.publish(msg)
self.event.clear()
rospy.sleep(1)
def receive_thread(self,msg):
#if something is being send, listen to this
if self.event.isSet():
frame_id = msg.header.frame_id
self.x_odom = msg.pose.pose.position.x
self.y_odom = msg.pose.pose.position.y
self.z_odom = msg.pose.pose.position.z
self.pos_odom = [self.x_odom,self.y_odom,self.z_odom,1]
self.ang_odom = msg.pose.pose.orientation.z
self.time = msg.header.stamp.secs + msg.header.stamp.nsecs
#some transformations here to get self.trans...
else:
#after self.event() is cleared, rename and run again
self.x_odom = self.trans_br_x
self.y_odom = self.trans_br_y
self.ang_odom = self.rot_br_ang
self.send_thread()
def init_node(self):
rospy.init_node('pepper_cmd_evaluator',anonymous = True)
rospy.Subscriber('odom',Odometry,self.receive_thread)
if __name__ == '__main__':
thinking = TROS()
thinking.init_node()
thinking.send_thread()
The expected result is that I am able to loop these two functions: I call send_thread, which activates receive_thread; then send_thread stops, receive_thread stops and activates send_thread again. I want to do this 10 times.
I have now figured out how to do this. I will post my solution in case anyone else runs into a similar problem. The working solution I came up with is pretty simple: I created a self.flag variable and alternately set it to True and False in send_thread and the callback, respectively. The code:
import rospy
import pepper_2d_simulator
import threading
from geometry_msgs.msg import Twist    # Twist message type used by the publisher
from nav_msgs.msg import Odometry      # Odometry message type used by the subscriber
class TROS(object):
def __init__(self):
self.cmd_vel_pub = rospy.Publisher('cmd_vel',Twist)
self.event = threading.Event()
self.count = 0
self.flag = True
def send_thread(self):
while self.count < 10:
if self.flag:
self.count = self.count + 1
#send commmand
self.event.set()
sequence = [[1,0,0.05],[0,0,0],[0,0,0.1292]]
for cmd in sequence:
rospy.Rate(0.5).sleep()
msg = Twist()
msg.linear.x = cmd[0]
msg.linear.y = cmd[1]
msg.angular.z = cmd[2]
t = rospy.get_rostime()
self.cmd_vel_pub.publish(msg)
self.event.clear()
rospy.sleep(0.3)
self.flag = False
rospy.signal_shutdown('Command finished')
def receive_thread(self,msg):
#if something is being send, listen to this
if self.event.isSet():
frame_id = msg.header.frame_id
self.x_odom = msg.pose.pose.position.x
self.y_odom = msg.pose.pose.position.y
self.z_odom = msg.pose.pose.position.z
self.pos_odom = [self.x_odom,self.y_odom,self.z_odom,1]
self.ang_odom = msg.pose.pose.orientation.z
self.time = msg.header.stamp.secs + msg.header.stamp.nsecs
#some transformations here to get self.trans...
else:
#after self.event() is cleared, rename and run again
self.x_odom = self.trans_br_x
self.y_odom = self.trans_br_y
self.ang_odom = self.rot_br_ang
self.flag = True
def init_node(self):
rospy.init_node('pepper_cmd_evaluator',anonymous = True)
rospy.Subscriber('odom',Odometry,self.receive_thread)
if __name__ == '__main__':
thinking = TROS()
thinking.init_node()
thinking.send_thread()
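Outside of ROS, the same ping-pong pattern can be shown in a few lines. This is a sketch of my own (the PingPong class and its names are made up for illustration), where a threading.Event plays the role of self.flag: the sender waits for the event that the receiver callback sets.
import threading
import time

class PingPong:
    def __init__(self):
        self.ready = threading.Event()
        self.ready.set()                          # allow the first send

    def send_thread(self, rounds=3):
        for i in range(rounds):
            self.ready.wait()                     # wait until the receiver re-arms us
            self.ready.clear()
            print('send round', i)
            # simulate the receiver being triggered asynchronously
            threading.Thread(target=self.receive_thread, args=(i,)).start()

    def receive_thread(self, i):
        time.sleep(0.1)                           # pretend to process a message
        print('received round', i)
        self.ready.set()                          # allow the next send

PingPong().send_thread()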

Multithreading (?): Manual interference in a loop

I've been looking into a way to directly change variables in a running module.
What I want to achieve is that, while a load test is running, I can manually adjust the call pace or similar parameters.
Below is some code that I just created (untested), just to give you an idea.
class A():
def __init__(self):
self.value = 1
def runForever(self):
while(1):
print self.value
def setValue(self, value):
self.value = value
if __name__ == '__main__':
#Some code to create the A object and directly apply the value from an human's input
a = A()
#Some parallelism or something has to be applied.
a.runForever()
a.setValue(raw_input("New value: "))
Edit #1: Yes, I know that now I will never hit the a.setValue() :-)
Here is a multi-threaded example. This code will work with the python interpreter but not with the Python Shell of IDLE, because the raw_input function is not handled the same way.
from threading import Thread
from time import sleep
class A(Thread):
def __init__(self):
Thread.__init__(self)
self.value = 1
self.stop_flag = False
def run(self):
while not self.stop_flag:
sleep(1)
print(self.value)
def set_value(self, value):
self.value = value
def stop(self):
self.stop_flag = True
if __name__ == '__main__':
a = A()
a.start()
try:
while 1:
r = raw_input()
a.set_value(int(r))
except:
a.stop()
The pseudo code you wrote is quite similar to the way threading / multiprocessing works in Python. You will want to start (for example) a thread that "runs forever", and then, instead of modifying the internal rate value directly, you will probably just send a message through a Queue that gives the new value.
Check out this question.
Here is a demonstration of doing what you asked about. I prefer using Queues over making calls directly on threads / processes.
import Queue # !!warning. if you use multiprocessing, use multiprocessing.Queue
import threading
import time
def main():
q = Queue.Queue()
tester = Tester(q)
tester.start()
while True:
user_input = raw_input("New period in seconds or (q)uit: ")
if user_input.lower() == 'q':
break
try:
new_speed = float(user_input)
except ValueError:
new_speed = None # ignore junk
if new_speed is not None:
q.put(new_speed)
q.put(Tester.STOP_TOKEN)
class Tester(threading.Thread):
STOP_TOKEN = '<<stop>>'
def __init__(self, q):
threading.Thread.__init__(self)
self.q = q
self.speed = 1
def run(self):
while True:
# get from the queue
try:
item = self.q.get(block=False) # don't hang
except Queue.Empty:
item = None # do nothing
if item:
# stop when requested
if item == self.STOP_TOKEN:
break # stop this thread loop
# otherwise check for a new speed
try:
self.speed = float(item)
except ValueError:
pass # whatever you like with unknown input
# do your thing
self.main_code()
def main_code(self):
time.sleep(self.speed) # or whatever you want to do
if __name__ == '__main__':
main()
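As the comment on the import warns, the same pattern carries over to processes. Here is a minimal sketch of my own (Python 3; names like worker and STOP are just for illustration) using multiprocessing.Queue instead of Queue.Queue.
import multiprocessing
import queue        # only for the queue.Empty exception
import time

STOP = '<<stop>>'

def worker(q):
    speed = 1.0
    while True:
        try:
            item = q.get(block=False)             # don't hang
        except queue.Empty:
            item = None
        if item == STOP:
            break
        if item is not None:
            speed = float(item)                   # new period from the parent
        time.sleep(speed)
        print('tick, period =', speed)

if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    q.put(0.2)            # speed the loop up from the parent process
    time.sleep(2)
    q.put(STOP)
    p.join()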
