I'm testing with the asyncio module, but I need a hint / suggestion on how to fetch large emails in an async way.
I have a list with usernames and passwords for the mail accounts.
data = [
    {'usern': 'foo@bar.de', 'passw': 'x'},
    {'usern': 'foo2@bar.de', 'passw': 'y'},
    {'usern': 'foo3@bar.de', 'passw': 'z'},  # (...)
]
I thought about:
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait([get_attachment(d) for d in data]))
loop.close()
However, the part that takes a long time is downloading the email attachments.
Email:
import asyncio
import email
import imaplib

@asyncio.coroutine
def get_attachment(d):
    username = d['usern']
    password = d['passw']

    connection = imaplib.IMAP4_SSL('imap.bar.de')
    connection.login(username, password)
    connection.select()

    # list all available mails
    typ, data = connection.search(None, 'ALL')

    for num in data[0].split():
        # fetch each mail
        typ, data = connection.fetch(num, '(RFC822)')
        raw_string = data[0][1].decode('utf-8')
        msg = email.message_from_string(raw_string)

        for part in msg.walk():
            if part.get_content_maintype() == 'multipart':
                continue
            if part.get('Content-Disposition') is None:
                continue
            if part.get_filename():
                body = part.get_payload(decode=True)
                # do something with the body, async?

    connection.close()
    connection.logout()
How could I process all (downloading attachments) mails in an async way?
If you don't have an asynchronous I/O-based imap library, you can just use a concurrent.futures.ThreadPoolExecutor to do the I/O in threads. Python will release the GIL during the I/O, so you'll get true concurrency:
import threading
from concurrent.futures import ThreadPoolExecutor

def init_connection(d):
    username = d['usern']
    password = d['passw']

    connection = imaplib.IMAP4_SSL('imap.bar.de')
    connection.login(username, password)
    connection.select()
    return connection
local = threading.local() # We use this to get a different connection per thread
def do_fetch(num, d, rfc):
    try:
        connection = local.connection
    except AttributeError:
        connection = local.connection = init_connection(d)
    return connection.fetch(num, rfc)
@asyncio.coroutine
def get_attachment(d, pool):
    connection = init_connection(d)
    # list all available mails
    typ, data = connection.search(None, 'ALL')

    # Kick off asynchronous tasks for all the fetches;
    # run_in_executor already returns an asyncio.Future.
    loop = asyncio.get_event_loop()
    futs = [loop.run_in_executor(pool, do_fetch, num, d, '(RFC822)')
            for num in data[0].split()]

    # Process each fetch as it completes
    for fut in asyncio.as_completed(futs):
        typ, data = yield from fut
        raw_string = data[0][1].decode('utf-8')
        msg = email.message_from_string(raw_string)

        for part in msg.walk():
            if part.get_content_maintype() == 'multipart':
                continue
            if part.get('Content-Disposition') is None:
                continue
            if part.get_filename():
                body = part.get_payload(decode=True)
                # do something with the body, async?

    connection.close()
    connection.logout()
loop = asyncio.get_event_loop()
pool = ThreadPoolExecutor(max_workers=5) # You can probably increase max_workers, because the threads are almost exclusively doing I/O.
loop.run_until_complete(asyncio.wait([get_attachment(d, pool) for d in data]))
loop.close()
This isn't quite as nice as a truly asynchronous I/O-based solution, because you've still got the overhead of creating the threads, which limits scalability and adds extra memory overhead. You also get some GIL contention from the code that wraps the actual I/O calls. Still, if you're dealing with fewer than thousands of mails, it should perform fine.
We use run_in_executor to run the blocking fetches on the ThreadPoolExecutor as part of the asyncio event loop (it already returns an asyncio.Future), and as_completed to iterate through the futures in the order they complete.
Edit:
It seems imaplib is not thread-safe. I've edited my answer to use thread-local storage via threading.local, which allows us to create one connection object per-thread, which can be re-used for the entire life of the thread (meaning you create num_workers connection objects only, rather than a new connection for every fetch).
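As a small illustration of that behavior (this snippet is not part of the answer's code), threading.local gives each thread its own attribute namespace, so a connection stored there by one worker thread is invisible to the others:
import threading

local = threading.local()  # one independent namespace per thread

def show():
    # Each thread sets and reads its own copy of 'value'.
    local.value = threading.current_thread().name
    print(local.value)

workers = [threading.Thread(target=show) for _ in range(3)]
for t in workers:
    t.start()
for t in workers:
    t.join()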
I had the same need: fetching emails with Python 3, fully async. If others here are interested, I pushed an asyncio IMAP lib here: https://github.com/bamthomas/aioimaplib
You can use it like this:
import asyncio
import email

from aioimaplib import aioimaplib


@asyncio.coroutine
def wait_for_new_message(host, user, password):
    imap_client = aioimaplib.IMAP4(host=host)
    yield from imap_client.wait_hello_from_server()

    yield from imap_client.login(user, password)
    yield from imap_client.select()

    asyncio.ensure_future(imap_client.idle())
    id = 0
    while True:
        msg = yield from imap_client.wait_server_push()
        print('--> received from server: %s' % msg)
        if 'EXISTS' in msg:
            id = msg.split()[0]
            imap_client.idle_done()
            break

    result, data = yield from imap_client.fetch(id, '(RFC822)')
    email_message = email.message_from_bytes(data[0])

    attachments = []
    body = ''
    for part in email_message.walk():
        if part.get_content_maintype() == 'multipart':
            continue
        if part.get_content_maintype() == 'text' and 'attachment' not in part.get('Content-Disposition', ''):
            body = part.get_payload(decode=True).decode(part.get_param('charset', 'ascii')).strip()
        else:
            attachments.append(
                {'type': part.get_content_type(), 'filename': part.get_filename(), 'size': len(part.as_bytes())})

    print('attachments : %s' % attachments)
    print('body : %s' % body)

    yield from imap_client.logout()
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(wait_for_new_message('my.imap.server', 'user', 'pass'))
Large emails with attachments are also downloaded with asyncio.
So I have been trying to experiment with Streams in Python and wrote the following code.
ServiceSubscription.py
import asyncio
from asyncio import StreamReader, StreamWriter

# Note: the Connections class used below is not shown in the post.


class ServiceSubscription():
    def __init__(self) -> None:
        self.subscriber_connections = []
        self.service_connections = []
        self.server_listener = None
        # Dictionary of service readers where key is the name of the service
        # and the value is the reader for the service
        self.service_readers = {}

    """
    Create the listening server on port 7777
    """
    async def initiate_server(self):
        server = await asyncio.start_server(self.handle_incoming, '127.0.0.1', 7777)
        addrs = ', '.join(str(sock.getsockname()) for sock in server.sockets)
        print(f'Serving on {addrs}')
        async with server:
            await server.serve_forever()

    """
    Handle the incoming connection based on whether the connection is from a service or subscriber.
    The first message sent should include either 'service:SERVICE_NAME' or 'suscriber: [SERVICE1, SERVICE2, ...]'
    """
    async def handle_incoming(self, reader: StreamReader, writer: StreamWriter):
        data = await reader.read(100)
        message = data.decode()
        addr = writer.get_extra_info('peername')
        print(f"Received {message!r} from {addr!r}")
        if ("Service:" in f"{message!r}"):
            message = message[0:7]
            self.service_connections.append(Connections(reader, writer, message))
            service_reader = ServiceReader(reader=reader, writer=writer)
            self.service_readers[message] = (service_reader)
            await service_reader.broadcast()
        elif ("Suscriber:" in f"{message!r}"):
            message = message[0:9]
            self.subscriber_connections.append(Connections(reader, writer, message))
            self.service_readers[message].add_suscribers(writer)
        else:
            pass


class ServiceReader():
    def __init__(self, reader: StreamReader, writer: StreamWriter):
        self.reader = reader
        self.writer = writer
        self.suscribers: Writer = []
        self.loop = asyncio.get_event_loop()

    def stop(self):
        self._stop.set()

    """
    Add new subscriber's StreamWriter here
    """
    def add_suscribers(self, writer: StreamWriter):
        # Not sure if this will work
        self.suscribers.append(writer)

    """
    Read data and broadcast it to subscribed clients
    """
    async def broadcast(self):
        while not self.reader.at_eof():
            data = await self.reader.readline()
            if b'\n' in data:
                print(True)
            data = data.decode()
            print(data)
WriterTest.py
import asyncio
from os import linesep


async def tcp_echo_client(message):
    reader, writer = await asyncio.open_connection(
        '127.0.0.1', 7777)

    print(f'Send: {message!r}\n')
    writer.write(message.encode())
    await writer.drain()

    while not writer.is_closing():
        data = input("Type a message\n")
        data = (data + "\n").encode()
        writer.write(data)
        await writer.drain()
    writer.close()

asyncio.run(tcp_echo_client('Service: TEST'))
I ran both python ServiceSubscription.py and python WriterTest.py at the same time to simulate a client and server.
Upon running ServiceSubscription.py, it prints "Serving on ('127.0.0.1', 7777)". When WriterTest.py is executed, ServiceSubscription.py prints "Received 'Service: TEST' from ('127.0.0.1', 39923)". However, anything typed beyond that is not printed until WriterTest.py's connection is closed. When the connection is closed, ServiceSubscription.py prints out the remaining bytes in the buffer and also confirms that there are newlines in the data, yet they are not picked up by readline, which doesn't return after encountering a newline.
The problem is here, in your WriterTest:
data = input("Type a message\n")
The input function is blocking, so in an asyncio program it blocks the event loop. All tasks are stopped until you enter something. With asyncio streams, the actual transmission of the bytes occurs in another task. Your call to the input function blocks that task, which prevents the transmission. Your server doesn't respond because nothing is actually sent.
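A tiny standalone sketch (not taken from the question's code) makes the effect visible: the ticker task below stops printing for as long as input() is waiting, because the blocking call never yields control back to the loop.
import asyncio

async def ticker():
    # Prints "tick" twice a second as long as the event loop is free to run it.
    while True:
        print("tick")
        await asyncio.sleep(0.5)

async def main():
    task = asyncio.ensure_future(ticker())
    await asyncio.sleep(2)                 # ticker prints normally here
    input("ticker is frozen; press Enter to continue: ")  # blocks the whole loop
    await asyncio.sleep(2)                 # ticker resumes only after input() returns
    task.cancel()

asyncio.run(main())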
Since this is just a test program, you have a couple of quick solutions. You could put this line:
await asyncio.sleep(1.0)
after the line await writer.drain(). This will keep the other tasks running for one second, plenty of time for the data to get transmitted.
You could, of course, replace the call to input with some hard-coded string.
Better solutions can be found at the following link:
Listen to keypress with asyncio
As a general rule, input and asyncio do not play well together.
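One common workaround, sketched in one line here (and essentially what the next answer does with sys.stdin.readline), is to push the blocking call into the default executor so the loop keeps running:
line = await asyncio.get_event_loop().run_in_executor(None, input, "Type a message\n")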
In your client, the data is encoded together with a line break, and reader and writer are unpacked from open_connection(). Meanwhile, is_closing() only becomes true once a close (i.e. close()) is in progress, so you could instead add a condition that breaks out of the loop when the message has no characters.
import asyncio
import sys
from os import linesep


async def tcp_echo_client(message):
    reader, writer = await asyncio.open_connection(
        '127.0.0.1', 7777)

    print(f'Send: {message!r}\n')
    writer.write(message.encode())
    await writer.drain()

    while not writer.is_closing():
        await asyncio.get_event_loop().run_in_executor(
            None, lambda s="Type a message\n": sys.stdout.write(s + ' '))
        data = await asyncio.get_event_loop().run_in_executor(None, sys.stdin.readline)
        if len(data) == 0:  # empty data
            break
        data = (data + "\n").encode()
        writer.write(data)
        await writer.drain()

    print('Close the connection')
    writer.close()
    await writer.wait_closed()
Here I changed the while condition to instead check whether the data is empty, and used a different coroutine from asyncio:
async def broadcast(self):
    while True:
        try:
            # wait_for raises asyncio.TimeoutError if nothing arrives in time
            data = await asyncio.wait_for(self.reader.readline(), timeout=10.0)
        except asyncio.TimeoutError:
            data = None
        if data is None or len(data.decode()) == 0:
            print("Expected message, received None")
            break
        data = data.decode().rstrip().upper()
        print(data)
I am trying to write some code to read my inbox and process some attachments if present. I decided this would be a good time to learn how generators work, as I want to process all messages that have a particular subject. I have gotten to the point where I can get all the attachments and relevant subjects, but I sort of had to fake it, as the iterator in the for i in range . . . was not advancing, so I am advancing latest_email_id inside the loop.
def read_email_from_gmail():
    try:
        print 'got here'
        mail = imaplib.IMAP4_SSL(SMTP_SERVER)
        mail.login(FROM_EMAIL, FROM_PWD)
        mail.select('inbox')

        type, data = mail.search(None, 'ALL')
        mail_ids = data[0]
        id_list = mail_ids.split()
        first_email_id = int(id_list[0])
        latest_email_id = int(id_list[-1])
        print latest_email_id
        while True:
            for i in range(latest_email_id, first_email_id - 1, -1):
                latest_email_id -= 1
                # do stuff to get attachment and subject
                yield attachment_data, subject

    except Exception, e:
        print str(e)
for attachment, subject in read_email_from_gmail():
    x = process_attachment(attachment)
    y = process_subject(subject)
Is there a more Pythonic way to advance through my inbox using a generator to hold the inbox state?
I have learned a bit more about generators and played around with the code I started with, so I now have a function that uses a generator to send each relevant email message subject to the main function. This is what I have so far, and it works great for my needs.
import imaplib
import email

FROM_EMAIL = 'myemail@gmail.com'
FROM_PWD = "mygmail_password"
SMTP_SERVER = "imap.gmail.com"
SMTP_PORT = 993

STOP_MESSAGES = set(['Could not connect to mailbox',
                     'No Messages or None Retrieved Successfully',
                     'Could not retrieve some message',
                     'Finished processing'])

def read_emails():
    mail = imaplib.IMAP4_SSL(SMTP_SERVER)
    mail.login(FROM_EMAIL, FROM_PWD)
    mail.select('inbox')
    con_status, data = mail.uid('search', None, "ALL")
    if con_status != 'OK':
        yield 'Could not connect to mailbox'
    try:
        mail_ids = data[0].split()
    except Exception:
        yield 'No Messages or None Retrieved Successfully'
    print mail_ids
    processed = []
    while True:
        for mail_id in mail_ids:
            status, mdata = mail.uid('fetch', mail_id, '(RFC822)')
            if status != 'OK':
                yield 'Could not retrieve some message'
            if mail_id in processed:
                yield 'Finished processing'
            raw_msg = mdata[0][1]
            structured_msg = email.message_from_string(raw_msg)
            msg_subject = structured_msg['subject']
            processed.append(mail_id)
            yield msg_subject
To access my messages one by one, I then use the following block to get my messages
for msg_subj in read_emails():
    if msg_subj not in STOP_MESSAGES:
        pass  # do some stuff here with msg_subj
    else:
        print msg_subj
        break
I am accessing these messages by their uid as I will be deleting them later and would like to use the uid as the key to manage deletion. For me, the trick was to collect the uids in the list named processed and then check whether I was about to circle through them again because I was working with a uid that had already been processed.
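For the later deletion step, a rough sketch of what I have in mind (not yet part of the code above) would be to reuse the processed list of uids with the same imaplib connection:
# Hedged sketch: delete already-processed messages by uid.
for mail_id in processed:
    mail.uid('STORE', mail_id, '+FLAGS', '(\\Deleted)')
mail.expunge()  # permanently removes messages flagged \Deleted
mail.close()
mail.logout()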
I have a ROUTER whose purpose is to accumulate image data from multiple DEALER clients and perform OCR on the complete image. I found that the most efficient way of handling the OCR is through Python's multiprocessing library; the accumulated image bytes are put into a Queue for processing in a separate Process. However, I need to ensure that when a client experiences a timeout, the Process is properly terminated and doesn't linger pointlessly and hog resources.
In my current solution, I insert each newly connected client into a dict, where the value is my ClientHandler class, which holds all image data and spawns a Thread that sets a boolean named "timeout" to True once 5 seconds have elapsed. Should a new message be received within that 5-second window, bump is called and the timer is reset to 0; otherwise I clean up prior to thread termination, and the reference is deleted from the dict in the main loop:
import threading
import time
import zmq

class ClientHandler(threading.Thread):
    def __init__(self, socket):
        self.elapsed = time.time()
        self.timeout = False
        self.socket = socket

        super(ClientHandler, self).__init__()

    def run(self):
        while time.time() - self.elapsed < 5.0:
            pass

        self.timeout = True

        # CLIENT TIMED OUT
        # HANDLE TERMINATION AND CLEAN UP HERE

    def bump(self):
        self.elapsed = time.time()

    def handle(self, id, header, data):
        # HANDLE CLIENT DATA HERE
        # ACCUMULATE IMAGE BYTES, ETC

        self.socket.send_multipart([id, str(0)])

def server_task():
    clients = dict()

    context = zmq.Context.instance()
    server = context.socket(zmq.ROUTER)
    server.setsockopt(zmq.RCVTIMEO, 0)
    server.bind("tcp://127.0.0.1:7777")

    while True:
        try:
            id, header, data = server.recv_multipart()

            client = clients.get(id)
            if client == None:
                client = clients[id] = ClientHandler(server)
                client.start()

            client.bump()
            client.handle(id, header, data)
        except zmq.Again:
            # Copy the keys so we can delete from the dict while iterating
            for id in list(clients.keys()):
                if clients[id].timeout:
                    del clients[id]

    context.term()

if __name__ == "__main__":
    server_task()
But this entire method just doesn't feel right. Am I going about this improperly? If so, I would greatly appreciate if someone could point me in the right direction.
Figured it out myself, hoping it may be of assistance to others.
I instead have a ROUTER on an assigned port that distributes unique ports to each client, which thereafter connects to the newly-bound socket on said unique port. When a client disconnects, the port is recycled for reassignment.
import sys
import zmq
from multiprocessing import Process, Queue, Value

def server_task():
    context = zmq.Context.instance()
    server = context.socket(zmq.ROUTER)
    server.bind("tcp://127.0.0.1:7777")

    timeout_queue = Queue()
    port_list = [ 1 ]
    proc_list = [ ]

    while True:
        try:
            id = server.recv_multipart()[0]

            # Get an unused port from the list
            # Ports from clients that have timed out are recycled here
            while not timeout_queue.empty():
                port_list.append(timeout_queue.get())
            port = port_list.pop()
            if len(port_list) == 0:
                port_list.append(port + 1)

            # Spawn a new worker task, binding the port to a socket
            proc_running = Value("b", True)
            proc_list.append(proc_running)
            Process(target=worker_task, args=(proc_running, port, timeout_queue)).start()

            # Send the new port to the client
            server.send_multipart([id, str(7777 + port)])
        except KeyboardInterrupt:
            break

    # Safely allow our worker processes to terminate
    for proc_running in proc_list:
        proc_running.value = False

    context.term()

def worker_task(proc_running, port, timeout_queue):
    context = zmq.Context.instance()
    worker = context.socket(zmq.ROUTER)
    worker.setsockopt(zmq.RCVTIMEO, 5000)
    worker.bind("tcp://127.0.0.1:%d" % (7777 + port, ))

    while proc_running.value:
        try:
            id, data = worker.recv_multipart()
            worker.send_multipart([id, data])
        except zmq.Again:
            timeout_queue.put(port)
            context.term()
            break

    print("Client on port %d disconnected" % (7777 + port, ))
I'm trying to modify a zeromq example for processing background task and get it working. In particular, I have a xpub/xsub sockets setup, and a client would subscribe to the publisher to receive progress updates and results from the worker.
worker_server.py
import zmq
import zmq.devices

proxy = zmq.devices.ThreadDevice(zmq.QUEUE, zmq.XSUB, zmq.XPUB)
proxy.bind_in('tcp://127.0.0.1:5002')
proxy.bind_out('tcp://127.0.0.1:5003')
proxy.start()
client.py
ctx = zmq.Context()
socket = server.create_socket(ctx, 'sub')
socket.setsockopt(zmq.SUBSCRIBE, '')

poller = zmq.Poller()
print 'polling'
poller.register(socket, zmq.POLLIN)
ready = dict(poller.poll())
print 'polling done'
if ready and ready.has_key(socket):
    job_id, code, result = socket.recv_multipart()
    return {'status': code, 'data': result}
So far, the code works for small messages. However, when the worker tries to publish the task results, which are large (35393030 bytes), the client does not receive the message and the code hangs at ready = dict(poller.poll()). Now, I just started learning to use zmq, but isn't send_multipart supposed to chunk the messages? What is causing the client to not receive the results?
worker.py
def worker(logger_name, method, **task_kwargs):
    job_id = os.getpid()
    ctx = zmq.Context()
    socket = create_socket(ctx, 'pub')
    time.sleep(1)

    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    sh = WSLoggingHandler(socket, job_id)
    fh = logging.FileHandler(filename=os.path.join(tmp_folder, 'classifier.log.txt'), encoding='utf-8')
    logger.addHandler(ch)
    logger.addHandler(sh)
    logger.addHandler(fh)

    modules_arr = method.split('.')
    m = __import__(".".join(modules_arr[:-1]), globals(), locals(), -1)
    fn = getattr(m, modules_arr[-1])

    try:
        results = fn(**task_kwargs)
        print 'size of data file %s' % len(results)
        data = [
            str(job_id),
            SUCCESS_CODE,
            results
        ]
        tracker = socket.send_multipart(data)
        print 'sent!!!'
    except Exception, e:
        print traceback.format_exc()
        socket.send_multipart((
            str(job_id),
            ERROR_CODE,
            str(e)
        ))
    finally:
        socket.close()
EDIT:
I tried manually splitting up the results into smaller chunks, but I haven't had success.
results = fn(**task_kwargs)
print 'size of data file %s' % len(results)
data = [
    str(job_id),
    SUCCESS_CODE,
] + [results[i: i + 20] for i in xrange(0, len(results), 20)]
print 'list size %s' % len(data)
tracker = socket.send_multipart(data)
print 'sent!!!'
From the pyzmq documentation:
https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Socket.send_multipart
msg_parts : iterable
A sequence of objects to send as a multipart message. Each element can be any sendable object (Frame, bytes, buffer-providers)
The message doesn't get chunked automatically, each element in the iterable you pass in is the chunk. So the way you have it set up, all of your result data will be one chunk. You'll need to use an iterator that chunks your results into an appropriate size.
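As a rough sketch (the helper name and the 64 KiB frame size are illustrative, not from the question), chunking the payload before handing it to send_multipart could look like this; the receiving side then has to join the trailing frames back together.
CHUNK_SIZE = 64 * 1024  # illustrative frame size

def chunked_message(job_id, code, results, chunk_size=CHUNK_SIZE):
    # First two frames carry the metadata, the rest carry the payload in slices.
    frames = [str(job_id), code]
    frames += [results[i:i + chunk_size] for i in range(0, len(results), chunk_size)]
    return frames

# socket.send_multipart(chunked_message(job_id, SUCCESS_CODE, results))
# Receiver side: job_id, code = parts[0], parts[1]; payload is the join of parts[2:]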
I have an IRC client that receives messages on a socket.
From this client I have created several bots that connect to other peoples chat channels on twitch. (These are authorized and not Spam bots!).
Each bot is created in a separate thread that takes the channel name along with a few other parameters.
My issue is my IRC socket can only bind to one port and this handles all the IRC messages, each message has a #channel string as the third word-string that directs it to a particular channel. These messages can be handled inside each bot as each one knows the name of its channel.
My problem is; How do I send the string received over the socket to multiple threads?
import time
import socket
import threading
import string
import sys
import os

class IRCBetBot:
    #irc ref
    irc = None

    def __init__(self, IRCRef, playerName, channelName, currencyName):
        #assign variables
        self.irc = IRCRef
        self.channel = '#' + channelName
        self.irc.send(('JOIN ' + self.channel + '\r\n').encode("utf8"))
        #create readbuffer to hold strings from IRC
        readbuffer = ""
        # This is the main loop
        while 1:
            ##readbuffer## <- need to send message from IRC to this variable
            for line in temp:
                line = str.rstrip(line)
                line = str.split(line)
                if (len(line) >= 4) and ("PRIVMSG" == line[1]) and (self.channel == line[2]) and not ("jtv" in line[0]):
                    pass  #call function to handle user message
                if (line[0] == "PING"):
                    self.irc.send(("PONG %s\r\n" % line[0]).encode("utf8"))

def runAsThread(ircref, userName, channelName, currencyPrefix):
    print("Got to runAsThread with : " + str(userName) + " " + str(channelName) + " " + str(currencyPrefix))
    IRCBetBot(ircref, userName, channelName, currencyPrefix)

# Here we create the IRC connection
#IRC connection variables
nick = 'mybot'  #alter this value with the username used to connect to IRC eg: "username".
password = "oauth:mykey"  #alter this value with the password used to connect to IRC from the username above.
server = 'irc.twitch.tv'
port = 6667

#create IRC socket
irc = socket.socket()
irc.connect((server, port))

#sends variables for connection to twitch chat
irc.send(('PASS ' + password + '\r\n').encode("utf8"))
irc.send(('USER ' + nick + '\r\n').encode("utf8"))
irc.send(('NICK ' + nick + '\r\n').encode("utf8"))

# Array to hold all the new threads
threads = []

# authorised Channels loaded from file in real program
authorisedChannels = [["user1","#channel1","coin1"],["user2","#channel2","coin2"],["user3","#channel3","coin3"]]

for item in authorisedChannels:
    try:
        userName = item[0]
        channelName = item[1]
        currencyPrefix = item[2]
        myTuple = (irc, userName, channelName, currencyPrefix)
        thread = threading.Thread(target=runAsThread, args=myTuple,)
        thread.start()
        threads.append(thread)
        time.sleep(5)  # wait to avoid too many connections to IRC at once from same IP
    except Exception as e:
        print("An error occurred while creating threads.")
        print(str(e))

#create readbuffer to hold strings from IRC
readbuffer = ""
# This is the main loop
while 1:
    readbuffer = readbuffer + irc.recv(1024).decode("utf-8")
    temp = str.split(readbuffer, "\n")
    readbuffer = temp.pop()
    #
    #Need to send readbuffer to each IRCBetBot() created in runAsThread that contains a while 1: loop to listen for strings in its __init__() method.
    #

print("Waiting...")
for thread in threads:
    thread.join()
print("Complete.")
I need to somehow get the readbuffer from the main loop into each IRCBetBot object created in separate threads? Any ideas?
Here's an example that shows how you can do this using a queue for each thread. Instead of just creating a list of threads, we create a dict of threads with the channel as the key, and store both the thread object and a queue that can be used to talk to the thread in the dict.
#!/usr/bin/python3

import threading
from queue import Queue

class IRCBetBot(threading.Thread):
    def __init__(self, q, playerName, channelName, currencyName):
        super().__init__()
        self.channel = channelName
        self.playerName = playerName
        self.currencyName = currencyName
        self.queue = q

    def run(self):
        readbuffer = ""
        while 1:
            readbuffer = self.queue.get()  # This will block until a message is sent to the queue.
            print("{} got msg {}".format(self.channel, readbuffer))

if __name__ == "__main__":
    authorisedChannels = [["user1","#channel1","coin1"],
                          ["user2","#channel2","coin2"],
                          ["user3","#channel3","coin3"]]
    threads = {}
    for item in authorisedChannels:
        try:
            userName = item[0]
            channelName = item[1]
            currencyPrefix = item[2]
            myTuple = (userName, channelName, currencyPrefix)
            q = Queue()
            thread = IRCBetBot(q, *myTuple)
            thread.start()
            threads[channelName] = (q, thread)
        except Exception as e:
            print("An error occurred while creating threads.")
            print(str(e))

    while 1:
        a = input("Input your message (channel: msg): ")
        channel, msg = a.split(":")
        threads[channel][0].put(msg)  # Sends a message using the queue object
As you can see, when messages come into the socket, we parse the channel out (which your code already does) and then just pass the message on to the appropriate queue in our thread dict.
Sample output (slightly tweaked so the output isn't scrambled due to the concurrent print calls):
dan@dantop:~$ ./test.py
Input your message (channel: msg): #channel1: hi there
#channel1 got msg hi there
Input your message (channel: msg): #channel2: another one
#channel2 got msg another one
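To tie this back to the socket loop from the question, the receiving side could route each complete line by its channel word. This is a sketch reusing the question's readbuffer parsing (the third word of a PRIVMSG line is assumed to be the channel):
readbuffer = ""
while 1:
    readbuffer = readbuffer + irc.recv(1024).decode("utf-8")
    temp = str.split(readbuffer, "\n")
    readbuffer = temp.pop()
    for line in temp:
        words = line.split()
        # Hand the raw line to the bot that owns this channel, if any
        if len(words) >= 3 and words[2] in threads:
            threads[words[2]][0].put(line)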
Well, one way to do it would be to have an array of readbuffers similar to the array of threads, and then each thread basically waits on data in its particular readbuffer.
When data comes in, you can pass it to the thread you're interested in, or just copy the data over to all the readbuffers and let the threads process it if they are interested. An observer pattern would work best in this case.
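A minimal sketch of that observer idea (my own illustration, not code from the question): each bot registers a queue, and every incoming line is copied to all of them so each bot decides for itself whether it cares.
from queue import Queue

observers = []  # one Queue per bot thread

def register():
    q = Queue()
    observers.append(q)
    return q

def publish(line):
    # Copy the line to every registered readbuffer/queue
    for q in observers:
        q.put(line)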