Run bleak (Python library) in the background with asyncio

I want to use the bleak library in Python to receive data from a Bluetooth Low Energy device. This part is working. My problem now is that I don't know how to run this code in the background or in parallel.
Eventually, I want to build a tiny Python app that processes the data from the Bluetooth device. So bleak loops all the time, fetching data from the Bluetooth device and sending it to the main process, where it is processed and displayed.
For some reason, bleak does not run in a thread. Is it possible to use asyncio for this (since it is already used by bleak, it may be a good way to go)?
I checked out threading and multiprocessing, but somehow I only found examples without processes that loop infinitely and send data. I'm totally new to the topic of parallelization and/or asynchronous processing. Maybe one of you can give a hint where to look for a proper solution for this case.
Below is my code so far (for now I just loop and print data).
import asyncio
import json
import time
from bleak import BleakClient

current_index = 0
time_array = [0] * 20

def TicTocGenerator():
    # Generator that returns time differences
    ti = 0            # initial time
    tf = time.time()  # final time
    while True:
        ti = tf
        tf = time.time()
        yield tf - ti  # returns the time difference

TicToc = TicTocGenerator()  # create an instance of the TicTocGen generator

# This will be the main function through which we define both tic() and toc()
def toc(tempBool=True):
    # Prints the time difference yielded by generator instance TicToc
    tempTimeInterval = next(TicToc)
    global current_index
    if tempBool:
        #print("Elapsed time: %f seconds.\n" % tempTimeInterval)
        time_array[current_index] = tempTimeInterval
        if current_index == 19:
            current_index = 0
        else:
            current_index += 1

def tic():
    # Records a time in TicToc, marks the beginning of a time interval
    toc(False)

def Average(lst):
    return sum(lst) / len(lst)

#address = "30:ae:a4:5d:bc:ba"
address = "CCA9907B-10EA-411E-9816-A5E247DCA0C7"
MODEL_NBR_UUID = "beb5483e-36e1-4688-b7f5-ea07361b26a8"

async def run(address, loop):
    async with BleakClient(address, loop=loop) as client:
        while True:
            tic()
            model_number = await client.read_gatt_char(MODEL_NBR_UUID)
            toc()
            json_payload = json.loads(model_number)
            print()
            print(json_payload)
            print("Temp [°C]: " + "{:.2f}".format(json_payload["Temp"]))
            print("Volt [V]: " + "{:.2f}".format(json_payload["Volt"]))
            print("AngX: " + str(json_payload["AngX"]))
            print("AngY: " + str(json_payload["AngY"]))
            print("AngZ: " + str(json_payload["AngZ"]))
            #print("Millis: {0}".format("".join(map(chr, model_number))))
            print("Average [ms]: {:.1f}".format(Average(time_array) * 1000))

loop = asyncio.get_event_loop()
loop.run_until_complete(run(address, loop))

I had to make a GUI for an app that automates FUOTA on multiple BLE devices, so my solution was to put the bleak loop in a separate thread in order to be able to use the tkinter mainloop in the main thread. You need to use asyncio.run_coroutine_threadsafe to schedule a new task from the main thread.
import asyncio
from threading import Thread
import tkinter as tk
from bleak import BleakScanner

async def scan():
    devices = await BleakScanner.discover()
    for device in devices:
        print(device)

def startScan():
    # call startScan() from main thread
    asyncio.run_coroutine_threadsafe(scan(), loop)

if __name__ == "__main__":
    window = tk.Tk()
    # ...
    loop = asyncio.get_event_loop()

    def bleak_thread(loop):
        asyncio.set_event_loop(loop)
        loop.run_forever()

    t = Thread(target=bleak_thread, args=(loop,))
    t.start()

    window.mainloop()
    loop.call_soon_threadsafe(loop.stop)
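To get the readings back out of the background loop and into the main thread (what the question describes as sending data to the main process for display), one option is a plain thread-safe queue.Queue. The sketch below only illustrates that idea and is not part of the answer above; the names read_loop, start_background_loop and data_queue are made up, while the address and characteristic UUID are copied from the question.

import asyncio
import queue
from threading import Thread
from bleak import BleakClient

ADDRESS = "CCA9907B-10EA-411E-9816-A5E247DCA0C7"        # from the question
CHAR_UUID = "beb5483e-36e1-4688-b7f5-ea07361b26a8"      # from the question
data_queue = queue.Queue()

async def read_loop():
    # runs on the background thread's event loop
    async with BleakClient(ADDRESS) as client:
        while True:
            payload = await client.read_gatt_char(CHAR_UUID)
            data_queue.put(payload)   # hand each reading to the main thread

def start_background_loop(loop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

loop = asyncio.new_event_loop()
Thread(target=start_background_loop, args=(loop,), daemon=True).start()
asyncio.run_coroutine_threadsafe(read_loop(), loop)

# main thread: process whatever has arrived so far
# (in a GUI this would typically sit in a timer callback instead)
while True:
    try:
        reading = data_queue.get(timeout=1.0)
    except queue.Empty:
        continue
    print(reading)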

Related

Python Bleak scan for advertisements and exit event loop

I've inherited some code that utilizes Python Bleak to scan for advertisements emitted from a certain device. Whenever an advertisement from the Bluetooth MAC address and service ID we're looking for is detected and a certain condition on the extracted payload is true, we want to terminate and return. In the attached code, I've masked the Bluetooth address and service IDs.
Not being too familiar with the event loop, is there a way to exit before the timer runs out? I suppose there's probably a better way to approach this problem.
Sample code:
import asyncio
import struct
from bleak import BleakScanner

timeout_seconds = 10
address_to_look_for = 'masked'
service_id_to_look_for = 'masked'

def detection_callback(device, advertisement_data):
    if device.address == address_to_look_for:
        byte_data = advertisement_data.service_data.get(service_id_to_look_for)
        num_to_test = struct.unpack_from('<I', byte_data, 0)
        if num_to_test == 1:
            print('here we want to terminate')

async def run():
    scanner = BleakScanner()
    scanner.register_detection_callback(detection_callback)
    await scanner.start()
    await asyncio.sleep(timeout_seconds)
    await scanner.stop()

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run())
I'm sure there are many ways this can be done. A small modification to your code: rather than having asyncio.sleep wait for the full period before you stop the scan, you could have a while loop that ends on either time elapsed or a device-found event.
For example:
import asyncio
import struct
from bleak import BleakScanner

timeout_seconds = 20
address_to_look_for = 'F1:D9:3B:39:4D:A2'
service_id_to_look_for = '0000feaa-0000-1000-8000-00805f9b34fb'

class MyScanner:
    def __init__(self):
        self._scanner = BleakScanner()
        self._scanner.register_detection_callback(self.detection_callback)
        self.scanning = asyncio.Event()

    def detection_callback(self, device, advertisement_data):
        # Looking for:
        # AdvertisementData(service_data={
        #     '0000feaa-0000-1000-8000-00805f9b34fb': b'\x00\xf6\x00\x00\x00Jupiter\x00\x00\x00\x00\x00\x0b'},
        #     service_uuids=['0000feaa-0000-1000-8000-00805f9b34fb'])
        if device.address == address_to_look_for:
            byte_data = advertisement_data.service_data.get(service_id_to_look_for)
            num_to_test, = struct.unpack_from('<I', byte_data, 0)
            if num_to_test == 62976:
                print('\t\tDevice found so we terminate')
                self.scanning.clear()

    async def run(self):
        await self._scanner.start()
        self.scanning.set()
        end_time = loop.time() + timeout_seconds
        while self.scanning.is_set():
            if loop.time() > end_time:
                self.scanning.clear()
                print('\t\tScan has timed out so we terminate')
            await asyncio.sleep(0.1)
        await self._scanner.stop()

if __name__ == '__main__':
    my_scanner = MyScanner()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(my_scanner.run())
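A possible variation on the same idea (my own sketch, not part of the answer above): instead of polling the Event every 0.1 seconds, the coroutine can await it directly with asyncio.wait_for, so the scan ends the moment the callback fires or the timeout expires.

import asyncio
from bleak import BleakScanner

timeout_seconds = 20
address_to_look_for = 'F1:D9:3B:39:4D:A2'

async def run():
    found = asyncio.Event()

    def detection_callback(device, advertisement_data):
        # the same service-data matching as in the class above would go here
        if device.address == address_to_look_for:
            found.set()

    scanner = BleakScanner()
    scanner.register_detection_callback(detection_callback)
    await scanner.start()
    try:
        # returns as soon as the callback sets the event, raises on timeout
        await asyncio.wait_for(found.wait(), timeout=timeout_seconds)
        print('Device found so we terminate')
    except asyncio.TimeoutError:
        print('Scan has timed out so we terminate')
    finally:
        await scanner.stop()

asyncio.run(run())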

Async IO switch coroutine on multiprocessing.queue.get() not ready

I am writing some code where I have 3 processes (spawned from the main). The first one is a process that uses Async IO to create 3 coroutines and switch between them. The last two processes run independently and generate two outputs that are used in one of the coroutines of the first process.
The communication is managed using multiprocessing.Queue(): the main process puts the input data into queue_source_position_hrir_calculator and queue_source_position_cutoff_calculator, then these two queues are emptied by p2_hrir_computation_process and p3_cutoff_computation_process. These two processes output their computation results into two output queues, queue_computed_hrirs and queue_computed_cutoff.
Finally, these two queues are consumed by the Async IO process, in particular inside the input_parameters_coroutine function.
The full code is the following (I will highlight the key parts in following snippets):
import asyncio
import multiprocessing
import numpy as np
import time

from classes.HRIR_interpreter_min_phase_linear_interpolation import HRIR_interpreter_min_phase_linear_interpolation
from classes.object_renderer import ObjectRenderer

# Useful resources: https://bbc.github.io/cloudfit-public-docs/asyncio/asyncio-part-2
# https://realpython.com/async-io-python/

Fs = 44100

# region Async_IO functions
async def audio_input_coroutine(overlay):
    for i in range(0, 100):
        print('Executing audio input coroutine')
        print(overlay)
        await asyncio.sleep(1 / (Fs * 4))

async def input_parameters_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff):
    for i in range(0, 10):
        print('Executing audio input_parameters coroutine')
        #print(overlay)
        current_hrir = queue_computed_hrirs.get()
        print('got current hrir')
        current_cutoff = queue_computed_cutoff.get()
        print('got current cutoff')
        await asyncio.sleep(0.5)

async def audio_output_coroutine(overlay):
    for i in range(0, 10):
        print('Executing audio_output coroutine')
        #print(overlay)
        await asyncio.sleep(0.5)

async def main_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff):
    await asyncio.gather(audio_input_coroutine(overlay),
                         input_parameters_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff),
                         audio_output_coroutine(overlay))

def async_IO_main_process(queue_computed_hrirs, queue_computed_cutoff):
    overlay = 10
    asyncio.run(main_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff))
# endregion

# region HRIR_computation_process
def compute_hrir(queue_source_position, queue_computed_hrirs):
    print('computing hrir')
    SOFA_filename = '../HRTF_data/HUTUBS_min_phase.sofa'
    # loading the simulated dataset using the support class HRIRInterpreter
    HRIRInterpreter = HRIR_interpreter_min_phase_linear_interpolation(SOFA_filename=SOFA_filename)
    # variable to check if I have other positions in my input queue
    eof_source_position = False
    # Un-comment following line to return when no more messages
    while not eof_source_position:
    #while True:
        # print('inside while loop')
        time.sleep(1)
        # print('state of the queue', queue_source_position.empty())
        if not eof_source_position:
            position = queue_source_position.get()
            if position is None:
                eof_source_position = True  # end of messages indicator
            else:
                required_IR = HRIRInterpreter.get_interpolated_IR(position[0], position[1], 1)
                queue_computed_hrirs.put(required_IR)
                # print('printing computed HRIR:', required_IR)
    print('completed hrir computation, adding none to queue')
    queue_computed_hrirs.put(None)  # end of messages indicator
    print('completed hrir process')
# endregion

# region cutoff_computation_process
def compute_cutoff(queue_source_position, queue_computed_cutoff):
    print('computing cutoff')
    cutoff = 20000
    object_renderer = ObjectRenderer()
    object_positions = np.array([(20, 0), (40, 0), (100, 0), (225, 0)])
    eof_source_position = False
    # Un-comment following line to return when no more messages
    while not eof_source_position:
    #while True:
        time.sleep(1)
        object_renderer.update_object_position(object_positions)
        if not eof_source_position:
            print('inside source position update')
            source_position = queue_source_position.get()
            if source_position is None:  # end of messages indicator
                eof_source_position = True
            else:
                cutoff = object_renderer.get_cutoff(azimuth=source_position[0], elevation=source_position[1])
                queue_computed_cutoff.put(cutoff)
    queue_computed_cutoff.put(None)  # end of messages indicator
# endregion
if __name__ == "__main__":
    import time

    queue_source_position_hrir_calculator = multiprocessing.Queue()
    queue_source_position_cutoff_calculator = multiprocessing.Queue()
    queue_computed_hrirs = multiprocessing.Queue()
    queue_computed_cutoff = multiprocessing.Queue()

    i = 0.0
    # Basically here I am writing a sequence of positions into the queue,
    # then I add a None value to detect when I am done with the simulation so the process can end
    for _ in range(10):
        # print('into main while-> source_position:', source_position[0])
        source_position = np.array([i, 0.0])
        queue_source_position_hrir_calculator.put(source_position)
        queue_source_position_cutoff_calculator.put(source_position)
        i += 10
    queue_source_position_hrir_calculator.put(None)    # "end of messages" indicator
    queue_source_position_cutoff_calculator.put(None)  # "end of messages" indicator

    p1_async_IO_process = multiprocessing.Process(target=async_IO_main_process, args=(queue_computed_hrirs, queue_computed_cutoff))  # process that manages the ASYNC_IO coroutines between DMAs
    p2_hrir_computation_process = multiprocessing.Process(target=compute_hrir, args=(queue_source_position_hrir_calculator, queue_computed_hrirs))
    p3_cutoff_computation_process = multiprocessing.Process(target=compute_cutoff, args=(queue_source_position_cutoff_calculator, queue_computed_cutoff))

    p1_async_IO_process.start()
    p2_hrir_computation_process.start()
    p3_cutoff_computation_process.start()

    # temp cycle to join processes
    # for _ in range(2):
    #     current_hrir = queue_computed_hrirs.get()
    #     current_cutoff = queue_computed_cutoff.get()

    print('joining async_IO process')
    p1_async_IO_process.join()
    print('joined async_IO process')

    # NB: to join a process, its queues must be empty. So before calling the join on p2,
    # I should get the values from the queue_computed_hrirs queue
    print('joining hrir computation process')
    p2_hrir_computation_process.join()
    print('joined hrir computation process')

    print('joining cutoff computation process')
    p3_cutoff_computation_process.join()
    print('joined cutoff computation process')

    print("completed main")
The important part of the code is:
async def input_parameters_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff):
    for i in range(0, 10):
        print('Executing audio input_parameters coroutine')
        #print(overlay)
        current_hrir = queue_computed_hrirs.get()
        print('got current hrir')
        current_cutoff = queue_computed_cutoff.get()
        print('got current cutoff')
        await asyncio.sleep(0.5)
This coroutine receives as input three variables: overlay (a dummy variable I am using for future developments) and the two multiprocessing.Queue() instances, queue_computed_hrirs and queue_computed_cutoff.
At the moment my input_parameters_coroutine gets "stuck" while executing current_hrir = queue_computed_hrirs.get() and current_cutoff = queue_computed_cutoff.get(). I say "stuck" because the code works fine and completes its execution; the problem is that those two calls are blocking, so my coroutine stops until it has something to get from the queue.
What I would like to achieve is: try to execute current_hrir = queue_computed_hrirs.get(); if it is not possible at that moment, switch to another coroutine and let it do what it wants, then go back and check whether it is possible to execute current_hrir = queue_computed_hrirs.get(); if yes, go on, if not, switch again to another coroutine and let it do its job.
I saw that there are some problems in making asyncio and multiprocessing communicate (What kind of problems (if any) would there be combining asyncio with multiprocessing?, Can I somehow share an asynchronous queue with a subprocess?) but I wasn't able to find a smart solution to my problem.
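For reference, one pattern that is often suggested for exactly this situation is to hand the blocking Queue.get() to a thread pool via loop.run_in_executor, so that awaiting it lets the event loop switch to the other coroutines until a value arrives. The snippet below is only a sketch of that idea applied to the coroutine from the question; it is untested against the full program.

import asyncio
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=2)

async def input_parameters_coroutine(overlay, queue_computed_hrirs, queue_computed_cutoff):
    loop = asyncio.get_running_loop()
    for i in range(0, 10):
        print('Executing audio input_parameters coroutine')
        # The blocking get() runs in a worker thread; awaiting it lets the
        # event loop run the other coroutines until a value arrives.
        current_hrir = await loop.run_in_executor(executor, queue_computed_hrirs.get)
        print('got current hrir')
        current_cutoff = await loop.run_in_executor(executor, queue_computed_cutoff.get)
        print('got current cutoff')
        await asyncio.sleep(0.5)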

processing very large text files in parallel using multiprocessing and threading

I have found several other questions that touch on this topic but none that are quite like my situation.
I have several very large text files (3+ gigabytes in size).
I would like to process them (say 2 documents) in parallel using multiprocessing. As part of my processing (within a single process) I need to make an API call, and because of this I would like each process to have its own threads to run asynchronously.
I have come up with a simplified example (I have commented the code to try to explain what I think it should be doing):
import multiprocessing
from threading import Thread
import threading
from queue import Queue
import time

def process_huge_file(*, file_, batch_size=250, num_threads=4):
    # create APICaller instance for each process that has its own Queue
    api_call = APICaller()
    batch = []
    # create threads that will run asynchronously to make API calls
    # I expect these to immediately block since there is nothing in the Queue
    # (which is what api_call.run depends on to make a call)
    threads = []
    for i in range(num_threads):
        thread = Thread(target=api_call.run)
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    ####
    # start processing the file line by line
    for line in file_:
        # if we are at our batch size, add the batch to the api_call to let
        # the threads do their api calling
        if i % batch_size == 0:
            api_call.queue.put(batch)
        else:
            # add fake line to batch
            batch.append(fake_line)

class APICaller:
    def __init__(self):
        # thread safe queue to feed the threads which point at instances
        # of these APICaller objects
        self.queue = Queue()

    def run(self):
        print("waiting for something to do")
        self.queue.get()
        print("processing item in queue")
        time.sleep(0.1)
        print("finished processing item in queue")

if __name__ == "__main__":
    # fake docs
    fake_line = "this is a fake line of some text"
    # two fake docs with line length == 1000
    fake_docs = [[fake_line] * 1000 for i in range(2)]
    ####
    num_processes = 2
    procs = []
    for idx, doc in enumerate(fake_docs):
        proc = multiprocessing.Process(target=process_huge_file, kwargs=dict(file_=doc))
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
As the code is now, "waiting for something to do" prints 8 times (which makes sense: 4 threads per process), and then it stops, or "deadlocks", which is not what I expect. I expect it to start sharing time with the threads as soon as I start putting items in the Queue, but the code does not appear to make it that far. I ordinarily would step through to find the hang-up, but I still don't have a solid understanding of how to best debug with threads (another topic for another day).
In the meantime, can someone help me figure out why my code is not doing what it should be doing?
I have made a few adjustments and additions, and the code appears to do what it is supposed to now. The main adjustments are: adding a CloseableQueue class (from Brett Slatkin's Effective Python, Item 55), and ensuring that I call close and join on the queue so that the threads properly exit. Full code with these changes below:
import multiprocessing
from threading import Thread
import threading
from queue import Queue
import time

from concurrency_utils import CloseableQueue

def sync_process_huge_file(*, file_, batch_size=250):
    batch = []
    for idx, line in enumerate(file_):
        # do processing on the text
        if idx % batch_size == 0:
            time.sleep(0.1)
            batch = []
            # api_call.queue.put(batch)
        else:
            computation = 0
            for i in range(100000):
                computation += i
            batch.append(line)

def process_huge_file(*, file_, batch_size=250, num_threads=4):
    api_call = APICaller()
    batch = []
    # api call threads
    threads = []
    for i in range(num_threads):
        thread = Thread(target=api_call.run)
        threads.append(thread)
        thread.start()
    for idx, line in enumerate(file_):
        # do processing on the text
        if idx % batch_size == 0:
            api_call.queue.put(batch)
        else:
            computation = 0
            for i in range(100000):
                computation += i
            batch.append(line)
    for _ in threads:
        api_call.queue.close()
    api_call.queue.join()
    for thread in threads:
        thread.join()

class APICaller:
    def __init__(self):
        self.queue = CloseableQueue()

    def run(self):
        for item in self.queue:
            print("waiting for something to do")
            pass
            print("processing item in queue")
            time.sleep(0.1)
            print("finished processing item in queue")
        print("exiting run")

if __name__ == "__main__":
    # fake docs
    fake_line = "this is a fake line of some text"
    # two fake docs with line length == 1000
    fake_docs = [[fake_line] * 10000 for i in range(2)]
    ####
    time_s = time.time()
    num_processes = 2
    procs = []
    for idx, doc in enumerate(fake_docs):
        proc = multiprocessing.Process(target=process_huge_file, kwargs=dict(file_=doc))
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
    time_e = time.time()
    print(f"took {time_e-time_s} ")

class CloseableQueue(Queue):
    SENTINEL = object()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def close(self):
        self.put(self.SENTINEL)

    def __iter__(self):
        while True:
            item = self.get()
            try:
                if item is self.SENTINEL:
                    return  # exit thread
                yield item
            finally:
                self.task_done()
As expected, this is a great speedup over running synchronously: 120 seconds vs 50 seconds.

Detecting Activity with mouse, keyboard and voice on windows

We have a module in Python (through win32) to detect user mouse and keyboard activity using GetLastInputInfo and GetTickCount. How can we register voice activity in GetLastInputInfo?
Or maybe we can add a synthesized input to update GetLastInputInfo every time the mic detects voice input? But can we do that without interrupting the user?
Sample code using PyAudio to detect user voice by volume:
from array import array
import pyaudio

audio = pyaudio.PyAudio()
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
CHUNK = 1024

# recording prerequisites
stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

while True:
    data = stream.read(CHUNK)
    data_chunk = array('h', data)
    vol = max(data_chunk)
    if vol >= 500:
        # voice detected from mic
        print("talking - {}".format(vol))
    else:
        print("-")
Sample code for detecting user input:
# code to get inactivity
import ctypes
from ctypes import Structure, WINFUNCTYPE, POINTER
from ctypes.wintypes import BOOL, UINT, DWORD

class LastInputInfo(Structure):
    _fields_ = [
        ("cbSize", UINT),
        ("dwTime", DWORD)
    ]

def _getLastInputTick() -> int:
    """
    retrieves the last input action
    :return: int
    """
    prototype = WINFUNCTYPE(BOOL, POINTER(LastInputInfo))
    paramflags = ((1, "lastinputinfo"), )
    # type: ignore
    c_GetLastInputInfo = prototype(("GetLastInputInfo", ctypes.windll.user32), paramflags)
    l = LastInputInfo()
    l.cbSize = ctypes.sizeof(LastInputInfo)
    assert 0 != c_GetLastInputInfo(l)
    return l.dwTime

def _getTickCount() -> int:
    """
    :return: int
    tick count
    """
    prototype = WINFUNCTYPE(DWORD)
    paramflags = ()
    c_GetTickCount = prototype(("GetTickCount", ctypes.windll.kernel32), paramflags)  # type: ignore
    return c_GetTickCount()

def seconds_since_last_input():
    """
    :return: float
    seconds since the last user input
    """
    seconds_since_input = (_getTickCount() - _getLastInputTick()) / 1000
    return seconds_since_input

# inactivity in N seconds
inactive_seconds = 10
afk = False
while True:
    seconds_since_input = seconds_since_last_input()
    # becomes active
    if afk and seconds_since_input < inactive_seconds:
        afk = False
    # becomes afk
    elif not afk and seconds_since_input >= inactive_seconds:
        afk = True
    print("afk status: {}, seconds since last input: {}".format(afk, seconds_since_input))
If you want to do something without interrupting the user, you can use multithreading with threading.
If you want to save something in a variable that every thread can use, you can use queue.
This will run whatever you need to run in a different thread and save the result in a shared variable.
Import the modules:
import threading
import queue
Create a shared variable:
shared_var = queue.Queue()
Create a function that checks what you want (in this case audio) and edits the shared variable.
Edit the shared variable with: shared_var.put(item)
(in this case, whenever audio is detected you can say audio_detected.put(True) and/or current_tick_count.put(tick_count), or something like that)
Create a thread and pass in the function you made to check:
thread = threading.Thread(target=function, args=arguments)
where target is the function you want to call in this new thread, and args are the arguments you need to pass into your function.
Start the new thread:
thread.start()
On the main thread or a new thread, do what you want with that variable.
shared_var.get() will wait until something is added to shared_var and then return what was added.
Example code:
import threading
import queue
import time

text = queue.Queue()

def change(text):
    time.sleep(3)
    text.put("hello world")

thread = threading.Thread(target=change, args=(text,))
# ^ IMPORTANT! (,)
thread.start()

def display(text):
    text = text.get()  # This will wait till text has something inside and then return it
    print(text)

thread2 = threading.Thread(target=display, args=(text,))
# ^ IMPORTANT! (,)
thread2.start()

input()  # To show it won't interrupt the user until the text has something
I am sorry if this answer isn't so clear. I'm not familiar with pyaudio and win32, but I do know threading and queue, so you can just work with this and add your code. If you want, you could edit the answer with your code in it.
I hope this helps!
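Regarding the second idea in the question (synthesizing an input so GetLastInputInfo is refreshed whenever the mic detects voice): a rough sketch of what that could look like with SendInput and a zero-distance mouse move is below. This is only a suggestion; the structure definitions and the assumption that injected input updates GetLastInputInfo without visibly interrupting the user should be verified on your setup.

import ctypes
from ctypes import wintypes

INPUT_MOUSE = 0
MOUSEEVENTF_MOVE = 0x0001
ULONG_PTR = ctypes.c_size_t  # assumption: pointer-sized unsigned integer

class MOUSEINPUT(ctypes.Structure):
    _fields_ = [("dx", wintypes.LONG),
                ("dy", wintypes.LONG),
                ("mouseData", wintypes.DWORD),
                ("dwFlags", wintypes.DWORD),
                ("time", wintypes.DWORD),
                ("dwExtraInfo", ULONG_PTR)]

class INPUT(ctypes.Structure):
    class _U(ctypes.Union):
        _fields_ = [("mi", MOUSEINPUT)]
    _anonymous_ = ("u",)
    _fields_ = [("type", wintypes.DWORD),
                ("u", _U)]

def refresh_last_input():
    # Synthesize a zero-distance mouse move; the intent is to refresh
    # GetLastInputInfo without any visible effect for the user.
    # Call this whenever the mic volume check detects voice.
    inp = INPUT(type=INPUT_MOUSE)
    inp.mi = MOUSEINPUT(0, 0, 0, MOUSEEVENTF_MOVE, 0, 0)
    ctypes.windll.user32.SendInput(1, ctypes.byref(inp), ctypes.sizeof(INPUT))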

How do I use concurrency in faust?

I'm working with faust and would like to leverage its concurrency feature.
The example listed doesn't quite demonstrate the use of concurrency.
What I would like to do is read from a Kafka topic and unnest the JSON.
Then the shipments are sent to a process to calculate billing etc. I should send 10 shipments at a time to a function which does the calculation. For this I'm using concurrency, so 10 shipments can be calculated concurrently.
import faust
import time
import json
from typing import List
import asyncio

class Items(faust.Record):
    name: str
    billing_unit: str
    billing_qty: int

class Shipments(faust.Record, serializer="json"):
    shipments: List[Items]
    ship_type: str
    shipping_service: str
    shipped_at: str

app = faust.App('ships_app', broker='kafka://localhost:9092', )
ship_topic = app.topic('test_shipments', value_type=Shipments)

@app.agent(value_type=str, concurrency=10)
async def mytask(records):
    # task that does some other activity
    async for record in records:
        print(f'received....{record}')
        time.sleep(5)

@app.agent(ship_topic)
async def process_shipments(shipments):
    # async for ships in stream.take(100, within=10):
    async for ships in shipments:
        data = ships.items
        uid = faust.uuid()
        for item in data:
            item_uuid = faust.uuid()
            print(f'{uid}, {item_uuid}, {ships.ship_type}, {ships.shipping_service}, {ships.shipped_at}, {item.name}, {item.billing_unit}, {item.billing_qty}')
            await mytask.send(value=("{} -- {}".format(uid, item_uuid)))
            # time.sleep(2)
        # time.sleep(10)

if __name__ == '__main__':
    app.main()
OK, I figured out how it works. The problem with the example you gave was actually the time.sleep bit, not the concurrency bit. Below are two silly examples that show how an agent would work with and without concurrency.
import faust
import asyncio

app = faust.App(
    'example_app',
    broker="kafka://localhost:9092",
    value_serializer='raw',
)
t = app.topic('topic_1')

# @app.agent(t, concurrency=1)
# async def my_task(tasks):
#     async for my_task in tasks:
#         val = my_task.decode('utf-8')
#         if (val == "Meher"):
#             # This will print out second because there is only one thread.
#             # It'll take 5ish seconds and print out right after Waldo
#             print("Meher's a jerk.")
#         else:
#             await asyncio.sleep(5)
#             # Since there's only one thread running this will effectively
#             # block the agent.
#             print(f"Where did {val} go?")

@app.agent(t, concurrency=2)
async def my_task2(tasks):
    async for my_task in tasks:
        val = my_task.decode('utf-8')
        if (val == "Meher"):
            # This will print out first even though the Meher message is
            # received second.
            print("Meher's a jerk.")
        else:
            await asyncio.sleep(5)
            # Because this will be sleeping and there are two threads available.
            print(f"Where did {val} go?")

# ===============================
# In another process run
from kafka import KafkaProducer
p = KafkaProducer()
p.send('topic_1', b'Waldo'); p.send('topic_1', b'Meher')
