I have a function that generates a random reaction time and waits before printing out to the console.
Here is my code
import time
import random
def saySnap(player):
    """Wait a random 120-400 ms "reaction time", then print the player's snap."""
    delay_s = random.randint(120, 401) / 1000
    time.sleep(delay_s)
    print("{} : Snap!".format(player))
saySnap("p1")  # blocks for its full sleep before p2 is even called
saySnap("p2")  # hence p1 always prints first
This results in 'p1' always being first since time.sleep blocks the program. How can I make sure that either player can print first?
You can use threading:
import time
import random
import threading
def saySnap(player):
    """Sleep a random 120-400 ms reaction time, then announce the snap."""
    reaction = random.randint(120, 401) / 1000
    time.sleep(reaction)
    print(f"{player}: Snapped in {reaction}!")

# One thread per player, so neither sleep blocks the other.
p1_thread = threading.Thread(target=saySnap, args=("p1",))
p2_thread = threading.Thread(target=saySnap, args=("p2",))
p1_thread.start()
p2_thread.start()
This prints the two snaps in whichever order the randomly generated reaction times dictate, so either player can be first.
You can use the threading.Timer class from built in threading module which represents an action that should be run only after a certain amount of time has passed.
Use:
import threading
# Action performed once the reaction time has elapsed.
def printSnap(player):
    print("{} : Snap!".format(player))

def saySnap(player):
    """Schedule printSnap(player) to fire after a random 120-400 ms delay."""
    reaction = random.randint(120, 401) / 1000
    # Timer runs the callback on its own thread once the delay elapses.
    threading.Timer(reaction, printSnap, args=(player,)).start()
Or, if you don't want to define another function printSnap, Use:
def saySnap(player):
    """Print "<player> : Snap!" after a random 120-400 ms delay via threading.Timer."""
    reaction = random.randint(120, 401) / 1000
    callback = lambda p: print(f"{p} : Snap!")
    threading.Timer(reaction, callback, args=(player,)).start()
Calling the function:
saySnap("p1")  # returns immediately; the Timer fires later on its own thread
saySnap("p2")  # so whichever timer expires first prints first
Related
This is my take for async based on this
How to use AsyncHTTPProvider in web3py?
article. However, upon running this code it executes like a
synchronous function.
For web3.js, there is a support for batch request
https://dapp-world.com/smartbook/web3-batch-request-Eku8 . However,
web3.py does not have any.
I am using Ethereum Alchemy API which supports about 19 API calls per
second.
I have about 1000 Ethereum Addresses
How do I modify the code
such that I am able to batch 19 addresses per second?
from web3 import Web3
from web3.eth import AsyncEth
import time
import pandas as pd
import aiohttp
import asyncio
# NOTE(review): hard-coded API key — move it to an environment variable before sharing.
alchemy_url = "https://eth-mainnet.g.alchemy.com/v2/zCTn-wyjipF5DvGFVNEx_XqCKZakaB57"
w3 = Web3(Web3.AsyncHTTPProvider(alchemy_url), modules={'eth': (AsyncEth,)}, middlewares=[])

start = time.time()
df = pd.read_csv('Ethereum/ethereumaddresses.csv')
Wallet_Address = (df.loc[:, 'Address'])
#Balance_storage = []
session_timeout = aiohttp.ClientTimeout(total=None)  # NOTE(review): currently unused

RATE_LIMIT = 19  # Alchemy tier allows ~19 API calls per second

async def get_balances():
    """Fetch every balance, RATE_LIMIT requests at a time.

    BUG FIX: the original awaited each get_balance() in sequence, so the
    "async" code ran fully synchronously. Here each batch of RATE_LIMIT
    calls runs concurrently via asyncio.gather, and batches are paced at
    roughly one per second to stay under the provider's quota.
    """
    addresses = list(Wallet_Address)
    for batch_start in range(0, len(addresses), RATE_LIMIT):
        batch = addresses[batch_start:batch_start + RATE_LIMIT]
        # Fire the whole batch concurrently; results come back in order.
        balances = await asyncio.gather(*(w3.eth.get_balance(a) for a in batch))
        for address, balance in zip(batch, balances):
            print(address, balance)
        if batch_start + RATE_LIMIT < len(addresses):
            await asyncio.sleep(1)  # pace: one batch per second

asyncio.run(get_balances())
end = time.time()
total_time = end - start
print(f"It took {total_time} seconds to make {len(Wallet_Address)} API calls")
I think my idea isn't the best but you can use it as a temporary solution.
For this, you have to use ThreadpoolExecutor.
I executed a benchmark and found these results:
Without ThreadpoolExecutor, using BSC Public RPC, just running in for loop, takes more than 3 minutes to finish the process.
Click here to see the output of test 1
With ThreadpoolExecutor, BSC Public RPC, and 100ms Delay using time.sleep(0.1), finishes in less than 40 seconds as you can see in the next image. Click here to see the output of test 2
With ThreadpoolExecutor, using Quicknode, and 100ms Delay, finishes in 35 seconds. Click here to see the output of test 3
Doing simple math (1000 wallets / 19 calls per sec.) we know your process needs to take at least something close to 50 seconds. Try running at 100ms delays and if it doesn't work you can increase more delay.
One of the problems with using time.sleep is if you are using GUI or something like that which we can't pause (because GUI will freeze) during the process. (I think you can use multiprocessing to bypass this xD)
The second problem is that doing this will probably change each address's position in CSV. (You can attribute _id or something like that for each address to organize with For Loops after ends.)
Code: Working Good at BSC (Just change the RPC). This code will find all balances and store them inside self.data (defaultdict). After this, save it in new CSV file called "newBalances.csv" (You can change this)
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from web3 import Web3
import pandas as pd
import time
class multiGetBalanceExample():
    """Fetch the balance of every address in the CSV with a thread pool,
    then write the results to ./Ethereum/newBalances.csv.

    Results are collected as (address, balance) tuples appended in a single
    list operation, so rows stay paired even though several workers write
    concurrently.
    """

    def __init__(self):
        self.initialtime = datetime.now()  # start timestamp
        #=== Setup Web3 ===#
        self.bsc = "https://bsc-dataseed.binance.org/"  # rpc (change this)
        self.web3 = Web3(Web3.HTTPProvider(self.bsc))   # web3 connect
        #=== Load the CSV file ===#
        self.df = pd.read_csv(r"./Ethereum/ethereumaddresses.csv")
        self.wallet_address = (self.df.loc[:, 'Address'])
        #=== Temporary address/balance store ===#
        self.data = defaultdict(list)
        #=== Run ===#
        self.start_workers(self.data)
        #=== Finish ===#
        self.saveCsv()  # save into a new csv file
        self.finaltime = datetime.now()  # end timestamp
        print(f"\nFinished! Process takes: {self.finaltime - self.initialtime}")

    @staticmethod
    def _chunk_bounds(total, workers):
        """Split range(total) into up to `workers` contiguous (start, stop) pairs."""
        if total <= 0:
            return []
        size = -(-total // workers)  # ceiling division
        return [(s, min(s + size, total)) for s in range(0, total, size)]

    def start_workers(self, data, workers=10):
        """Fan the address list out over `workers` threads.

        BUG FIX: chunk boundaries are computed from the actual CSV length
        instead of ten hard-coded 0..1000 slices, so any number of
        addresses works.
        """
        with ThreadPoolExecutor(max_workers=workers) as executor:
            for _from, _to in self._chunk_bounds(len(self.wallet_address), workers):
                executor.submit(self.getBalances, _data=data, _from=_from, _to=_to)
        return data

    def getBalances(self, _data, _from, _to):
        """Worker: fetch balances for wallet indices [_from, _to)."""
        for i in range(_from, _to):
            get_balance = self.web3.eth.get_balance(self.wallet_address[i])
            # BUG FIX: append one (address, balance) tuple rather than two
            # separate appends — interleaved threads could otherwise misalign
            # the Address and Balance columns.
            _data["rows"].append((self.wallet_address[i], get_balance))
            print(f"Found: {self.wallet_address[i], get_balance}\n")
            time.sleep(0.1)  # throttle; tune to your RPC's rate limit
        return _data

    def saveCsv(self):
        """Dump the collected (address, balance) rows to a new CSV file."""
        rows = self.data["rows"]
        new_df = pd.DataFrame(rows, columns=["Address", "Balance"])
        new_df.to_csv(r"./Ethereum/newBalances.csv", index=False)
multiGetBalanceExample()
I want to execute powers sync function in a different thread and group the returned value from powers whenever I need it in my main script. I have tried the Threading module of python, but it didn't work for me.
I noticed that the async generator async for power in powers(): doesn't stop even if I put a condition to break to loop, that's why I was thinking maybe I can run this loop in different thread and get the returned value when I need it.
Here is my code :
import asyncio
from rtlsdr import RtlSdr
import numpy as np
async def powers():
    """Async generator: yield the relative power of each 512-sample SDR block."""
    # SDR CONFIGURATION
    sdr = RtlSdr()
    sdr.rs = 2.4e6  # sample rate
    sdr.fc = 801e6  # center frequency
    try:
        async for samples in sdr.stream(512):
            samples = samples - np.mean(samples)  # remove DC offset
            power = np.mean(np.abs(samples ** 2))
            yield power
            # print('Relative power:', 3.2 * 10 * np.log10(power), 'dB')  # 3.2 = calibration factor
    finally:
        # BUG FIX: the original ran stop()/close() only after the stream ended
        # normally; if the consumer breaks out of its `async for` early the
        # generator is closed and the device was never released. try/finally
        # guarantees cleanup either way.
        await sdr.stop()
        sdr.close()
async def main():
    # Track the best (max) power seen; stop after nb_config improvements.
    nb_config = 1000
    fpower = -100  # NOTE(review): compared against linear power (always >= 0),
                   # so the very first sample always replaces it; if dB was
                   # intended, compare 10*log10(power) instead — confirm.
    i = 0
    while i < nb_config:
        async for power in powers():
            print(i)
            print(10 * np.log10(power))
            if power > fpower:
                fpower = power
                i+= 1  # i advances only when a new maximum is found, so this
                       # loop can run for a very long time on steady input
            if i >= nb_config:
                # Breaking here closes the powers() generator; the while
                # condition then ends the outer loop as well.
                break
asyncio.run(main())
so I've been thinking about this for a couple days now and I cant figure it out, I've searched around but couldn't find the answer I was looking for, so any help would be greatly appreciated.
Essentially what I am trying to do is call a method on a group of objects in my main thread from a separate thread, just once after 2 seconds, and then the thread can exit. I'm just using threading as a way of creating a non-blocking 2 second pause (if there are other ways of accomplishing this, please let me know).
So I have a pyqtplot graph/plot that updates from a websocket stream and the gui can only be updated from the thread that starts it (the main one).
What happens is I open a websocket stream fill up a buffer for about 2 seconds, make an REST request, apply the updates from the buffer to the data from the REST request and then update the data/plot as new messages come in. Now the issue is I can't figure out how to create a non blocking 2 second pause in the main thread without creating a child thread. If I create a child thread and pass the object that contains the dictionary I want to update after 2 seconds, I get issues regarding updating the plot from a different thread. So what I THINK is happening is when that new spawned thread is spawned the reference to the object I want to update is actually the object itself, or the data (dictionary) containing the update data is now in a different thread as the gui and that causes issues.
open websocket --> start filling buffer --> wait 2 seconds --> REST request --> apply updates from buffer to REST data --> update data as new websocket updates/messages come in.
Unfortunately the websocket and gui only start when you run pg.exec() and you can't break them up to start individually, you create them and then start them together (or at least I have failed to find a way to start them individually, alternatively I also tried using a separate library to handle websockets however this requires starting a thread for incoming messages as well)
This is the minimum reproducible example, sorry it's pretty long but I couldn't really break it down anymore without removing required functionality as well as preserving context:
import json
import importlib
from requests.api import get
import functools
import time
import threading
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore
QtWebSockets = importlib.import_module(pg.Qt.QT_LIB + '.QtWebSockets')
class coin():
    """Order-book state for one symbol, rendered as pyqtgraph bid/ask bars.

    Lifecycle: websocket diffs are buffered (ordBookBuff) until the REST
    snapshot arrives; getOrderBookSnapshot then drops stale diffs, replays
    the rest, and live updates are applied directly afterwards.
    """

    def __init__(self):
        self.orderBook = {'bids': {}, 'asks': {}}  # price -> quantity
        self.SnapShotRecieved = False  # (sic) spelling kept: referenced by on_text_message
        self.last_uID = 0              # last applied update id
        self.ordBookBuff = []          # diffs buffered until the snapshot arrives
        self.pltwgt = pg.PlotWidget()
        self.pltwgt.show()
        self.bidBar = pg.BarGraphItem(x=[0], height=[1], width= 1, brush=(25,25,255,125), pen=(0,0,0,0))
        self.askBar = pg.BarGraphItem(x=[1], height=[1], width= 1, brush=(255,25,25,125), pen=(0,0,0,0))
        self.pltwgt.addItem(self.bidBar)
        self.pltwgt.addItem(self.askBar)

    def updateOrderBook(self, message):
        """Apply one depth-diff message; refresh the bars once a snapshot exists."""
        for side in ['a', 'b']:
            bookSide = 'bids' if side == 'b' else 'asks'
            for update in message[side]:
                if float(update[1]) == 0:
                    # Quantity 0 means "remove this price level".
                    try:
                        del self.orderBook[bookSide][float(update[0])]
                    except KeyError:  # BUG FIX: was a bare except; only a
                        pass          # missing level is expected here
                else:
                    self.orderBook[bookSide].update({float(update[0]): float(update[1])})
                    # Cap the book at 1000 levels, dropping the worst price.
                    while len(self.orderBook[bookSide]) > 1000:
                        del self.orderBook[bookSide][(min(self.orderBook['bids'], key=self.orderBook['bids'].get)) if side == 'b' else (max(self.orderBook['asks'], key=self.orderBook['asks'].get))]
        if self.SnapShotRecieved == True:
            # Materialize the dict views so setOpts gets plain sequences.
            self.bidBar.setOpts(x0=list(self.orderBook['bids'].keys()), height=list(self.orderBook['bids'].values()), width=1)
            self.askBar.setOpts(x0=list(self.orderBook['asks'].keys()), height=list(self.orderBook['asks'].values()), width=1)

    def getOrderBookSnapshot(self):
        """Fetch the REST snapshot, drop stale buffered diffs, replay the rest."""
        orderBookEncoded = get('https://api.binance.com/api/v3/depth?symbol=BTCUSDT&limit=1000')
        if orderBookEncoded.ok:
            rawOrderBook = orderBookEncoded.json()
            orderBook = {'bids': {}, 'asks': {}}
            for orders in rawOrderBook['bids']:
                orderBook['bids'].update({float(orders[0]): float(orders[1])})
            for orders in rawOrderBook['asks']:
                orderBook['asks'].update({float(orders[0]): float(orders[1])})
            # BUG FIX: the snapshot was built but never stored, so the book
            # started from the diffs alone.
            self.orderBook = orderBook
            last_uID = rawOrderBook['lastUpdateId']
            # BUG FIX: guard against an empty buffer before indexing [0]
            # (the original raised IndexError when no diff had arrived yet).
            while self.ordBookBuff and self.ordBookBuff[0]['u'] <= last_uID:
                del self.ordBookBuff[0]
            if len(self.ordBookBuff) >= 1:
                for eachUpdate in self.ordBookBuff:
                    self.last_uID = eachUpdate['u']
                    self.updateOrderBook(eachUpdate)
                self.ordBookBuff = []
            self.SnapShotRecieved = True
        else:
            print('Error retrieving order book.')  # RESTful request failed
def on_text_message(message, refObj):
    """Route an incoming websocket frame: buffer it pre-snapshot, apply it after."""
    parsed = json.loads(message)
    if refObj.SnapShotRecieved == False:
        # Snapshot not in yet: queue the diff for later replay.
        refObj.ordBookBuff.append(parsed)
    else:
        refObj.updateOrderBook(parsed)
def delay(myObj):
    # Worker-thread body: a pause that doesn't block the GUI, then the
    # snapshot fetch. NOTE(review): getOrderBookSnapshot ends up touching the
    # pyqtgraph bars via updateOrderBook — doing that off the GUI thread is
    # exactly what the question reports problems with; QTimer.singleShot(2000,
    # myObj.getOrderBookSnapshot) on the main thread would avoid it entirely.
    time.sleep(2)
    myObj.getOrderBookSnapshot()
def main():
    """Wire websocket -> buffer/book, schedule the snapshot fetch, start the GUI."""
    pg.mkQApp()
    refObj = coin()
    websock = QtWebSockets.QWebSocket()
    websock.connected.connect(lambda: print('connected'))
    websock.disconnected.connect(lambda: print('disconnected'))
    websock.error.connect(lambda e: print('error', e))
    websock.textMessageReceived.connect(functools.partial(on_text_message, refObj=refObj))
    # BUG FIX: Binance stream names are separated with '@', not '#'; the old
    # URL "btcusdt#depth#1000ms" names no valid stream.
    url = QtCore.QUrl("wss://stream.binance.com:9443/ws/btcusdt@depth@1000ms")
    websock.open(url)
    # Worker thread: 2 s pause then the REST snapshot; daemon so it can never
    # keep the interpreter alive after the GUI closes.
    getorderbook = threading.Thread(target=delay, args=(refObj,), daemon=True)
    getorderbook.start()
    pg.exec()  # starts both the Qt event loop and the websocket together

if __name__ == "__main__":
    main()
I am designing a new time/score keeper for an air hockey table using a PyBoard as a base. My plan is to use a TM1627 (4x7seg) for time display, rotary encoder w/ button to set the time, IR and a couple 7segs for scoring, IR reflector sensors for goallines, and a relay to control the fan.
I'm getting hung up trying to separate the clock into its own thread while focusing on reading the sensors. Figured I could use uasyncio to split everything up nicely, but I can't figure out where to put the directives to spin off a thread for the clock and eventually the sensors.
On execution right now, it appears the rotary encoder is assigned the default value, no timer is started, the encoder doesn't set the time, and the program returns control to REPL rather quickly.
Prior to trying to async everything, I had the rotary encoder and timer working well. Now, not so much.
from rotary_irq_pyb import RotaryIRQ
from machine import Pin
import tm1637
import utime
import uasyncio
async def countdown(cntr):
    """Tick the TM1637 display down to zero.

    cntr: the utime.time() value at which the countdown ends.
    Relies on the module-level `tm` display object.
    """
    enable_colon = True
    while True:
        # Recompute the remaining minutes/seconds each tick.
        # (renamed from min/sec, which shadowed the builtins)
        remaining = cntr - utime.time()
        mins = abs(int(remaining / 60))
        secs = remaining % 60
        enable_colon = not enable_colon  # alternately blink the colon
        tm.numbers(mins, secs, colon=enable_colon)
        if (mins + secs == 0):  # once both reach zero, stop
            break
        # BUG FIX: uasyncio.sleep takes SECONDS — sleep(500) parked the task
        # for over 8 minutes per tick; 0.5 s matches the colon-blink cadence.
        await uasyncio.sleep(0.5)
# NOTE(review): 'pyb' is used below but never imported — add 'import pyb'
# alongside the other imports on the pyboard.
X1 = pyb.Pin.board.X1
X2 = pyb.Pin.board.X2
Y1 = pyb.Pin.board.Y1
Y2 = pyb.Pin.board.Y2
button = pyb.Pin(pyb.Pin.board.X3, pyb.Pin.IN)

r = RotaryIRQ(pin_num_clk=X1,
              pin_num_dt=X2,
              min_val=3,
              max_val=10,
              reverse=False,
              range_mode=RotaryIRQ.RANGE_BOUNDED)
tm = tm1637.TM1637(clk=Y1, dio=Y2)

# Poll the encoder until the button is pressed; the knob value is the number
# of minutes to count down.
val_old = val_new = 0
while True:
    val_new = r.value()
    if (val_old != val_new):
        val_old = val_new
        print(str(val_new))
    if (button.value()):  # button pressed: freeze the selection
        endTime = utime.time() + (60 * val_new)
        r.close()  # turn off the rotary encoder
        break

# BUG FIX: create_task() only queues the coroutine — nothing executes until
# the scheduler runs. The original broke out of the loop without ever running
# the loop, so control fell straight back to the REPL with no timer started.
uasyncio.get_event_loop().run_until_complete(countdown(endTime))
I'm sure it's something simple, but this is the first non-CLI python script I've done, so I'm sure there are a bunch of silly mistakes. Any assistance would be appreciated.
Is it possible to create a Mayavi visualization that is updated on a timed bases rather than through Trait events?
I have a visualization that I need to update continually, but the data I am updating is coming from an external source (I.E. not an event from a user input from the graphical interface).
In the mean time, I need to be running a separate thread - so the Mayavi visualization can't control the main loop.
Can this be done? And if so How??
Any help would be very greatly appreciated.
Some dummy code for how I'm trying to tackle this is below:
import numpy
from mayavi.sources.array_source import ArraySource
from pyface.api import GUI
from mayavi.modules.api import Surface
from mayavi.api import Engine
import threading
import time
# Class runs a given function on a given thread at a given scan time
class TimedThread(threading.Thread):
    """Repeatedly run `funct(*funct_args)` every `scan_time` seconds.

    `thread` is a threading.Condition shared between workers; it is acquired
    around each call so cooperating TimedThreads never run concurrently.
    NOTE(review): run() loops forever — join() on this thread never returns.
    """

    def __init__(self, thread, scan_time, funct, *funct_args):
        threading.Thread.__init__(self)
        # Condition object used to serialise work between threads
        self.thread = thread
        # Target period, in seconds, for one iteration
        self.scan_time = scan_time
        # Function to be run each cycle
        self.run_function = funct
        # Positional arguments for the function
        self.funct_args = funct_args

    def run(self):
        while True:
            # Serialise with the other worker(s)
            self.thread.acquire()
            # Begin timing so the sleep can absorb the processing cost
            start_time = time.time()
            # Run the function that was passed to the thread
            self.run_function(*self.funct_args)
            # Wake any threads waiting on the condition, then release it
            self.thread.notify_all()
            self.thread.release()
            # Sleep off the remainder of the scan period
            end_time = time.time()
            elapsed_time = end_time - start_time
            sleep_time = self.scan_time - elapsed_time
            if sleep_time > 0:
                time.sleep(sleep_time)
            else:
                # BUG FIX: Python-2 'print x' statement -> print() function,
                # so the module parses under Python 3.
                print('Process time exceeds scan time')
# Called on a timed worker thread: pushes fresh data into the Mayavi source.
def update_visualisation(source):
    """Replace the source's scalar data and queue a GUI-thread redraw."""
    print("Updating Visualization...")
    # Stand-in for data arriving from an external producer.
    x = numpy.array([0, numpy.random.rand()])
    y = z = x
    source.scalar_data = [x, y, z]
    # The actual update must happen on the GUI thread.
    GUI.invoke_later(source.update)
# Function to run the main controller (comment fixed: this is not the visualisation)
def run_main():
    """Placeholder body for the main control loop."""
    # BUG FIX: Python-2 print statement -> print() function
    print('Running Main Controller')
if __name__ == '__main__':
    # Condition shared by both TimedThreads so their work never overlaps.
    c = threading.Condition()
    # Create a new Engine for Mayavi and start it
    engine = Engine()
    engine.start()
    # Create a new Scene
    engine.new_scene()
    # Create the data (two points on a line)
    x = numpy.linspace(0, 10, 2)
    y = z = x
    data = [x, y, z]
    # Create a new Source, map the data to the source and add it to the Engine
    src = ArraySource()
    src.scalar_data = data
    engine.add_source(src)
    # Create a Module
    surf = Surface()
    # Add the Module to the Engine
    engine.add_module(surf)
    # Create timed thread classes: visualisation refresh every 2 s,
    # controller every 1 s, both serialised on `c`.
    visualisation_thread = TimedThread(c, 2.0, update_visualisation, src)
    main_thread = TimedThread(c, 1.0, run_main)
    # Start & join the threads.
    # NOTE(review): TimedThread.run loops forever, so these join() calls never
    # return — and no GUI event loop is started on the main thread here, which
    # is why this version fails to display; see the mlab.animate solution below
    # in the original discussion.
    main_thread.start()
    visualisation_thread.start()
    main_thread.join()
    visualisation_thread.join()
Found solution in the following link:
Animating a mayavi points3d plot
Solved by using the @mlab.animate decorator to wrap the update function and using the yield statement to hand control back to the GUI, so the animation still allows user manipulation.
Solution below:
import numpy as np
import threading
import time
from mayavi import mlab
from mayavi.api import Engine
# Class runs a given function on a given thread at a given scan time
class SafeTimedThread(threading.Thread):
    """Repeatedly run `funct(*funct_args)` every `scan_time` seconds.

    `thread_condition` is a threading.Condition shared between workers; it is
    acquired around each call so cooperating threads never run concurrently.
    NOTE(review): run() loops forever — join() on this thread never returns.
    """

    def __init__(self, thread_condition, scan_time, funct, *funct_args):
        threading.Thread.__init__(self)
        # Thread condition for the function to operate with
        self.tc = thread_condition
        # Target period, in seconds, for one iteration
        self.scan_time = scan_time
        # Function to be run each cycle
        self.run_function = funct
        # Positional arguments for the function
        self.funct_args = funct_args

    def run(self):
        while True:
            # Serialise with the other worker(s)
            self.tc.acquire()
            # Begin timing so the sleep can absorb the processing cost
            start_time = time.time()
            # Run the function that was passed to the thread
            self.run_function(*self.funct_args)
            # Wake any threads waiting on the condition, then release it
            self.tc.notify_all()
            self.tc.release()
            # Sleep off the remainder of the scan period
            end_time = time.time()
            elapsed_time = end_time - start_time
            sleep_time = self.scan_time - elapsed_time
            if sleep_time > 0:
                time.sleep(sleep_time)
            else:
                # BUG FIX: Python-2 'print x' statement -> print() function,
                # so the module parses under Python 3.
                print('Process time exceeds scan time')
# Function to run the main controller
def run_main():
    """Placeholder body for the main control loop."""
    # BUG FIX: Python-2 print statement -> print() function
    print('Running Main Controller')
def init_vis():
    """Create the Mayavi engine/scene and return the initial plot object."""
    # Creates a new Engine, starts it and creates a new scene
    engine = Engine()
    engine.start()
    engine.new_scene()
    # Initialise the plot with random placeholder data: 3 coordinate rows
    # (x, y, z) of 2 points each.
    data = np.random.random((3, 2))
    x = data[0]
    y = data[1]
    z = data[2]
    drawing = mlab.plot3d(x, y, z, np.ones_like(x))
    return drawing
# BUG FIX: the decorator was mangled to '#mlab.animate(...)' (a comment), so
# the animator never wrapped this generator. The prose above ("using the
# @mlab.animate to call the update function") confirms the '@' form was meant.
@mlab.animate(delay=500, ui=False)
def update_visualisation(drawing):
    """Animator: refresh `drawing` every 500 ms, yielding control to the GUI."""
    while True:
        print ('Updating Visualisation')
        # Pretend to receive data from an external source.
        data = np.random.random((3, 2))
        x = data[0]
        y = data[1]
        z = data[2]
        drawing.mlab_source.set(x=x, y=y, z=z)
        # Hand control back to the GUI so the user can still interact.
        yield
if __name__ == '__main__':
    # Create Condition for safe threading
    c = threading.Condition()
    # Create display window
    dwg = init_vis()
    # BUG FIX: Thread.start() returns None, so the original
    # `main_thread = SafeTimedThread(...).start()` discarded the thread
    # object. Keep the object, then start it.
    main_thread = SafeTimedThread(c, 1.0, run_main)
    main_thread.start()
    # Drive the plot updates with the mlab animator on the GUI thread
    vis_thread = update_visualisation(dwg)
    mlab.show()