micro:bit Python: send a member of the Image collection over radio - python

micro:bit wireless radio BLE using Python
I want to send a member of the built-in Image collection (e.g. Image.HEART) over the radio. I know how to send strings and a custom image, but not a member of the Image collection.
I want the receiver's message_in string to be usable directly by display.show (or via an intermediate variable to modify). I don't want to test the received string against every possible member of the Image collection with if/else chains or a dictionary.
I've tried the ideas in the code below, but they all fail. I appreciate your help.
# micro:bit radio: send an image from the Image collection
from microbit import *
import radio

radio.config(group=1)
radio.on()

while True:
    if button_a.is_pressed():
        radio.send(Image.HEART)            # ?????
        # radio.send(index(Image.HEART))   # ?????
        # radio.send(str(Image.HEART))     # ?????
        # radio.send('Image.HEART')        # ?????
        # radio.send('HEART')              # ?????
    message_in = radio.receive()
    if message_in != None:
        display.show(message_in)  # show heart
        # and other tries at syntax for argument

This feels rather "hacky" and brittle, and I am happy to delete it if a better method shows up, but one way that works is like this.
If you run this (link to docs):
repr(Image.HEART)
you'll get this:
"Image('09090:99999:99999:09990:00900:')"
If you look at the documentation for the Image class (link to docs), you'll see that you can create a new Image from that string. So my suggestion, for the moment, is to do this:
# Get a string corresponding to Image.HEART
s = repr(Image.HEART)[7:-3]
... TRANSMIT ...
# Convert received string back into Image
I = Image(received)
I guess this is a slightly less brittle way of picking up digits and colons from the repr output, but it's still ugly:
s = ""
for char in repr(Image.SAD):
if char in '0123456789:': s += char

The way Mark has suggested works well and allows for any image to be sent. I've put it in a function to make it easier for me to experiment with.
from microbit import *
import radio

radio.config(group=1)
radio.on()

def tx_value(image_to_send):
    return ''.join([x for x in str(image_to_send) if x in '0123456789:'])

while True:
    if button_a.is_pressed() and button_b.is_pressed():
        radio.send(tx_value(Image('97531:97531:97531:97531:97531')))
    elif button_a.is_pressed():
        radio.send(tx_value(Image.DUCK))
    elif button_b.is_pressed():
        radio.send(tx_value(Image.HEART))
    sleep(250)  # sleep() takes milliseconds on the micro:bit
    message_in = radio.receive()
    if message_in != None:
        display.show(Image(message_in))
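One thing to keep in mind (as far as I can tell from the radio documentation): the default maximum payload is 32 bytes, which is just enough for a 5x5 image string (25 brightness digits plus the separating colons). If you ever need to send longer strings, raise the limit on both micro:bits, for example:

radio.config(group=1, length=251)  # 251 is the maximum payload length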
The other approach is to have a dictionary of images and just transmit the dictionary key:
from microbit import *
import radio

radio.config(group=1)
radio.on()

IMAGES = {'duck': Image.DUCK,
          'heart': Image.HEART,
          'fade': Image('97531:97531:97531:97531:97531')}

while True:
    if button_a.is_pressed() and button_b.is_pressed():
        radio.send('fade')
    elif button_a.is_pressed():
        radio.send('duck')
    elif button_b.is_pressed():
        radio.send('heart')
    sleep(250)  # sleep() takes milliseconds on the micro:bit
    message_in = radio.receive()
    if message_in != None:
        display.show(IMAGES[message_in])
This requires the dictionary to be defined the same way on both micro:bits.
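If the two boards can get out of sync, a defensive lookup on the receiver avoids a KeyError when an unknown key arrives. A minimal sketch, assuming the same IMAGES dictionary as above:

message_in = radio.receive()
if message_in is not None:
    image = IMAGES.get(message_in)
    if image is not None:
        display.show(image)
    else:
        display.scroll(message_in)  # fall back to scrolling the raw text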

Related

I'd like to change to a specific ACES color space at the click of a button in Blender

I'm currently switching over to the ACES color space in Blender, but I'm facing a big hurdle when importing assets with a large number of image textures: I have to manually change the color space of every image input, which gets annoying. My goal is to have a script that lets me select image files in Blender and, at the click of a button, change them to a specified color space. I found a script that does this, but it applies globally; I'd like to separate it out into different UI buttons that I can press to change the color space. Any help would be great, thank you.
I have tried this script, which gets me close, but it applies globally to all image textures.
import bpy

def change_color_space(object, color_space='sRGB'):
    '''
    Change the color space of all the image texture nodes.

    object (bpy.types.Object) - The object to take the materials from.
    color_space (enum in ['Filmic Log', 'Filmic sRGB', 'Linear', 'Linear ACES', 'Linear ACEScg', 'Non-Color', 'Raw', 'sRGB', 'XYZ'], default 'sRGB') - Color space of the image file, to convert to and from when saving and loading the image.
    '''
    if object.material_slots:
        for slot in object.material_slots:
            if slot.material:
                node_tree = slot.material.node_tree
                for node in node_tree.nodes:
                    if node.type == 'TEX_IMAGE' and node.image:
                        # note: this hard-coded name ignores the color_space argument
                        node.image.colorspace_settings.name = 'Role - matte_paint'

change_color_space(bpy.context.object, 'Non-Color')
I've come up with this solution so far. It works nicely, except when a normal map is plugged into the "Normal" input of the BSDF: it doesn't recognize the link to the incoming image texture, so it doesn't change that color space. Any help would be great; I'm not the best with coding.
import bpy

def change_base_color_space(object, color_space='sRGB'):
    '''
    Change the color space of the base color image texture node.

    object (bpy.types.Object) - The object to take the materials from.
    color_space (enum in ['Filmic Log', 'Filmic sRGB', 'Linear', 'Linear ACES', 'Linear ACEScg', 'Non-Color', 'Raw', 'sRGB', 'XYZ'], default 'sRGB') - Color space of the image file, to convert to and from when saving and loading the image.
    '''
    if object.material_slots:
        for slot in object.material_slots:
            if slot.material:
                node_tree = slot.material.node_tree
                for node in node_tree.nodes:
                    if node.type == 'BSDF_PRINCIPLED' and node.inputs['Base Color'].is_linked:
                        image_node = node.inputs['Base Color'].links[0].from_node
                        if image_node.image:
                            image_node.image.colorspace_settings.name = "Role - matte_paint"
                for node in node_tree.nodes:
                    if node.type == 'BSDF_PRINCIPLED' and node.inputs["Specular"].is_linked:
                        image_node = node.inputs["Specular"].links[0].from_node
                        if image_node.image:
                            image_node.image.colorspace_settings.name = "Role - data"
                for node in node_tree.nodes:
                    if node.type == 'BSDF_PRINCIPLED' and node.inputs["Roughness"].is_linked:
                        image_node = node.inputs["Roughness"].links[0].from_node
                        if image_node.image:
                            image_node.image.colorspace_settings.name = "Role - data"

    if object.material_slots:
        for slot in object.material_slots:
            if slot.material:
                node_tree = slot.material.node_tree
                for node in node_tree.nodes:
                    # note: node.type for a Normal Map node is 'NORMAL_MAP';
                    # 'ShaderNodeNormalMap' is its bl_idname, so this test never matches
                    if node.type == 'ShaderNodeNormalMap' and node.inputs['Color'].is_linked:
                        image_node = node.inputs['Color'].links[0].from_node
                        if image_node.image:
                            image_node.image.colorspace_settings.name = "Role - data"

change_base_color_space(bpy.context.object, 'Role - data')
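In case it helps, here is a sketch of a condensed version driven by one mapping. The only functional change from the script above is that it checks node.type == 'NORMAL_MAP', which (as far as I can tell) is what Blender reports for a Normal Map node; 'ShaderNodeNormalMap' is the node's bl_idname, so the original test never matches, which would explain why normal maps are skipped. The 'Role - ...' color space names are the ACES ones from the question, and the helper name and mapping are mine:

import bpy

# which color space to assign to the image feeding each Principled BSDF input
BSDF_INPUT_SPACES = {
    'Base Color': 'Role - matte_paint',
    'Specular': 'Role - data',
    'Roughness': 'Role - data',
}

def set_color_spaces(obj):
    for slot in obj.material_slots:
        if not slot.material:
            continue
        for node in slot.material.node_tree.nodes:
            if node.type == 'BSDF_PRINCIPLED':
                for input_name, space in BSDF_INPUT_SPACES.items():
                    sock = node.inputs[input_name]
                    if sock.is_linked:
                        image_node = sock.links[0].from_node
                        if getattr(image_node, "image", None):
                            image_node.image.colorspace_settings.name = space
            elif node.type == 'NORMAL_MAP' and node.inputs['Color'].is_linked:
                # the normal map image feeds the Normal Map node's Color input
                image_node = node.inputs['Color'].links[0].from_node
                if getattr(image_node, "image", None):
                    image_node.image.colorspace_settings.name = 'Role - data'

set_color_spaces(bpy.context.object)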

Python: calling a main-thread function from another thread via value rather than reference

I've been thinking about this for a couple of days now and I can't figure it out. I've searched around but couldn't find the answer I was looking for, so any help would be greatly appreciated.
Essentially, what I am trying to do is call a method on a group of objects in my main thread from a separate thread, just once after 2 seconds, and then the thread can exit. I'm only using threading as a way of creating a non-blocking 2-second pause (if there are other ways of accomplishing this, please let me know).
I have a pyqtgraph plot that updates from a websocket stream, and the GUI can only be updated from the thread that starts it (the main one).
What happens is: I open a websocket stream, fill up a buffer for about 2 seconds, make a REST request, apply the updates from the buffer to the data from the REST request, and then update the data/plot as new messages come in. The issue is that I can't figure out how to create a non-blocking 2-second pause in the main thread without creating a child thread. If I create a child thread and pass in the object that contains the dictionary I want to update after 2 seconds, I get errors about updating the plot from a different thread. What I THINK is happening is that the reference to the object I want to update is actually the object itself, or the data (dictionary) containing the updates now lives in a different thread from the GUI, and that causes the issues.
open websocket --> start filling buffer --> wait 2 seconds --> REST request --> apply updates from buffer to REST data --> update data as new websocket updates/messages come in.
Unfortunately the websocket and the GUI only start when you run pg.exec(), and you can't break them up to start individually; you create them and then start them together (or at least I have failed to find a way to start them individually; I also tried using a separate library to handle the websocket, but that requires starting a thread for incoming messages as well).
This is the minimal reproducible example. Sorry it's pretty long, but I couldn't break it down any further without removing required functionality or losing context:
import json
import importlib
from requests.api import get
import functools
import time
import threading
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore

QtWebSockets = importlib.import_module(pg.Qt.QT_LIB + '.QtWebSockets')


class coin():
    def __init__(self):
        self.orderBook = {'bids': {}, 'asks': {}}
        self.SnapShotRecieved = False
        self.last_uID = 0
        self.ordBookBuff = []
        self.pltwgt = pg.PlotWidget()
        self.pltwgt.show()
        self.bidBar = pg.BarGraphItem(x=[0], height=[1], width=1, brush=(25, 25, 255, 125), pen=(0, 0, 0, 0))
        self.askBar = pg.BarGraphItem(x=[1], height=[1], width=1, brush=(255, 25, 25, 125), pen=(0, 0, 0, 0))
        self.pltwgt.addItem(self.bidBar)
        self.pltwgt.addItem(self.askBar)

    def updateOrderBook(self, message):
        for side in ['a', 'b']:
            bookSide = 'bids' if side == 'b' else 'asks'
            for update in message[side]:
                if float(update[1]) == 0:
                    try:
                        del self.orderBook[bookSide][float(update[0])]
                    except:
                        pass
                else:
                    self.orderBook[bookSide].update({float(update[0]): float(update[1])})
                    while len(self.orderBook[bookSide]) > 1000:
                        del self.orderBook[bookSide][(min(self.orderBook['bids'], key=self.orderBook['bids'].get)) if side == 'b' else (max(self.orderBook['asks'], key=self.orderBook['asks'].get))]
        if self.SnapShotRecieved == True:
            self.bidBar.setOpts(x0=self.orderBook['bids'].keys(), height=self.orderBook['bids'].values(), width=1)
            self.askBar.setOpts(x0=self.orderBook['asks'].keys(), height=self.orderBook['asks'].values(), width=1)

    def getOrderBookSnapshot(self):
        orderBookEncoded = get('https://api.binance.com/api/v3/depth?symbol=BTCUSDT&limit=1000')
        if orderBookEncoded.ok:
            rawOrderBook = orderBookEncoded.json()
            orderBook = {'bids': {}, 'asks': {}}
            for orders in rawOrderBook['bids']:
                orderBook['bids'].update({float(orders[0]): float(orders[1])})
            for orders in rawOrderBook['asks']:
                orderBook['asks'].update({float(orders[0]): float(orders[1])})
            last_uID = rawOrderBook['lastUpdateId']
            while self.ordBookBuff[0]['u'] <= last_uID:
                del self.ordBookBuff[0]
                if len(self.ordBookBuff) == 0:
                    break
            if len(self.ordBookBuff) >= 1:
                for eachUpdate in self.ordBookBuff:
                    self.last_uID = eachUpdate['u']
                    self.updateOrderBook(eachUpdate)
                self.ordBookBuff = []
            self.SnapShotRecieved = True
        else:
            print('Error retrieving order book.')  # REST request failed


def on_text_message(message, refObj):
    messaged = json.loads(message)
    if refObj.SnapShotRecieved == False:
        refObj.ordBookBuff.append(messaged)
    else:
        refObj.updateOrderBook(messaged)


def delay(myObj):
    time.sleep(2)
    myObj.getOrderBookSnapshot()


def main():
    pg.mkQApp()
    refObj = coin()

    websock = QtWebSockets.QWebSocket()
    websock.connected.connect(lambda: print('connected'))
    websock.disconnected.connect(lambda: print('disconnected'))
    websock.error.connect(lambda e: print('error', e))
    websock.textMessageReceived.connect(functools.partial(on_text_message, refObj=refObj))

    url = QtCore.QUrl("wss://stream.binance.com:9443/ws/btcusdt@depth@1000ms")
    websock.open(url)

    getorderbook = threading.Thread(target=delay, args=(refObj,), daemon=True)  # , args=(lambda: websocketThreadExitFlag,)
    getorderbook.start()

    pg.exec()


if __name__ == "__main__":
    main()
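For what it's worth, Qt can provide the non-blocking 2-second pause on the main thread itself, so no extra thread (and no cross-thread GUI access) is needed. A minimal sketch against the example above, replacing the threading.Thread lines in main(); QTimer.singleShot fires the callback once from the Qt event loop started by pg.exec():

# inside main(), instead of creating and starting the delay thread:
QtCore.QTimer.singleShot(2000, refObj.getOrderBookSnapshot)  # runs once, about 2 s after the event loop starts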

PIL not writing text a second time onto image

I'm trying to make a somewhat ambitious hangman game for a Discord bot, and for that I need PIL to write text onto an image. After writing some text to an image and then trying to write text to that same image again, instead of sending the image with the second text added, it only sends the image with the first text. The weird thing is that the function goes through and saves the result as a new file with a different name, but without the second set of text. What gives? What am I doing wrong here?
import random, discord
from requests import get as reGet
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont

# Just a random image I found off google, not actually using it
inp_shell = reGet("https://upload.wikimedia.org/wikipedia/commons/thumb/a/a0/Circle_-_black_simple.svg/1200px-Circle_-_black_simple.svg.png")

# Yes, I know, probably not the best place to put the font, I'll change it later
fnt = ImageFont.truetype("modules/Roboto-Regular.ttf", size=40)

# Opens a list of words that it can get from; the file here is just a stub
with open('words_alpha.txt') as words_file:
    word_list = words_file.read().splitlines()


class img():
    def __init__(self):
        self.open_shell = Image.open(BytesIO(inp_shell.content))

    # Text in the top left
    async def tLeft(self, txt, inp_img):
        image = ImageDraw.Draw(inp_img)
        image.text((10, 10), txt, font=fnt, fill=(0, 0, 0))
        self.open_shell.save("tLeft.png")

    async def main_text(self, txt, inp_img):
        image = ImageDraw.Draw(inp_img)
        # Used to position the text in the center but currently is not being used
        x, y = inp_img.size
        pos = x / 2
        # I've tried changing the fill and position, and still nothing.
        # This is probably the source of the problem
        image.text((20, 20), txt, font=fnt, fill=(255, 255, 255))
        print(txt)
        self.open_shell.save("mainText.png")


# Creates a dictionary with the lengths of the words as the keys,
# I think anyways, I didn't write this
by_length = {}
for word in word_list:
    by_length.setdefault(len(word), []).append(word)


# Retrieves a random word with a certain length
async def word_finder(wordlength):
    global word
    word = random.choice(by_length[wordlength])
    print(word)


# Main function
async def hanggMan(message):  # double g in hang is intentional
    content = message.clean_content[11:]
    print(content)  # Just used to make sure it's going through
    # For now I'm using a word length of 5
    if content.lower() == "5":
        z = img()  # Creates an instance of the img class
        # Puts the image in an embed; ignore this
        embed = discord.Embed(title="testtest testtesttesttest")
        embed.type = "rich"
        embed.colour = discord.Color.gold()
        await word_finder(5)  # Tells the word_finder function to find a random word with a length of 5
        await z.tLeft(txt="tLeft", inp_img=z.open_shell)  # Calls the tLeft function and tells it to write "tLeft"
        # Calls the main_text function and tells it to write the word on top of
        # "tLeft.png". There is a print statement in the function and it actually
        # does print the word, so this is not likely to be the source of the problem
        await z.main_text(txt=word, inp_img=Image.open("tLeft.png"))
        embed.set_image(url="attachment://mainText.png")
        # The interesting thing about this is that it actually does save it as "mainText.png"
        # and sends the file properly, but the word is nowhere to be found
        await message.channel.send(embed=embed, file=discord.File("mainText.png"))
I can't run it, but when I started removing the unimportant code I saw that you're using the images in class img() the wrong way.
You always save the image you keep in self.open_shell.
In the first call you pass that same image as the argument:
z.tLeft(txt="tLeft", inp_img=z.open_shell)
so you add text to z.open_shell and you save z.open_shell.
But in the second call you pass a different image as the argument:
z.main_text(txt=word, inp_img=Image.open("tLeft.png"))
so you add text to the new image, but you again save z.open_shell, which still holds the older version.
You need
self.open_shell = inp_img
Like this
def main_text(self, txt, inp_img):
    self.open_shell = inp_img
    image = ImageDraw.Draw(inp_img)
    image.text((20, 20), txt, font=fnt, fill=(255, 255, 255))
    self.open_shell.save("mainText.png")
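Another option (just a sketch, not tested against the bot): skip the intermediate file entirely and always draw on self.open_shell, so every call works on the same PIL image:

async def tLeft(self, txt):
    ImageDraw.Draw(self.open_shell).text((10, 10), txt, font=fnt, fill=(0, 0, 0))

async def main_text(self, txt):
    ImageDraw.Draw(self.open_shell).text((20, 20), txt, font=fnt, fill=(255, 255, 255))
    self.open_shell.save("mainText.png")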

Raspberry Pi - Python & Flask web control with Adafruit DotStar LEDS

Apologies if this isn't the right place to ask, but I did some searching and couldn't find much to point me in the right direction; I wasn't quite sure what to search for. I am a novice with Python and programming in general, but I can usually do enough googling and borrowing of other code snippets to get my projects running. However, I'm at a bit of a roadblock here.
I need to control an Adafruit DotStar light strip with a Flask web app. I've got the Flask app working, I've done a simple proof of concept turning an LED on and off, and I can start my light strip script, but the light strip code needs to loop continuously and still be able to change "modes". I have several different images that display on the light strip and I would like to be able to select which one(s) is/are playing, but for now I would mainly like to be able to start and stop a "shuffle all" mode. If I run the module in a while loop it just loops forever and I can't change the argument to a different "mode". I built a simple script based on Adafruit's DotStar library (specifically the image persistence-of-vision example; I'm just using PNG images as the map for the different light strip "shows").
It all currently works, except that each mode obviously only runs once. When I had it all in a while loop it just looped the first selected mode forever and I was unable to turn it off or switch modes. I also thought I should maybe use multiprocessing, and I looked into getting that working, but I couldn't figure out how to stop a process once it had started.
Here is the light strip script:
(The 'off' mode is just a black image. I'm sure there's a cleaner way to do this, but I'm not sure how to do that either.)
import Image
from dotstar import Adafruit_DotStar
import random

def lightstrip(mode):
    loopLength = 120  # loop length in pixels
    fade = "/home/pi/lightshow/images/fade.png"
    sparkle = "/home/pi/lightshow/images/sparkle.png"
    steeplechase = "/home/pi/lightshow/images/steeplechase.png"
    bump = "/home/pi/lightshow/images/bump.png"
    spaz = "/home/pi/lightshow/images/spaz.png"
    sine = "/home/pi/lightshow/images/sine.png"
    bounce = "/home/pi/lightshow/images/bounce.png"
    off = "/home/pi/lightshow/images/null.png"

    numpixels = 30
    datapin = 23
    clockpin = 24
    strip = Adafruit_DotStar(numpixels, 100000)

    rOffset = 3
    gOffset = 2
    bOffset = 1

    strip.begin()

    if mode == 1:
        options = [fade, sparkle, steeplechase, bump, spaz, sine, bounce]
        print "Shuffling All..."
    if mode == 2:
        options = [bump, spaz, sine, bounce]
        print "Shuffling Dance..."
    if mode == 3:
        options = [fade, sparkle, steeplechase]
        print "Shuffling Chill..."
    if mode == 0:
        choice = off
        print "Lightstrip off..."
    if mode != 0:
        choice = random.choice(options)

    print "Loading..."
    img = Image.open(choice).convert("RGB")
    pixels = img.load()
    width = img.size[0]
    height = img.size[1]
    print "%dx%d pixels" % img.size

    # Calculate gamma correction table, makes mid-range colors look 'right':
    gamma = bytearray(256)
    for i in range(256):
        gamma[i] = int(pow(float(i) / 255.0, 2.7) * 255.0 + 0.5)

    # Allocate list of bytearrays, one for each column of image.
    # Each pixel REQUIRES 4 bytes (0xFF, B, G, R).
    print "Allocating..."
    column = [0 for x in range(width)]
    for x in range(width):
        column[x] = bytearray(height * 4)

    # Convert entire RGB image into column-wise BGR bytearray list.
    # The image-paint.py example proceeds in R/G/B order because it's counting
    # on the library to do any necessary conversion. Because we're preparing
    # data directly for the strip, it's necessary to work in its native order.
    print "Converting..."
    for x in range(width):          # For each column of image...
        for y in range(height):     # For each pixel in column...
            value = pixels[x, y]    # Read pixel in image
            y4 = y * 4              # Position in raw buffer
            column[x][y4] = 0xFF                        # Pixel start marker
            column[x][y4 + rOffset] = gamma[value[0]]   # Gamma-corrected R
            column[x][y4 + gOffset] = gamma[value[1]]   # Gamma-corrected G
            column[x][y4 + bOffset] = gamma[value[2]]   # Gamma-corrected B

    print "Displaying..."
    count = loopLength
    while (count > 0):
        for x in range(width):      # For each column of image...
            strip.show(column[x])   # Write raw data to strip
        count = count - 1
And the main.py script for running the web app:
from flask import *
from lightshow import *
from multiprocessing import Process
import RPi.GPIO as GPIO
import Image
from dotstar import Adafruit_DotStar
import random
import time

app = Flask(__name__)

@app.route("/")
def hello():
    return render_template('index.html')

@app.route("/lightstrip/1", methods=['POST'])
def shuffleall():
    lightstrip(1)
    return ('', 204)

@app.route("/lightstrip/2", methods=['POST'])
def shuffledance():
    lightstrip(2)
    return ('', 204)

@app.route("/lightstrip/3", methods=['POST'])
def shufflechill():
    lightstrip(3)
    return ('', 204)

@app.route("/lightstrip/0", methods=['POST'])
def off():
    lightstrip(0)
    return ('', 204)

if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True)
Again, I'm at a bit of a loss here. This may be a simple fix, or I may be approaching it totally wrong, but any and all help would be appreciated; I am a complete beginner at approaching a problem like this. Thank you.
Here's an example showing how to start and stop processes using multiprocessing and psutil. In this example the task_runner kills any running processes before starting a new one.
from flask import Flask
import multiprocessing
import psutil

app = Flask(__name__)

def blink(var):
    while True:
        # do stuff
        print(var)

def task_runner(var):
    processes = psutil.Process().children()
    for p in processes:
        p.kill()
    process = multiprocessing.Process(target=blink, args=(var,))
    process.start()

@app.route("/red")
def red():
    task_runner('red')
    return 'red started'

@app.route("/blue")
def blue():
    task_runner('blue')
    return 'blue started'

if __name__ == "__main__":
    app.run()
For your question, the task_runner would look something like:
def task_runner(mode):
    processes = psutil.Process().children()
    for p in processes:
        p.kill()
    process = multiprocessing.Process(target=lightstrip, args=(mode,))
    process.start()
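The existing Flask routes can then hand the mode number to task_runner instead of calling lightstrip directly. A sketch using a single parameterized route (the URL shape and function name are just a suggestion):

@app.route("/lightstrip/<int:mode>", methods=['POST'])
def set_mode(mode):
    task_runner(mode)
    return ('', 204)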

Use decodebin with adder

I'm trying to create an audio stream that has a constant audio source (in this case, audiotestsrc) to which I can occasionally add sounds from files (of various formats, which is why I'm using decodebin) through the play_file() method. I use an adder for that purpose. However, for some reason I cannot add the second sound correctly: not only does the program play the new sound incorrectly, it also completely stops the original audiotestsrc. Here's my code so far:
import gst; import gobject; gobject.threads_init()

pipe = gst.Pipeline()

adder = gst.element_factory_make("adder", "adder")
first_sink = adder.get_request_pad('sink%d')
pipe.add(adder)

test = gst.element_factory_make("audiotestsrc", "test")
test.set_property('freq', 100)
pipe.add(test)
testsrc = test.get_pad("src")
testsrc.link(first_sink)

output = gst.element_factory_make("alsasink", "output")
pipe.add(output)
adder.link(output)

pipe.set_state(gst.STATE_PLAYING)

raw_input('Press key to play sound')

def play_file(filename):
    adder_sink = adder.get_request_pad('sink%d')

    audiofile = gst.element_factory_make('filesrc', 'audiofile')
    audiofile.set_property('location', filename)
    decoder = gst.element_factory_make('decodebin', 'decoder')

    def on_new_decoded_pad(element, pad, last):
        pad.link(adder_sink)

    decoder.connect('new-decoded-pad', on_new_decoded_pad)

    pipe.add(audiofile)
    pipe.add(decoder)
    audiofile.link(decoder)

    pipe.set_state(gst.STATE_PAUSED)
    pipe.set_state(gst.STATE_PLAYING)

play_file('sample.wav')

while True:
    pass
Thanks to moch on #gstreamer, I realized that all the streams feeding the adder should have the same format. I modified the above script so that the caps "audio/x-raw-int, endianness=(int)1234, channels=(int)1, width=(int)16, depth=(int)16, signed=(boolean)true, rate=(int)11025" (for example) are enforced before every input to the adder.
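For reference, this is roughly what that looks like on the decodebin branch (a sketch against the old gst-python 0.10 API used above; the element and variable names are mine): the dynamic decodebin pad is linked through audioconvert and audioresample into a capsfilter that enforces the common format, and the capsfilter feeds the requested adder pad.

CAPS = gst.Caps("audio/x-raw-int, endianness=(int)1234, channels=(int)1, "
                "width=(int)16, depth=(int)16, signed=(boolean)true, rate=(int)11025")

def play_file(filename):
    adder_sink = adder.get_request_pad('sink%d')

    audiofile = gst.element_factory_make('filesrc')
    audiofile.set_property('location', filename)
    decoder = gst.element_factory_make('decodebin')
    convert = gst.element_factory_make('audioconvert')
    resample = gst.element_factory_make('audioresample')
    capsfilter = gst.element_factory_make('capsfilter')
    capsfilter.set_property('caps', CAPS)

    def on_new_decoded_pad(element, pad, last):
        # link the dynamic decodebin pad into the conversion chain
        pad.link(convert.get_pad('sink'))

    decoder.connect('new-decoded-pad', on_new_decoded_pad)

    pipe.add(audiofile, decoder, convert, resample, capsfilter)
    audiofile.link(decoder)
    convert.link(resample)
    resample.link(capsfilter)
    capsfilter.get_pad('src').link(adder_sink)

    pipe.set_state(gst.STATE_PAUSED)
    pipe.set_state(gst.STATE_PLAYING)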
