Accessing PyTango Attribute values

I'm trying to write a device server with PyTango. I created a list of attributes for the server. How do I access the value that was stored in an attribute with the set_value() function?
If I have this attribute, for example, how can I retrieve the value?
x_pixel_size = attribute(label="x pixel size", dtype=float,
                         display_level=DispLevel.OPERATOR,
                         unit='microns', format='5.2f',
                         access=AttrWriteType.READ,
                         doc='Size of a single pixel along x-axis of the detector')

self.x_pixel_size.set_value(720)
I want to retrieve the value 720 from the Attribute x_pixel_size. Is there a way to do this without using additional variables in the server?

Of course it is possible. You can do it this way:
from PyTango.server import run
from PyTango.server import Device, DeviceMeta
from PyTango.server import attribute, command, device_property
from PyTango import AttrQuality, AttrWriteType, DispLevel

class DeviceClass(Device):
    __metaclass__ = DeviceMeta

    def init_device(self):
        self.x_pixel_size.set_write_value(720)

    x_pixel_size = attribute(label="x pixel size", dtype=float,
                             display_level=DispLevel.OPERATOR,
                             unit='microns', format='5.2f',
                             access=AttrWriteType.READ_WRITE,
                             doc='Size of a single pixel along x-axis of the detector')

    def write_x_pixel_size(self, value):
        pass

    def read_x_pixel_size(self):
        return self.x_pixel_size.get_write_value()

def main():
    run((DeviceClass,))

if __name__ == "__main__":
    main()
And you can test it using Python console:
>>> from PyTango import DeviceProxy
>>> dev = DeviceProxy('test/device/1')
>>> dev.x_pixel_size
720.0
>>> dev.x_pixel_size = 550
>>> dev.x_pixel_size
550.0
>>>
If you have any further questions, just ask. In practice, though, I use additional instance variables to keep the attribute's value myself.
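For reference, a minimal sketch of that instance-variable pattern, reusing the attribute definition from the question; the value simply lives in self._x_pixel_size and the read method returns it:

from PyTango.server import run
from PyTango.server import Device, DeviceMeta, attribute
from PyTango import AttrWriteType, DispLevel

class DeviceClass(Device):
    __metaclass__ = DeviceMeta

    def init_device(self):
        Device.init_device(self)
        # Keep the current value in a plain instance variable.
        self._x_pixel_size = 720.0

    x_pixel_size = attribute(label="x pixel size", dtype=float,
                             display_level=DispLevel.OPERATOR,
                             unit='microns', format='5.2f',
                             access=AttrWriteType.READ,
                             doc='Size of a single pixel along x-axis of the detector')

    def read_x_pixel_size(self):
        # The read hook just hands back the stored value.
        return self._x_pixel_size

if __name__ == "__main__":
    run((DeviceClass,))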

Related

TF2 transform can't find an actually existing frame

In a global planner node that I wrote, I have the following init code
#!/usr/bin/env python
import rospy
import copy
import tf2_ros
import time
import numpy as np
import math
import tf
from math import sqrt, pow
from geometry_msgs.msg import Vector3, Point
from std_msgs.msg import Int32MultiArray
from std_msgs.msg import Bool
from nav_msgs.msg import OccupancyGrid, Path
from geometry_msgs.msg import PoseStamped, PointStamped
from tf2_geometry_msgs import do_transform_point
from Queue import PriorityQueue

class GlobalPlanner():
    def __init__(self):
        print("init global planner")
        self.tfBuffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tfBuffer)
        self.drone_position_sub = rospy.Subscriber('uav/sensors/gps', PoseStamped, self.get_drone_position)
        self.drone_position = []
        self.drone_map_position = []
        self.map_sub = rospy.Subscriber("/map", OccupancyGrid, self.get_map)
        self.goal_sub = rospy.Subscriber("/cell_tower/position", Point, self.getTransformedGoal)
        self.goal_position = []
        self.goal = Point()
        self.goal_map_position = []
        self.occupancy_grid = OccupancyGrid()
        self.map = []
        self.p_path = Int32MultiArray()
        self.position_pub = rospy.Publisher("/uav/input/position", Vector3, queue_size=1)
        # next_movement in
        self.next_movement = Vector3
        self.next_movement.z = 3
        self.path_pub = rospy.Publisher('/uav/path', Int32MultiArray, queue_size=1)
        self.width = rospy.get_param('global_planner_node/map_width')
        self.height = rospy.get_param('global_planner_node/map_height')
        # Check whether there is a path plan
        self.have_plan = False
        self.path = []
        self.euc_distance_drone_goal = 100
        self.twod_distance_drone_goal = []
        self.map_distance_drone_goal = []
        self.mainLoop()
And there is a callback function called getTransformedGoal, which transforms the goal position from the "cell_tower" frame to the "world" frame. It looks like this:
def getTransformedGoal(self, msg):
    self.goal = msg
    try:
        # Lookup the tower to world transform
        transform = self.tfBuffer.lookup_transform('cell_tower', 'world', rospy.Time())
        # transform = self.tfBuffer.lookup_transform('world','cell-tower' rospy.Time())
        # Convert the goal to a PointStamped
        goal_pointStamped = PointStamped()
        goal_pointStamped.point.x = self.goal.x
        goal_pointStamped.point.y = self.goal.y
        goal_pointStamped.point.z = self.goal.z
        # Use the do_transform_point function to convert the point using the transform
        new_point = do_transform_point(goal_pointStamped, transform)
        # Convert the point back into a vector message containing integers
        transform_point = [new_point.point.x, new_point.point.y]
        # Publish the vector
        self.goal_position = transform_point
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
        print(e)
        print('global_planner tf2 exception, continuing')
The error message said that
"cell_tower" passed to lookupTransform argument target_frame does not exist.
I checked the rqt graph for both active and all nodes; when only active nodes are shown, the /tf topic is not being subscribed to by the global planner node. The following image is for active nodes:
[screenshot: rqt graph, active nodes only]
and this image is for all nodes (including non-active):
[screenshot: rqt graph, all nodes]
But I have actually set up the listener. I have another node, called local planner, that uses the same strategy, and it works for that node, but not for the global planner.
I'm not sure why this is.
Try adding a timeout to your lookup_transform() function call, as your transformation may not be available when you need it:
transform = self.tfBuffer.lookup_transform('cell_tower', 'world', rospy.Time.now(), rospy.Duration(1.0))
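In context, a minimal sketch of the callback with that timeout dropped in; everything else (frame names, exception handling) is unchanged from the question:

def getTransformedGoal(self, msg):
    self.goal = msg
    try:
        # Wait up to one second for the transform to become available.
        transform = self.tfBuffer.lookup_transform('cell_tower', 'world',
                                                   rospy.Time.now(), rospy.Duration(1.0))
        goal_pointStamped = PointStamped()
        goal_pointStamped.point.x = self.goal.x
        goal_pointStamped.point.y = self.goal.y
        goal_pointStamped.point.z = self.goal.z
        new_point = do_transform_point(goal_pointStamped, transform)
        self.goal_position = [new_point.point.x, new_point.point.y]
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
        print(e)
        print('global_planner tf2 exception, continuing')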

python3: how to change the attribute .P0 by passing an argument to the call

I am trying to pass a choice of pins to the Raspberry Pi when creating channels, and I want to change only the .P(value) when calling the method. The way it is now, if I call the class from another class I have to import all the libraries again. Below is the code.
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn

class createChannel():
    def createChannel(self, channelNumber):
        # create the spi bus
        spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
        # create the cs (chip select)
        cs = digitalio.DigitalInOut(board.D22)
        # create the mcp object
        mcp = MCP.MCP3008(spi, cs)
        self.channelNumber = channelNumber
        chan = AnalogIn(mcp, self.channelNumber)
        rawValue = chan.voltage
        return rawValue
Then I call it like
sensor = createChannel()
rawValue = sensor.createChannel(MCP.P0)
So when I create another class to use the data retrieved from the sensor and I call the function, I need to import again all the libraries that work with the MCP. I want to call it like this:
sensor = createChannel()
rawValue = sensor.createChannel(P0)
But I cannot find a way to change just the last part (MCP.P0) by passing an argument in the call that works.
So when I create the other class I have to do this and import all the libraries again:
def sensorOne(self):
    # create the spi bus
    spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
    # create the cs (chip select)
    cs = digitalio.DigitalInOut(board.D22)
    # create the mcp object
    mcp = MCP.MCP3008(spi, cs)
    # get date and time
    outTime = str(datetime.now())
    # instance of the createChannel class. If the TDS sensor is connected to pin 0 you do as below.
    # I will also do pH on pin 2, but it is commented out because I am not sure anything is connected there yet.
    sensor = createChannel()
    # get data from sensor on pin 1
    outData = sensor.createChannel(MCP.P1)
    return outTime, outData
If the spacing is not a hundred percent right, please excuse it; I cannot see, as I am blind. But the code works. I just need to be able to change the .P0 to, for instance, P1 by passing an argument to the call.
Thank you
I think you can define constants P0 to P7 in your module that defines createChannel, and then other files can import those constants from that module instead of getting them from MCP directly. Also, you can just specify a channel with an integer from 0 to 7.
I found some online documentation for adafruit_mcp3xxx.mcp3008. I think it means the channel names like MCP.P0 and MCP.P1 are really just integer values like 0 and 1, respectively.
The ADC chips’ input pins (AKA “channels”) are aliased in this library
as integer variables whose names start with “P” (eg MCP3008.P0 is
channel 0 on the MCP3008 chip). Each module that contains a driver
class for a particular ADC chip has these aliases predefined
accordingly. This is done for code readability and prevention of
erroneous SPI commands.
You can make channel names available to users of your createChannel method by defining constants P0, P1 and so forth in the module that defines createChannel.
## File createChannel.py
import adafruit_mcp3xxx.mcp3008 as MCP

# Alias the channel constants for convenience.
P0 = MCP.P0
P1 = MCP.P1
# etc.
P7 = MCP.P7

class createChannel():
    def createChannel(self, channelNumber):
        # ... Do stuff with channelNumber.
        return channelNumber  # Or whatever you need to return.
In another file that wants to use createChannel you can import the channel constants as well as the method. Alternatively, I think you can just access a channel by specifying an integer from 0 to 7.
## Another file.
from createChannel import createChannel, P0, P1, P7

sensor = createChannel()

# Access a single pin.
rawValue = sensor.createChannel(P0)

# Access each pin.
rawBus = [sensor.createChannel(pin) for pin in range(8)]
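If it helps, here is a rough sketch that combines both ideas: the class builds the SPI bus and MCP object once in __init__, and createChannel accepts a plain integer channel (0 to 7), so other files only need to import this one module. This is a guess at what you want and has not been tested on hardware:

## File createChannel.py
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn

class createChannel():
    def __init__(self):
        # Build the SPI bus, chip select and MCP object once, up front.
        spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
        cs = digitalio.DigitalInOut(board.D22)
        self.mcp = MCP.MCP3008(spi, cs)

    def createChannel(self, channelNumber):
        # channelNumber is 0..7; MCP.P0..MCP.P7 are the same integers.
        chan = AnalogIn(self.mcp, channelNumber)
        return chan.voltage

Another file can then do sensor = createChannel() once and call sensor.createChannel(1) (or P1 from the aliases above) without importing the Adafruit libraries itself.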

How do I use CSV data as variables in a formula?

I'm trying to use data from a CSV file ( https://www.kaggle.com/jingbinxu/sample-of-car-data ). I only need the horsepower and weight columns as variables for the equation: 1/4 mile ET = 6.290 * (weight/hp) ** .33, but it won't apply it. I don't know if the storage is working or whether I shouldn't do it as a class. When I run the program it doesn't show any errors, but it doesn't show results either. Then I have to plot the results, but I don't think it's even calculating and storing them. Any help is appreciated. Thanks in advance.
Here's the current code I have:
import numpy as np

class car_race_analysis():
    def __init__(self, filename):
        import numpy as np
        self.data = np.genfromtxt(filename, delimiter=',', skip_header=1)

    def race_stats(self, w, h):
        # cars in data
        cars = np.unique(self.data[:, 0])
        # storage for output
        race_times = []
        # for each car
        for car in cars:
            # mask
            mask = self.data[:, 0] == car
            # get data
            w = self.data[mask, 12]
            h = self.data[mask, 18]
            # apply formula
            qrtr_mile = 6.290 * (w / h) ** .33
            race_times.append(qrtr_mile)
        # new attribute
        self.race_times = np.array(race_times)
        print(race_times)

    def trend_plotter(self):
        import matlib.pyplot as plt
        # inputs
        self.race_stats
        cars = np.unique(self.data[:, 0])
        # plot
        plt.plot(cars, self.race_times)
        plt.xlabel("Car")
        plt.ylabel("1/4 Mile Time")
        plt.savefig("trend_plot.png")

filename = 'car_data.csv'
Two problems:
I think you meant matplotlib instead of matlib. Make sure you install it (pip3 install matplotlib --user) and edit your code accordingly.
Your previous code wasn't working because you weren't instantiating a class or running any methods. The only "work" your program did was to define the class and then set a filename variable.
To solve #2, replace your filename=... line with the code below.
Here's what it does:
It checks to see if the file is being run directly (i.e. from a command prompt, such as python3 <your_file_name>.py). If this class is being imported and used from a different Python file, this code would not be executed. More reading: https://www.geeksforgeeks.org/what-does-the-if-name-main-do/
We instantiate an instance of your class and supply the filename variable, since that is what your class's __init__ method expects.
We invoke the trend_plotter method on the instance of the class.
if __name__ == '__main__':
    filename = 'car_data.csv'
    car_analysis = car_race_analysis(filename)
    car_analysis.trend_plotter()
Even with those changes, your program will not work, because it has other errors. I made a guess at fixing it, which I've pasted below, but I strongly encourage you to diff my changes to understand what I altered and to be sure it does what you want.
import numpy as np
import matplotlib.pyplot as plt

class car_race_analysis():
    race_times = []
    cars = []

    def __init__(self, filename):
        import numpy as np
        self.data = np.genfromtxt(filename, delimiter=',', skip_header=1)

    def race_stats(self, w, h):
        # cars in data
        self.cars = np.unique(self.data[:, 0])
        # storage for output
        self.race_times = []
        # for each car
        for car in self.cars:
            # mask
            mask = self.data[:, 0] == car
            # get data
            w = self.data[mask, 12]
            h = self.data[mask, 18]
            # apply formula
            qrtr_mile = 6.290 * (w / h) ** .33
            self.race_times.append(qrtr_mile)
        # new attribute
        self.race_times = np.array(self.race_times)

    def trend_plotter(self):
        # inputs
        self.race_stats(len(self.cars), len(self.race_times))
        # plot
        plt.plot(self.cars, self.race_times)
        plt.xlabel("Car")
        plt.ylabel("1/4 Mile Time")
        plt.savefig("trend_plot.png")
        plt.show()

if __name__ == '__main__':
    filename = 'car_data.csv'
    car_analysis = car_race_analysis(filename)
    car_analysis.trend_plotter()

I cannot figure out how to share a session in a bottle request

I've been tinkering with this for hours now and I just can't seem to find a way to make this work. It seems like it should be simple, and I'm sure it is, but I'm stumped.
I have one module called 'server.py' that handles all of the routing with bottle; this is the main point of execution. An example of a request handler follows (I'm generalizing, as my codebase is rather hefty and most of it is irrelevant to the question):
server.py
@route('home')
def home():
    page = Page('home')  # A template manager I made
    objects = db.get_objects(10)  # This is what I can't get to work
    return page.render(objects=objects)
I would like the code to be that simple from the server side, with all database interaction done in db.py using helper functions. However, I would like to use the objects returned from queries, which are still attached to the session, so the session must be closed outside of db.get_objects. A session should be created and closed on each request. I could do that manually from home() like so:
server.py
@route('home')
def home():
    session = Session()
    page = Page('home')  # A jinja template manager I made
    objects = db.get_objects(session, 10)
    document = page.render(objects=objects)
    session.close()
    return document
I don't mind opening and closing the session every time; that seems logical and unavoidable, whether done directly or through another object/function. But I do not want to have to pass that session around (manually) to every db helper function; that just seems messy to me.
I feel this problem can be solved with some OOP, a session manager class or something that is shared between the two, but I cannot figure out how to design or share it. The best idea I have come up with so far is to wrap my entire db.py in a class and have the constructor create the session. That would work for the helper functions, but I also have a bunch of other objects in db.py that need access to the session as well, such as the following, which is an actual object from my codebase:
db.py
class Sticker(_Base):
    __tablename__ = 'sticker'
    sticker_id = Column(Integer, ForeignKey('product.product_id'), primary_key=True)
    sticker_name = Column(String)
    svg_location = Column(String, unique=True)
    svg_width = Column(Numeric, nullable=False)
    svg_height = Column(Numeric, nullable=False)
    shaped = Column(Boolean)  # Whether the cutpath contours the image

    @reconstructor
    def _get_related_properties(self, init=False):
        '''Fetches and fills out all properties that are created by the
        __init__ constructor that are not in the orm constructor.'''
        if not init:
            session = Session()
            self._product = session.query(Product).filter(Product.product_id == self.sticker_id).first()
            category_id = session.query(ProductCategory).filter(ProductCategory.product_id == self.sticker_id).first()
            session.close()
        self.sticker_id = self._product.product_id
        self.product_type = self._product.product_type
        self.date_added = self._product.date_added
        self.sticker_name = self._product.product_name

    def _get_svg_size(self):
        """Returns a tuple of the width, height of an svg"""
        # Currently only works with pixels I think. I know it fails when saved as points.
        # May want to improve this for future versions.
        # May also consider moving this function to an external util file or something.
        # Possible units: (~"em" | ~"ex" | ~"px" | ~"in" | ~"cm" | ~"mm" | ~"pt" | ~"pc")
        # Also may use viewbox attribute to determine aspect ratio and set sizes algorithmically.
        import xml.etree.ElementTree as ET
        import decimal
        # Set decimal precision
        decimal.getcontext().prec = 7
        tree = ET.parse(self.svg_location)
        root = tree.getroot()
        width = None
        height = None
        width_attr = root.get('width')
        height_attr = root.get('height')
        # Get measurement units
        units = width_attr[-2:]
        if units[-1] == '%':
            units = '%'
        elif not units.isalpha():
            # if units not set assume px
            width = decimal.Decimal(width_attr)
            height = decimal.Decimal(height_attr)
            units = 'px'
        if units != 'px':
            width = decimal.Decimal(width_attr[:-2])
            height = decimal.Decimal(height_attr[:-2])
            # Convert to px if not already
            # Currently only supports in, cm, mm, and pt
            # Assumes DPI is 72
            MMPI = 2.834645669291339
            DPI = 72
            if units == 'in':
                width *= DPI
                height *= DPI
            elif units == 'pt':
                width /= DPI
                height /= DPI
            elif units == 'mm':
                width *= MMPI
                height *= MMPI
            elif units == 'cm':
                width *= MMPI * 10
                height *= MMPI * 10
            else:
                raise ValueError('Unsupported svg size unit:', units)
        return width, height

    def __init__(self, svg_location, name='', category='', metatags=[], sticker_sizes=None, active=False):
        # If no name given use filename
        if not name:
            from os.path import basename
            name = basename(svg_location).rstrip('.svg')
        # Create parent product and save to db to generate primary key/product id
        session = Session()
        self._product = Product(product_name=name, product_type='sticker', active=active)
        session.add(self._product)
        session.commit()
        # TODO: Handle category and metatags
        # Categories should probably be created explicitly by the admin, and so should exist
        # at the time of sticker creation. Metatags are more numerous and created on the fly
        # and so should be created automatically by the sticker constructor.
        # TODO: Expand the sticker table to reference these values from the product table maybe?
        self.sticker_id = self._product.product_id
        self.svg_location = svg_location
        self.svg_width, self.svg_height = self._get_svg_size()
        self._get_related_properties(init=True)
        # Add to the Database
        session.add(self)
        session.commit()
        # Get sticker sizes
        self.sticker_sizes = []
        # Check if a size tuple was added, default is empty
        if sticker_sizes:
            for size in sticker_sizes:
                sticker_size = StickerSize(self.sticker_id, size[0], size[1])
                session.add(sticker_size)
                self.sticker_sizes.append(StickerSize)
            session.commit()
        session.close()
Most of that is unimportant, but as you can see, in many cases I need to query the database from within my ORM-mapped objects, so they too need access to the session. So, a simple question, I hope: how can I do that? Can it even be done, or am I approaching this in the wrong way? If I am approaching it wrong, how so, and could you offer a design pattern that would work?
I found a solution: attach the session to the request object, which is, obviously, unique to each request and can be shared between modules.
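For what it's worth, a minimal sketch of that idea using bottle's request hooks, assuming Session is the SQLAlchemy sessionmaker defined in db.py and Page is the template manager from the question:

# server.py
from bottle import hook, request, route
import db

@hook('before_request')
def open_session():
    # Attach a fresh session to the (thread-local) request object.
    request.db_session = db.Session()

@hook('after_request')
def close_session():
    request.db_session.close()

@route('home')
def home():
    page = Page('home')
    objects = db.get_objects(10)  # db reads the session off the request itself
    return page.render(objects=objects)

# db.py
from bottle import request

def get_objects(limit):
    # The same request object is importable here, so no session needs to be passed around.
    return request.db_session.query(Sticker).limit(limit).all()

Because bottle stores custom request attributes per request, each request gets its own session, and the db helpers never receive it as an argument.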

IPython cluster and PicklingError

My problem seems to be similar to this thread; however, while I think I am following the advised method, I still get a PicklingError. When I run my process locally, without sending it to an IPython cluster engine, the function works fine.
I am using zipline with IPython's notebook, so I first create a class based on zipline.TradingAlgorithm
Cell [ 1 ]
from IPython.parallel import Client
rc = Client()
lview = rc.load_balanced_view()
Cell [ 2 ]
%%px --local  # This ensures that the Class and modules exist on each engine
import zipline as zpl
import numpy as np

class Agent(zpl.TradingAlgorithm):  # must define initialize and handle_data methods
    def initialize(self):
        self.valueHistory = None

    def handle_data(self, data):
        for security in data.keys():
            ## Just randomly buy/sell/hold for each security
            coinflip = np.random.random()
            if coinflip < .25:
                self.order(security, 100)
            elif coinflip > .75:
                self.order(security, -100)
Cell [ 3 ]
from zipline.utils.factory import load_from_yahoo

start = '2013-04-01'
end = '2013-06-01'
sidList = ['SPY', 'GOOG']
data = load_from_yahoo(stocks=sidList, start=start, end=end)

agentList = []
for i in range(3):
    agentList.append(Agent())

def testSystem(agent, data):
    results = agent.run(data)  #-- This is how the zipline based class is executed
    #-- next I'm just storing the final value of the test so I can plot later
    agent.valueHistory.append(results['portfolio_value'][len(results['portfolio_value'])-1])
    return agent

for i in range(10):
    tasks = []
    for agent in agentList:
        # agent = testSystem(agent, data)  ## On its own, this works!
        #-- To test, uncomment the above line and comment out the next two
        tasks.append(lview.apply_async(testSystem, agent, data))
    agentList = [ar.get() for ar in tasks]

for agent in agentList:
    plot(agent.valueHistory)
Here is the Error produced:
PicklingError                             Traceback (most recent call last)
/Library/Python/2.7/site-packages/IPython/kernel/zmq/serialize.pyc in serialize_object(obj, buffer_threshold, item_threshold)
    100         buffers.extend(_extract_buffers(cobj, buffer_threshold))
    101
--> 102     buffers.insert(0, pickle.dumps(cobj,-1))
    103     return buffers
    104
PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
If I override the run() method from zipline.TradingAlgorithm with something like:

def run(self, data):
    return 1

then the passing off to the engines works, but obviously the guts of the test are not performed. Trying something like this...

def run(self, data):
    return zpl.TradingAlgorithm.run(self, data)

...results in the same PicklingError. As run is a method internal to zipline.TradingAlgorithm and I don't know everything that it does, how would I make sure it is passed through?
It looks like the zipline TradingAlgorithm object is not pickleable after it has been run:
import pickle
import zipline as zpl

class Agent(zpl.TradingAlgorithm):  # must define initialize and handle_data methods
    def handle_data(self, data):
        pass

agent = Agent()
pickle.dumps(agent)[:32]  # ok
agent.run(data)           # data loaded with load_from_yahoo as above
pickle.dumps(agent)[:32]  # fails
But this suggests to me that you should be creating the Agents on the engines, and only passing data / results back and forth (ideally, not passing data across at all, or at most once).
Minimizing data transfers might look something like this:
define the class:
%%px
import zipline as zpl
import numpy as np

class Agent(zpl.TradingAlgorithm):  # must define initialize and handle_data methods
    def initialize(self):
        self.valueHistory = []

    def handle_data(self, data):
        for security in data.keys():
            ## Just randomly buy/sell/hold for each security
            coinflip = np.random.random()
            if coinflip < .25:
                self.order(security, 100)
            elif coinflip > .75:
                self.order(security, -100)
load the data
%%px
from zipline.utils.factory import load_from_yahoo

start = '2013-04-01'
end = '2013-06-01'
sidList = ['SPY', 'GOOG']
data = load_from_yahoo(stocks=sidList, start=start, end=end)
agent = Agent()
and run the code:
from IPython import parallel

def testSystem(agent, data):
    results = agent.run(data)  #-- This is how the zipline based class is executed
    #-- next I'm just storing the final value of the test so I can plot later
    agent.valueHistory.append(results['portfolio_value'][len(results['portfolio_value'])-1])

# create references to the remote agent / data objects
agent_ref = parallel.Reference('agent')
data_ref = parallel.Reference('data')

tasks = []
for i in range(10):
    for j in range(len(rc)):
        tasks.append(lview.apply_async(testSystem, agent_ref, data_ref))

# wait for the tasks to complete
[t.get() for t in tasks]
And plot the results, never fetching the agents themselves
%matplotlib inline
import matplotlib.pyplot as plt

for history in rc[:].apply_async(lambda : agent.valueHistory):
    plt.plot(history)
This is not quite the same code you shared - you had three agents bouncing back and forth across all your engines, whereas this has one agent per engine. I don't know enough about zipline to say whether that's useful to you or not.
