How to run multiple defs() in Python for Maya?

I am following a Python for Maya tutorial from: https://github.com/gyassa4/MayaPyth/blob/master/gear_builder.py
After I run the code, it only runs the first def(), which creates a gear with teeth=10. However, it won't run the second def(), which changes the number of the gear's teeth as defined in def changeTeeth(constructor, extrude, teeth=25, length=1). Do I need a class to run multiple defs()?
import maya.cmds as cmds

def createGear(teeth=10, length=1):
    spans = teeth * 2
    transform, constructor = cmds.polyPipe(subdivisionsAxis=spans)
    sideFaces = range(spans * 2, spans * 3, 2)
    cmds.select(clear=True)
    for face in sideFaces:
        cmds.select('%s.f[%s]' % (transform, face), add=True)
    extrude = cmds.polyExtrudeFacet(localTranslateZ=length)[0]
    return transform, constructor, extrude

createGear()

def changeTeeth(constructor, extrude, teeth=25, length=1):
    spans = teeth * 3
    cmds.polyPipe(constructor, edit=True, subdivisionsAxis=spans)
    sideFaces = range(spans * 2, spans * 3, 5)
    faceNames = []
    for face in sideFaces:
        faceName = 'f[%s]' % (face)
        faceNames.append(faceName)
    cmds.setAttr('%s.inputComponents' % (extrude),
                 len(faceNames), *faceNames, type="componentList")
    cmds.polyExtrudeFacet(constructor, extrude, edit=True, ltz=length)

createGear()
runs the first def. Putting
changeTeeth(constructor="objname",
            extrude="extrudename")
at the end will run the second one.
Note that those two arguments are mandatory, since you only assigned default values to teeth=25 and length=1.
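Since createGear() already returns the nodes that changeTeeth() needs, a tidier pattern (just a sketch; the variable names are simply the values returned by createGear above) is to capture the return values and pass them straight in:

import maya.cmds as cmds

# run the first function and keep the node names it returns
transform, constructor, extrude = createGear(teeth=10, length=1)

# pass those node names to the second function to edit the same gear
changeTeeth(constructor, extrude, teeth=25, length=1)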

use OpenMaya to give particles specific translate and rotate values

I'm struggling with OpenMaya here.
I want to be able to take the transform information from a list of locators and plug those values into particle shapes.
The goal is to use this on over 25,000 locators, so I can't create a particle system for each instance. I really need to store the position and rotation values on the particles themselves.
To do that I started to dive into OpenMaya... (╯°□°)╯︵ ┻━┻
Anyway, the problem I'm facing now is that my scene crashes every time I launch this script and I can't figure out what I did wrong. I think I'm pretty close, but crashing Maya is not considered a victory.
import pymel.core as pm
import maya.OpenMaya as om
import maya.OpenMayaFX as omfx
import random

### A short script to create the scene with a bunch of locators with random pos/rot
numOfLoc = 5  # this number will eventually be set to 25000 when the script works.

# create locators with random position/rotation (for test)
def create_gazillion_locators(numOfLoc):
    for i in range(0, numOfLoc):
        # to create variation
        tx = random.uniform(-10, 10)
        ty = random.uniform(0, 5)
        tz = random.uniform(-10, 10)
        rx = random.uniform(0, 360)
        ry = random.uniform(0, 360)
        rz = random.uniform(0, 360)
        pm.spaceLocator()
        pm.move(tx, ty, tz)
        pm.rotate(rx, ry, rz, ws=True)

# Select locators
def select_locators():
    pm.select(cl=True)
    loc_selection = pm.listRelatives(pm.ls(type='locator'), p=True)
    pm.select(loc_selection, r=True)
    return loc_selection

# delete the locators
def clean_the_scene():
    # del locators (for testing purposes)
    sel = select_locators()
    if sel is not None:
        pm.delete(sel)

clean_the_scene()
create_gazillion_locators(numOfLoc)

### Actual script
# Found this on the internet. It seems to be neater.
class Vector(om.MVector):
    def __str__(self):
        return '{0}, {1}, {2}'.format(self.x, self.y, self.z)

    def __repr__(self):
        return '{0}, {1}, {2}'.format(self.x, self.y, self.z)

# OpenMaya treatment
sel = select_locators()
mSel = om.MSelectionList()
om.MGlobal.getActiveSelectionList(mSel)
mSel_iter = om.MItSelectionList(mSel)
mSel_DagPath = om.MDagPath()

# variables to store the transforms in
pos_array = om.MVectorArray()
rot_array = om.MVectorArray()
mLoc = om.MObject()

# Main loop of the selection iterator.
while not mSel_iter.isDone():
    # Get list of selected
    mSel_iter.getDagPath(mSel_DagPath)
    mSel_iter.getDependNode(mLoc)
    dep_node_name = om.MFnDependencyNode(mLoc).name()
    transl = pm.getAttr('{}.translate'.format(dep_node_name))
    rotate = pm.getAttr('{}.rotate'.format(dep_node_name))
    print(dep_node_name)
    print(Vector(transl[0], transl[1], transl[2]))
    print(Vector(rotate[0], rotate[1], rotate[2]))
    pos_array.append(Vector(transl[0], transl[1], transl[2]))
    rot_array.append(Vector(rotate[0], rotate[1], rotate[2]))
    mSel_iter.next()

# Up until there it seems to work ok.
nparticles_transform, nparticles_shape = pm.nParticle(position=pos_array)
pm.setAttr('nucleus1.gravity', 0.0)
nparticles_shape.computeRotation.set(True)
pm.addAttr(nparticles_shape, ln='rotationPP', dt='vectorArray')
pm.addAttr(nparticles_shape, ln='rotationPP0', dt='vectorArray')
pm.particleInstancer(nparticles_shape, name=p_instancer, edit=True, rotation="rotationPP")
particle_fn = omfx.MFnParticleSystem(nparticles_shape.__apimobject__())
particle_fn.setPerParticleAttribute('rotationPP', rot_array)
particle_fn.setPerParticleAttribute('rotationPP0', rot_array)
I read lots of things, went through Stack Overflow and Google, and based my script on several other things I found/learned (I listened to the OpenMaya course on YouTube by Chayan Vinayak), but I've had a hard time understanding the OpenMaya documentation.
I had a look, and there is no need to use OpenMaya at all in this case if you need PyMEL anyway. I used cmds for the creation of the locators because it is a bit faster, so if execution speed is a problem, try switching everything to cmds.
I also think there is no need to set computeRotation, because it's only used during simulation.
import pymel.core as pm
import maya.cmds as cmds
import random

numOfLoc = 5000

def c_create_gazillion_locators(num_of_loc):
    for i in range(num_of_loc):
        tx = random.uniform(-10, 10)
        ty = random.uniform(0, 5)
        tz = random.uniform(-10, 10)
        rx = random.uniform(0, 360)
        ry = random.uniform(0, 360)
        rz = random.uniform(0, 360)
        cmds.spaceLocator()
        cmds.move(tx, ty, tz)
        cmds.rotate(rx, ry, rz, ws=True)

c_create_gazillion_locators(numOfLoc)

locs = pm.ls(type="locator")
locs = pm.listRelatives(locs, p=True)

pos = []
rot = []
for loc in locs:
    pos.append(loc.translate.get())
    rot.append(loc.rotate.get())

nparticles_transform, nparticles_shape = pm.nParticle(position=pos)
pm.setAttr("nucleus1.gravity", 0.0)
pm.addAttr(nparticles_shape, ln="rotationPP", dt="vectorArray")
pm.addAttr(nparticles_shape, ln="rotationPP0", dt="vectorArray")

rpp = pm.Attribute(nparticles_shape + ".rotationPP")
rpp0 = pm.Attribute(nparticles_shape + ".rotationPP0")
rpp.set(rot)
rpp0.set(rot)
I've made a couple of changes to make it work. I didn't check the particle setup though. In fact, the main problem is mixing two different APIs. Either stick to OpenMaya (or even OpenMaya v2.0) or PyMEL.
import pymel.core as pm
import maya.OpenMaya as om
import maya.OpenMayaFX as omfx
import random

### A short script to create the scene with a bunch of locators with random pos/rot
numOfLoc = 5  # this number will eventually be set to 25000 when the script works.

# create locators with random position/rotation (for test)
def create_gazillion_locators(num_of_loc):
    for i in range(num_of_loc):
        # to create variation
        tx = random.uniform(-10, 10)
        ty = random.uniform(0, 5)
        tz = random.uniform(-10, 10)
        rx = random.uniform(0, 360)
        ry = random.uniform(0, 360)
        rz = random.uniform(0, 360)
        pm.spaceLocator()
        pm.move(tx, ty, tz)
        pm.rotate(rx, ry, rz, ws=True)

# Select locators
def select_locators():
    pm.select(cl=True)
    loc_selection = pm.listRelatives(pm.ls(type="locator"), p=True)
    pm.select(loc_selection, r=True)
    return loc_selection

# delete the locators
def clean_the_scene():
    # del locators (for testing purposes)
    sel = select_locators()
    if sel is not None:
        pm.delete(sel)

clean_the_scene()
create_gazillion_locators(numOfLoc)

### Actual script
# Found this on the internet. It seems to be neater.
class Vector(om.MVector):
    def __str__(self):
        return "{0}, {1}, {2}".format(self.x, self.y, self.z)

    def __repr__(self):
        return "{0}, {1}, {2}".format(self.x, self.y, self.z)

# OpenMaya treatment
sel = select_locators()
mSel = om.MSelectionList()
om.MGlobal.getActiveSelectionList(mSel)
mSel_iter = om.MItSelectionList(mSel)
mSel_DagPath = om.MDagPath()

# variables to store the transforms in
pos_array = []
rot_array = om.MVectorArray()
mLoc = om.MObject()

# Main loop of the selection iterator.
while not mSel_iter.isDone():
    # Get list of selected
    mSel_iter.getDagPath(mSel_DagPath)
    mSel_iter.getDependNode(mLoc)
    dep_node_name = om.MFnDependencyNode(mLoc).name()
    transl = pm.getAttr("{}.translate".format(dep_node_name))
    rotate = pm.getAttr("{}.rotate".format(dep_node_name))
    print(dep_node_name)
    print(Vector(transl[0], transl[1], transl[2]))
    print(Vector(rotate[0], rotate[1], rotate[2]))
    pos_array.append((transl[0], transl[1], transl[2]))
    rot_array.append(Vector(rotate[0], rotate[1], rotate[2]))
    mSel_iter.next()

# Up until there it seems to work ok.
nparticles_transform, nparticles_shape = pm.nParticle(position=pos_array)
pm.setAttr("nucleus1.gravity", 0.0)
nparticles_shape.computeRotation.set(True)
pm.addAttr(nparticles_shape, ln="rotationPP", dt="vectorArray")
pm.addAttr(nparticles_shape, ln="rotationPP0", dt="vectorArray")

# Create an instancer before trying to edit
instancer_node = pm.particleInstancer(nparticles_shape, name="p_instancer")
pm.particleInstancer(
    nparticles_shape, name=instancer_node, edit=True, rotation="rotationPP"
)

particle_fn = omfx.MFnParticleSystem(nparticles_shape.__apimobject__())
particle_fn.setPerParticleAttribute("rotationPP", rot_array)
particle_fn.setPerParticleAttribute("rotationPP0", rot_array)

Python concurrent.futures.ProcessPoolExecutor() not executing methods inside objects

I am trying to execute methods from two objects concurrently for a computer vision task. My idea is to use two different feature detectors to compute their respective feature descriptions inside a base class.
In this regard, I built the following toy example to understand the Python concurrent.futures.ProcessPoolExecutor class.
When executed, the first part of the code runs as expected: 20 "Heartbeat!!" strings are printed (10 from each object's method, executed 10 times each), and the sums for the two objects come out correctly as 100 and -100.
But in the second half of the code, it appears that the ProcessPoolExecutor is not running the do_math(self, numx) method at all. What am I doing wrong here?
With best,
Azmyin
import numpy as np
import concurrent.futures as cf
import time

def current_milli_time():
    # CORE FUNCTION
    # Function that returns a time tick in milliseconds
    return round(time.time() * 1000)

class masterClass(object):
    super_multiplier = 1  # Class variable

    def __init__(self, ls):
        # Attributes of masterClass
        self.var1 = ls[0]
        self.sumx = ls[1]

    def __rep__(self):
        print(f"sumx value -- {self.sumx}")

    def apply_sup_mult(self, var_in):
        self.sumx = self.sumx + (var_in * masterClass.super_multiplier)
        time.sleep(0.025)
        print(f"Heartbeat!!")

    # This is a regular method
    def do_math(self, numx):
        self.apply_sup_mult(numx)

ls = [10, 0]
ls2 = [-10, 0]
numx = 10

obj1 = masterClass(ls)
obj2 = masterClass(ls2)

t1 = current_milli_time()
# Run methods one by one
for _ in range(numx):
    obj1.do_math(ls[0])
    obj2.do_math(ls2[0])

obj1.__rep__()
obj2.__rep__()
t2 = current_milli_time()
print(f"Time taken -- {t2 - t1} ms")
print()

## Using multiprocessing to concurrently run two methods
# Intentionally reinitialize objects
obj1 = masterClass(ls)
obj2 = masterClass(ls2)

t1 = current_milli_time()
resx = []
with cf.ProcessPoolExecutor() as executor:
    for i in range(numx):
        # fs = [executor.submit(obj3.do_math, ls[0]), executor.submit(obj4.do_math, ls2[0])]
        f1 = executor.submit(obj1.do_math, ls[0])
        f2 = executor.submit(obj2.do_math, ls2[0])
        # for i, f in enumerate(cf.as_completed(fs)):
        #     print(f"Done with {f}")

# State of sumx
obj1.__rep__()
obj2.__rep__()
t2 = current_milli_time()
print(f"Time taken -- {t2 - t1} ms")

How to use a Python block to remove white noise?

I want to design a Python block to remove the white noise from my received signal, as shown in the red circle (screenshot).
So I first tested in Spyder whether my code could remove the noise normally, and I got the expected result (screenshot).
Just when I finished the test, I tried to transfer it into the Python block, but it couldn't execute normally and crashed.
Below are the test program from Spyder and my Python block:
import numpy

i = 0
j = 0
t = 0
data = []
buf = []

wav = numpy.fromfile(open('C:/Users/user/Desktop/datas'), dtype=numpy.uint8)

for i in range(int(len(wav) / 10)):
    for j in range(10):
        buf.append(wav[(i * 10) + j])
        if (buf[j] <= 180) and (buf[j] >= 90):
            t = t + 1
    if t < 6:
        data = numpy.append(data, buf)
    # else:
    #     data = numpy.append(data, numpy.zeros(10))
    t = 0
    j = 0
    buf.clear()
"""
Embedded Python Blocks:
Each time this file is saved, GRC will instantiate the first class it finds
to get ports and parameters of your block. The arguments to __init__ will
be the parameters. All of them are required to have default values!
"""
import numpy as np
from gnuradio import gr

i = 0
j = 0
t = 0
data = []
buf = []

class blk(gr.sync_block):  # other base classes are basic_block, decim_block, interp_block
    """Embedded Python Block example - a simple multiply const"""

    def __init__(self):  # only default arguments here
        """arguments to this function show up as parameters in GRC"""
        gr.sync_block.__init__(
            self,
            name='noise out',   # will show up in GRC
            in_sig=[np.float32],
            out_sig=[np.float32]
        )
        # if an attribute with the same name as a parameter is found,
        # a callback is registered (properties work, too).

    def work(self, input_items, output_items):
        """example: multiply with constant"""
        np.frombuffer(input_items, dtype=np.uint8)
        for i in range(int((len(input_items[0])) / 10)):
            for j in range(10):
                buf.append(input_items[0][(i * 10) + j])
                if (buf[j] <= 180) and (buf[j] >= 90):
                    t = t + 1
            if t < 6:
                data = numpy.append(data, buf)
            else:
                data = numpy.append(data, numpy.zeros(10))
            t = 0
            j = 0
            buf.clear()
        for i in range(len(output_items[0])):
            output_items[0][i] = data[i]
        return len(output_items[0])
What should I do to modify it if I want it to run normally?
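No fix was posted here, but for reference, here is one way the block could be restructured (a sketch only, assuming the float samples really carry the 0-255 levels that the 90/180 thresholds imply): keep the question's chunk-of-10 threshold logic, but operate directly on the buffers that work() receives instead of module-level data/buf lists, and return the number of items actually written:

import numpy as np
from gnuradio import gr

class blk(gr.sync_block):
    """Blank out 10-sample chunks that look like noise (sketch based on the question's thresholds)."""

    def __init__(self):
        gr.sync_block.__init__(self, name='noise out', in_sig=[np.float32], out_sig=[np.float32])

    def work(self, input_items, output_items):
        inp = input_items[0]
        out = output_items[0]
        out[:] = inp                           # start from a pass-through copy
        n = (len(inp) // 10) * 10              # only judge complete chunks of 10
        for start in range(0, n, 10):
            chunk = inp[start:start + 10]
            # count samples inside the 90..180 band, as in the question
            hits = np.count_nonzero((chunk >= 90) & (chunk <= 180))
            if hits >= 6:                      # mostly "noise": zero this chunk out
                out[start:start + 10] = 0.0
        return len(out)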

Python for Robotics: How to generate an app for localization of a Pepper robot

These days I have been trying to generate an application using the pythonapp template from the GitHub project robot-jumpstarter (https://github.com/aldebaran/robot-jumpstarter) to do the localization of Pepper. My basic idea is to combine the LandmarkDetector module with the generated app „Lokalisierung“ (German for "localization").
You can read the whole code of "LandmarkDetector.py", "main.py" and "MainLandmarkDetection.py" here:
"LandmarkDetector.py":
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-

"""Example: Demonstrates a way to localize the robot with ALLandMarkDetection"""

import qi
import time
import sys
import argparse
import math
import almath


class LandmarkDetector(object):
    """
    We first instantiate a proxy to the ALLandMarkDetection module.
    Note that this module should be loaded on the robot's naoqi.
    The module outputs its results in ALMemory in a variable
    called "LandmarkDetected".
    We then read this ALMemory value and check whether we get
    interesting things.
    After that we get the position of the landmark relative to the robot.
    """

    def __init__(self, app):
        """
        Initialisation of qi framework and event detection.
        """
        super(LandmarkDetector, self).__init__()
        app.start()
        session = app.session
        # Get the service ALMemory.
        self.memory = session.service("ALMemory")
        # Connect the event callback.
        # Get the services ALMotion & ALRobotPosture.
        self.motion_service = session.service("ALMotion")
        self.posture_service = session.service("ALRobotPosture")
        self.subscriber = self.memory.subscriber("LandmarkDetected")
        print "self.subscriber = self.memory.subscriber(LandmarkDetected)"
        self.subscriber.signal.connect(self.on_landmark_detected)
        print "self.subscriber.signal.connect(self.on_landmark_detected)"
        # Get the services ALTextToSpeech, ALLandMarkDetection and ALMotion.
        self.tts = session.service("ALTextToSpeech")
        self.landmark_detection = session.service("ALLandMarkDetection")
        # print "self.landmark_detection" is repr(self.landmark_detection)
        self.motion_service = session.service("ALMotion")
        self.landmark_detection.subscribe("LandmarkDetector", 500, 0.0)
        print "self.landmark_detection.subscribe(LandmarkDetector, 500, 0.0 )"
        self.got_landmark = False
        # Set here the size of the landmark in meters.
        self.landmarkTheoreticalSize = 0.06  # in meters; 0.05 or 0.06?
        # Set here the current camera ("CameraTop" or "CameraBottom").
        self.currentCamera = "CameraTop"

    def on_landmark_detected(self, markData):
        """
        Callback for event LandmarkDetected.
        """
        while markData == []:  # empty value when the landmark disappears
            self.got_landmark = False
            self.motion_service.moveTo(0, 0, 0.1 * math.pi)
        if not self.got_landmark:  # only speak the first time a landmark appears
            self.got_landmark = True
            # stop.motion_service.moveTo
            print "Ich sehe eine Landmarke! "  # "I see a landmark!"
            self.tts.say("Ich sehe eine Landmarke! ")
        # Retrieve landmark center position in radians.
        wzCamera = markData[1][0][0][1]
        wyCamera = markData[1][0][0][2]
        # Retrieve landmark angular size in radians.
        angularSize = markData[1][0][0][3]
        # Compute distance to landmark.
        distanceFromCameraToLandmark = self.landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))
        # Get current camera position in NAO space.
        transform = self.motion_service.getTransform(self.currentCamera, 2, True)
        transformList = almath.vectorFloat(transform)
        robotToCamera = almath.Transform(transformList)
        # Compute the rotation to point towards the landmark.
        cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
        # Compute the translation to reach the landmark.
        cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
        # Combine all transformations to get the landmark position in NAO space.
        robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
        # robotTurnAroundAngle = almath.rotationFromAngleDirection(0, 1, 1, 1)
        # print "robotTurnAroundAngle = ", robotTurnAroundAngle
        print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
        print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
        print "z " + str(robotToLandmark.r3_c4) + " (in meters)"

    def run(self):
        """
        Loop on, wait for events until manual interruption.
        """
        # Wake up robot
        self.motion_service.wakeUp()
        # Send robot to Pose Init
        self.posture_service.goToPosture("StandInit", 0.5)
        # Example showing how to get a simplified robot position in world.
        useSensorValues = False
        result = self.motion_service.getRobotPosition(useSensorValues)
        print "Robot Position", result
        # Example showing how to use this information to know the robot's displacement.
        useSensorValues = False
        # initRobotPosition = almath.Pose2D(self.motion_service.getRobotPosition(useSensorValues))
        # Make the robot move
        for i in range(1, 12, 1):
            self.motion_service.moveTo(0, 0, 0.1 * math.pi)
            print "self.motion_service.moveTo(0, 0, (0.1)*math.pi)"
        print "Starting LandmarkDetector"
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print "Interrupted by user, stopping LandmarkDetector"
            self.landmark_detection.unsubscribe("LandmarkDetector")
            # stop
            sys.exit(0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="10.0.0.10",
                        help="Robot IP address. On robot or Local Naoqi: use '10.0.0.10'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    try:
        # Initialize qi framework.
        connection_url = "tcp://" + args.ip + ":" + str(args.port)
        app = qi.Application(["LandmarkDetector", "--qi-url=" + connection_url])
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) + ".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    landmark_detector = LandmarkDetector(app)
    landmark_detector.run()
"main.py":
""" A sample showing how to make a Python script as an app. """
version = "0.0.8"
copyright = "Copyright 2015, Aldebaran Robotics"
author = 'YOURNAME'
email = 'YOUREMAIL#aldebaran.com'
import stk.runner
import stk.events
import stk.services
import stk.logging
class Activity(object):
"A sample standalone app, that demonstrates simple Python usage"
APP_ID = "com.aldebaran.lokalisierung"
def __init__(self, qiapp):
self.qiapp = qiapp
self.events = stk.events.EventHelper(qiapp.session)
self.s = stk.services.ServiceCache(qiapp.session)
self.logger = stk.logging.get_logger(qiapp.session, self.APP_ID)
def on_touched(self, *args):
"Callback for tablet touched."
if args:
self.events.disconnect("ALTabletService.onTouchDown")
self.logger.info("Tablet touched: " + str(args))
self.s.ALTextToSpeech.say("Yay!")
self.stop()
def on_start(self):
"Ask to be touched, waits, and exits."
# Two ways of waiting for events
# 1) block until it's called
self.s.ALTextToSpeech.say("Touch my forehead.")
self.logger.warning("Listening for touch...")
while not self.events.wait_for("FrontTactilTouched"):
pass
# 2) explicitly connect a callback
if self.s.ALTabletService:
self.events.connect("ALTabletService.onTouchDown", self.on_touched)
self.s.ALTextToSpeech.say("okay, now touch my tablet.")
# (this allows to simltaneously speak and watch an event)
else:
self.s.ALTextToSpeech.say("touch my tablet ... oh. " + \
"I don't haave one.")
self.stop()
def stop(self):
"Standard way of stopping the application."
self.qiapp.stop()
def on_stop(self):
"Cleanup"
self.logger.info("Application finished.")
self.events.clear()
if __name__ == "__main__":
stk.runner.run_activity(Activity)
"MainLandmarkDetection.py":
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-

"""A sample showing how to make a Python script as an app to localize the robot with ALLandMarkDetection"""

__version__ = "0.0.8"
__copyright__ = "Copyright 2015, Aldebaran Robotics"
__author__ = 'YOURNAME'
__email__ = 'YOUREMAIL@aldebaran.com'

import stk.runner
import stk.events
import stk.services
import stk.logging
import time
import sys
import math
import almath


class Activity(object):
    "A sample standalone app, that demonstrates simple Python usage"
    APP_ID = "com.aldebaran.lokalisierung"

    def __init__(self, qiapp):
        self.qiapp = qiapp
        self.events = stk.events.EventHelper(qiapp.session)
        self.s = stk.services.ServiceCache(qiapp.session)
        self.logger = stk.logging.get_logger(qiapp.session, self.APP_ID)
        self.qiapp.start()
        session = qiapp.session
        # Get the service ALMemory.
        self.memory = session.service("ALMemory")
        # Connect the event callback.
        # Get the services ALMotion & ALRobotPosture.
        self.motion_service = session.service("ALMotion")
        self.posture_service = session.service("ALRobotPosture")
        self.subscriber = self.memory.subscriber("LandmarkDetected")
        print "self.subscriber = self.memory.subscriber(LandmarkDetected)"
        self.subscriber.signal.connect(self.on_landmark_detected)
        print "self.subscriber.signal.connect(self.on_landmark_detected)"
        # Get the services ALTextToSpeech, ALLandMarkDetection and ALMotion.
        self.tts = session.service("ALTextToSpeech")
        self.landmark_detection = session.service("ALLandMarkDetection")
        # print "self.landmark_detection" is repr(self.landmark_detection)
        self.motion_service = session.service("ALMotion")
        self.landmark_detection.subscribe("Activity", 500, 0.0)
        print "self.landmark_detection.subscribe(Activity, 500, 0.0 )"
        self.got_landmark = False
        # Set here the size of the landmark in meters.
        self.landmarkTheoreticalSize = 0.06  # in meters; 0.05 or 0.06?
        # Set here the current camera ("CameraTop" or "CameraBottom").
        self.currentCamera = "CameraTop"

    def on_landmark_detected(self, markData):
        """
        Callback for event LandmarkDetected.
        """
        while markData == []:  # empty value when the landmark disappears
            self.got_landmark = False
            # self.motion_service.moveTo(0, 0, 0.1 * math.pi)
        if not self.got_landmark:  # only speak the first time a landmark appears
            self.got_landmark = True
            # stop.motion_service.moveTo
            print "Ich sehe eine Landmarke! "  # "I see a landmark!"
            # self.tts.say("Ich sehe eine Landmarke! ")
        # Retrieve landmark center position in radians.
        wzCamera = markData[1][0][0][1]
        wyCamera = markData[1][0][0][2]
        # Retrieve landmark angular size in radians.
        angularSize = markData[1][0][0][3]
        # Compute distance to landmark.
        distanceFromCameraToLandmark = self.landmarkTheoreticalSize / (2 * math.tan(angularSize / 2))
        # Get current camera position in NAO space.
        transform = self.motion_service.getTransform(self.currentCamera, 2, True)
        transformList = almath.vectorFloat(transform)
        robotToCamera = almath.Transform(transformList)
        # Compute the rotation to point towards the landmark.
        cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
        # Compute the translation to reach the landmark.
        cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
        # Combine all transformations to get the landmark position in NAO space.
        robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
        # robotTurnAroundAngle = almath.rotationFromAngleDirection(0, 1, 1, 1)
        # print "robotTurnAroundAngle = ", robotTurnAroundAngle
        print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
        print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
        print "z " + str(robotToLandmark.r3_c4) + " (in meters)"

    def run(self):
        """
        Loop on, wait for events until manual interruption.
        """
        # Wake up robot
        self.motion_service.wakeUp()
        # Send robot to Pose Init
        self.posture_service.goToPosture("StandInit", 0.5)
        # Example showing how to get a simplified robot position in world.
        useSensorValues = False
        result = self.motion_service.getRobotPosition(useSensorValues)
        print "Robot Position", result
        # Example showing how to use this information to know the robot's displacement.
        useSensorValues = False
        # initRobotPosition = almath.Pose2D(self.motion_service.getRobotPosition(useSensorValues))
        # Make the robot move
        for i in range(1, 20, 1):
            self.motion_service.moveTo(0, 0, 0.1 * math.pi)
            print "self.motion_service.moveTo(0, 0, (0.1)*math.pi)"
        print "Starting Activity"
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print "Interrupted by user, stopping Activity"
            self.landmark_detection.unsubscribe("Activity")
            # stop
            sys.exit(0)

    def stop(self):
        "Standard way of stopping the application."
        self.qiapp.stop()

    def on_stop(self):
        "Cleanup"
        self.logger.info("Application finished.")
        self.events.clear()


if __name__ == "__main__":
    stk.runner.run_activity(Activity)
    landmark_detector = Activity()
    landmark_detector.run()
This is how it ran in cmd.exe (screenshot not included).
I also have a question about the parameter I should pass in "landmark_detector = Activity()" near the end of the program (line 157), because of the error shown in the image. After reading the answers to the similar Stack Overflow question Python: TypeError: __init__() takes exactly 2 arguments (1 given), I am still confused. I think it should be the value that is given to "qiapp", but what value?
Best regards,
Frederik
Actually, when you call
stk.runner.run_activity(Activity)
... it will instantiate that activity object for you, with the right parameters; you don't need landmark_detector = Activity() etc. in MainLandmarkDetection.py.
And if this object has a method called on_start, that method will be called once everything is ready (so you may only need to rename your run() method to on_start()).
Note also that instead of calling sys.exit(0), you can just call self.stop() or self.qiapp.stop(), which is a bit cleaner in terms of letting the cleanup code in on_stop be called, if you need to unsubscribe from things etc.
See here for some documentation on stk.runner.
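Put together, the skeleton that this advice describes would look roughly like the following (a sketch built from the main.py template above, not a tested program):

import stk.runner
import stk.events
import stk.services
import stk.logging

class Activity(object):
    "Minimal standalone app skeleton (sketch based on the main.py template above)"
    APP_ID = "com.aldebaran.lokalisierung"

    def __init__(self, qiapp):
        self.qiapp = qiapp
        self.events = stk.events.EventHelper(qiapp.session)
        self.s = stk.services.ServiceCache(qiapp.session)
        self.logger = stk.logging.get_logger(qiapp.session, self.APP_ID)

    def on_start(self):
        # formerly run(): stk.runner calls this once the session is ready
        self.s.ALMotion.wakeUp()
        self.s.ALRobotPosture.goToPosture("StandInit", 0.5)
        # ... landmark work goes here ...
        self.stop()

    def stop(self):
        "Standard way of stopping the application."
        self.qiapp.stop()

    def on_stop(self):
        "Cleanup"
        self.logger.info("Application finished.")
        self.events.clear()

if __name__ == "__main__":
    stk.runner.run_activity(Activity)   # instantiates Activity(qiapp) itself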
Note also that instead of doing
self.motion_service = session.service("ALMotion")
(...)
transform = self.motion_service.getTransform(self.currentCamera, 2, True)
you can directly do
transform = self.s.ALMotion.getTransform(self.currentCamera, 2, True)
... which (in my opinion) makes it easier to see what is being done exactly (and reduces the number of variables).

trying to understand SIMPLEST CASE of Python class and self

I'm pretty new to Python, but learning. I'm trying to sort out class and self after talking to my professor today. Here's a simple experiment with one calling "main" program and a separate class whose functions are called from main and from inside the class. I've written enough to test some of the flexibility, but now I'm debugging, and I fear I'll pile a bunch of stupid garbage into it trying to get it to run. Can someone see what needs to be done to get it to run without any excess complication? Thanks!
#######################################################################################################
#
# BoxDemoMain.py calling program
# demonstration of using and calling BoxClass.py pgm
#
#
import numpy as np
import BoxClassPgm
#
#
def ReportBoxDimensions(anySizeBox):
    Ht = anySizeBox.boxHeight
    Wd = anySizeBox.boxWidth
    Dp = anySizeBox.boxDepth
    print "The Height, Width, and Depth of this box is " + str(Ht) + "," + str(Wd) + "," + str(Dp)
    print "The Surface Area of this box is " + str(BoxClassPgm.BoxClass(anySizeBox).CalcSurfArea())
    print "The Volume of this box is " + str(BoxClassPgm.BoxClass(anySizeBox).CalcVolume())
#
#
largeBox = BoxClass(6, 8, 4)
mediumBox = BoxClass(4, 5, 3)
smallBox = BoxClass(3, 3, 3)
#
#
ReportBoxDimensions(largeBox)
ReportBoxDimensions(mediumBox)
ReportBoxDimensions(smallBox)


#######################################################################################################
#
# BoxClassPgm.py class program
# class demonstration
#
#
import numpy as np
#
#
class BoxClass:
    def __init__(self, boxHeight=0, boxWidth=0, boxDepth=0):
        self.boxHeight = boxHeight
        self.boxWidth = boxWidth
        self.boxDepth = boxDepth

    def CalcVolume(self):
        print "height is " + str(self.boxHeight)
        boxVol = self.boxHeight * self.boxWidth * self.boxDepth
        return boxVol

    def CalcSurfArea(self):
        areaOf3Faces = BoxClass().CalcFaceAreas()
        boxSurfArea = 2 * areaOf3Faces
        return boxSurfArea

    def CalcFaceAreas(self):
        frontBack = self.boxHeight * self.boxWidth
        leftRight = self.boxHeight * self.boxDepth
        topBottom = self.boxWidth * self.boxDepth
        box3Faces = frontBack + leftRight + topBottom
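Since no answer was recorded here, a minimal sketch of the fixes the question seems to be asking for (assuming the two files stay separate as above): construct the boxes through the imported module, report on the box you were handed instead of wrapping it in a new BoxClass, call sibling methods through self, and return the value from CalcFaceAreas:

# BoxClassPgm.py -- sketch of the corrected class
class BoxClass:
    def __init__(self, boxHeight=0, boxWidth=0, boxDepth=0):
        self.boxHeight = boxHeight
        self.boxWidth = boxWidth
        self.boxDepth = boxDepth

    def CalcVolume(self):
        return self.boxHeight * self.boxWidth * self.boxDepth

    def CalcSurfArea(self):
        # call the sibling method on the same instance via self
        return 2 * self.CalcFaceAreas()

    def CalcFaceAreas(self):
        frontBack = self.boxHeight * self.boxWidth
        leftRight = self.boxHeight * self.boxDepth
        topBottom = self.boxWidth * self.boxDepth
        return frontBack + leftRight + topBottom   # the original never returned this


# BoxDemoMain.py -- sketch of the corrected caller
import BoxClassPgm

def ReportBoxDimensions(anySizeBox):
    print "The Height, Width, and Depth of this box is " + str(anySizeBox.boxHeight) + "," + str(anySizeBox.boxWidth) + "," + str(anySizeBox.boxDepth)
    print "The Surface Area of this box is " + str(anySizeBox.CalcSurfArea())   # use the box that was passed in
    print "The Volume of this box is " + str(anySizeBox.CalcVolume())

largeBox = BoxClassPgm.BoxClass(6, 8, 4)   # qualify the class with the module name
ReportBoxDimensions(largeBox)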
