Python / Maya: affecting groups of nodes

I'm not quite sure where the problem is in this script. What I've done is this....
I have a scene with 2 curves. Each curve has three spheres linked to it.
I select the curves and run the script. It craps out and says I've got objects with matching names?
import maya.cmds as cmds

# Grab the currently selected DAG nodes (the two curves).
selection = cmds.ls(selection=True, type='dagNode')

# Groups of nodes to be exported out: one sub-list per selected curve.
nodeBundles = []
for n in selection:
    # List all descendant nodes of this curve. fullPath=True returns
    # unambiguous paths (e.g. |group1|pSphere1) so Maya can distinguish
    # children that share the same short name.
    children = cmds.listRelatives(n, allDescendents=True, noIntermediate=True,
                                  fullPath=True, type="dagNode", path=True)
    # Find the transform parenting each child shape.
    # BUG FIX: fullPath=True is required here too — without it, duplicate
    # short names (two pSphere1 under different curves) trigger Maya's
    # "more than one object matches name" error.
    # BUG FIX: was maya.cmds.listRelatives(...) — `import maya.cmds as cmds`
    # binds only `cmds`, so `maya.` raised a NameError.
    childrenTransforms = cmds.listRelatives(children, type='transform',
                                            parent=True, fullPath=True)
    # Append each curve's set of transforms as its own bundle.
    nodeBundles.append(childrenTransforms)

# Cache out / move each bundle of nodes.
for n in nodeBundles:
    cmds.select(clear=True)
    cmds.xform(n, absolute=True, t=[0, 0, 10])
    print(n)
FIXED CODE:
import maya.cmds as cmds

# Selected DAG nodes (the curves).
selection = cmds.ls(selection=True, type='dagNode')

# Groups of nodes to be exported out.
nodeBundles = []
for n in selection:
    # All descendants, returned as full paths so same-named nodes stay
    # unambiguous (|group1|pSphere1 vs |group2|pSphere1).
    children = cmds.listRelatives(n, allDescendents=True, noIntermediate=True,
                                  fullPath=True, type="dagNode", path=True)
    # Parent transforms of those children; fullPath=True avoids the
    # "more than one object matches name" error on duplicate short names.
    # BUG FIX: was maya.cmds.listRelatives(...) — only `cmds` is bound by
    # the import above, so the `maya.` prefix raised a NameError.
    childrenTransforms = cmds.listRelatives(children, type='transform',
                                            parent=True, fullPath=True)
    # One bundle per selected curve.
    nodeBundles.append(childrenTransforms)

# Move each bundle of nodes.
for n in nodeBundles:
    cmds.select(clear=True)
    cmds.xform(n, r=True, t=[0, 0, 10])
    print(n)
By adding a list inside of a list I can then iterate based on groups of children. Is this the correct method of doing so, then?
nodes = []
for item in cmds.ls(sl=True, type='transform'):
    # BUG FIX: pass `item` explicitly — without an argument, listRelatives
    # operates on the whole current selection, so every iteration collected
    # the same descendants instead of this item's.
    descendants = cmds.listRelatives(item, ad=True, ni=True, f=True) or []
    # Keep one sub-list per selected transform (a list of lists).
    nodes.append(descendants)

# Move each group by a different multiple of 10 units in Z.
for val, grp in enumerate(nodes, start=1):
    offset = val * 10  # hoisted: constant for the whole group
    for n in grp:
        cmds.select(clear=True)
        print(offset)
        cmds.xform(n, r=True, t=[0, 0, offset])

Without seeing your scene or error message, my assumption is you have multiple nodes of the same name. Because Maya uses strings, it can't tell the difference between pSphere1 and... pSphere1
From the documentation on listRelatives, use the argument fullPath:
Return full pathnames instead of object names.
Like this:
childrenTransforms = maya.cmds.listRelatives(children, type='transform', parent=True, fullPath=True)
Assuming the error was in the last cmds.xform, this should make those transforms unambiguous (i.e. |group1|pSphere1)

listRelatives will work on selected objects if nothing is specified, so you can get the subnodes (with full paths) like this:
descendants = cmds.listRelatives(ad=True, ni=True, f=True) # f=True = long paths
If you were trying to use 'dagNode' to filter between shapes and geometry, it won't work: dagNode will return both transforms and shapes. You can use 'geometryShape' to get only shapes:
descendant_shapes = cmds.listRelatives(ad=True, ni = True, f=True)
but in your case that would return the curveShape as well. You could filter out the curve with:
descendants = cmds.ls(descendants, type= surfaceShape, l=True) # l=True keeps long paths
Also: in your code you are passing lists-of-lists into nodeBundles, which Maya won't like. You should flatten the list by adding items one at a time:
nodes = []
for item in cmds.ls(sl=True, type='transform'):
    # BUG FIX: pass `item` so we get THIS transform's descendants; `or []`
    # guards against listRelatives returning None for childless nodes.
    descendants = cmds.listRelatives(item, ad=True, ni=True, f=True) or []
    nodes += descendants  # extend the flat list, don't nest it

for n in nodes:
    cmds.select(clear=True)
    cmds.xform(n, r=True, t=[0, 0, 10])

Related

Untrackable object attribute

I am trying to adapt this code here: https://github.com/nachonavarro/gabes/blob/master/gabes/circuit.py (line 136)
but am coming across an issue because several times the attribute .chosen_label is used but I can find no mention of it anywhere in the code. The objects left_gate, right_gate and gate are Gate objects (https://github.com/nachonavarro/gabes/blob/master/gabes/gate.py)
def reconstruct(self, labels):
    """Ungarble the circuit bottom-up and return the root gate's chosen label."""
    # Group nodes by depth, then reverse so the deepest level (leaves) is
    # processed first; parents can then read their children's chosen_label.
    levels = [list(children)
              for children in anytree.LevelOrderGroupIter(self.tree)][::-1]
    for level in levels:
        for node in level:
            gate = node.name
            if node.is_leaf:
                # Leaf gates consume the next two input labels.
                garblers_label = labels.pop(0)
                evaluators_label = labels.pop(0)
            else:
                # Inner gates read the labels already chosen by their children.
                left_gate = node.children[0].name
                right_gate = node.children[1].name
                garblers_label = left_gate.chosen_label
                evaluators_label = right_gate.chosen_label
            output_label = gate.ungarble(garblers_label, evaluators_label)
            # chosen_label is attached here, which is why it appears nowhere
            # else in the class definition.
            gate.chosen_label = output_label
    return self.tree.name.chosen_label
The code runs without error and the .chosen_label is a Label object (https://github.com/nachonavarro/gabes/blob/master/gabes/label.py)
Any help would be much appreciated
The attribute is set in the same method:
# NOTE(review): illustrative pseudocode, not runnable — the if/else bodies
# are comments standing in for the real assignments shown in the question.
for level in levels:
for node in level:
gate = node.name
if node.is_leaf:
# set `garblers_label` and `evaluators_label` from
# the next two elements of the `labels` argument
else:
# use the child nodes of this node to use their gates, and
# set `garblers_label` and `evaluators_label` to the left and
# right `chosen_label` values, respectively.
# generate the `Label()` instance based on `garblers_label` and `evaluators_label`
output_label = gate.ungarble(garblers_label, evaluators_label)
gate.chosen_label = output_label
I'm not familiar with the anytree library, so I had to look up the documentation: the anytree.LevelOrderGroupIter(...) function orders the nodes in a tree from root to leaves, grouped by level. The tree here appears to be a balanced binary tree (each node has either 0 or 2 child nodes), so you get a list with [(rootnode,), (level1_left, level1_right), (level2_left_left, level2_left_right, level2_right_left, level2_right_right), ...]. The function loops over these levels in reverse order. This means that leaves are processed first.
Once all node.is_leaf nodes have their chosen_label set, the other non-leaf nodes can reference the chosen_label value on the leaf nodes on the level already processed before them.
So, assuming that labels is a list with at least twice the number of leaf nodes in the tree, you end up with those label values aggregated at every level via the gate.ungarble() function, and the final value is found at the root node via self.tree.name.chosen_label.

Problem with appending a graph object to lists for networkx in Python

I am trying to remove nodes at random from graphs using the networkx package. The first block describes the graph construction and the second block gives me the node lists that I have to remove from my graph H (20%, 50% and 70% removals). I want 3 versions of the base graph H in the end, in a list or any data structure. The code in block 3 gives me objects of type "None". The last block shows that it works for a single case.
I am guessing that the problem is in the append function, which somehow returns objects of type "None". I also feel that the base graph H might be getting altered after every iteration. Is there any way around this? Any help would be appreciated :)
import networkx as nx
import numpy as np
import random

# --- network construction --------------------------------------------------
H = nx.Graph()
H.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
H.add_edges_from([[1, 2], [2, 4], [5, 6], [7, 10], [1, 5], [3, 6]])
nx.info(H)
nodes_list = list(H.nodes)

# --- lists of nodes to be removed ------------------------------------------
perc = [.20, .50, .70]  # percentage of nodes to be removed
random_sample_list = []
for p in perc:
    random.seed(2)  # for replicability
    sample = round(p * 10)
    random_sample_list.append(random.sample(nodes_list, sample))

# --- build one reduced copy of H per sample ---------------------------------
# BUG FIX: Graph.remove_nodes_from mutates the graph in place and returns
# None, so appending its return value filled graph_list with None objects.
# Mutate the copy first, then append the copy itself.
graph_list = []
for sample_nodes in random_sample_list:
    H1 = H.copy()
    H1.remove_nodes_from(sample_nodes)
    graph_list.append(H1)

# single-case check (mutates H itself) — works because the return value
# is never used here
H.remove_nodes_from(random_sample_list[1])
nx.info(H)
Final output should look like:
[Graph with 20% removed nodes, Graph with 50% removed nodes, Graph with 70% removed nodes] - e.g. a list
The function remove_nodes_from does not return the modified graph, but returns None. Consequently, you only need to create the graph with the desired percentage of your nodes and append it to the list:
# Copy the base graph, strip the sampled nodes from the copy, then store it;
# remove_nodes_from returns None, so the copy itself must be appended.
graph_list = []
for sample in random_sample_list:
    reduced = H.copy()
    reduced.remove_nodes_from(sample)
    graph_list.append(reduced)

Nuke – How to select random nodes with Python?

I'm not too proficient in Python - I'd love a little help with some code. I'm trying to select two random nodes out of all selected nodes in nuke.
I've got far enough that I can print two randomly chosen node names in the array of selected nodes, but could anyone help finish off the code so that the two nodes with the matching names are selected? Essentially I'm imagining if a node name contains chosen_nodes string, select these nodes.
Thanks.
import nuke
import random

# Collect the names of every currently selected node.
array = [node['name'].value() for node in nuke.selectedNodes()]

# Pick two distinct names at random and show them.
chosen_nodes = random.sample(array, k=2)
print(chosen_nodes)
With this code you can select two random nodes out of several selected ones:
import nuke
import random

# Names of all currently selected nodes.
array = []
for node in nuke.selectedNodes():
    name = node['name'].value()
    array.append(name)
print(array)

# Deselect everything first so only the random picks end up selected.
for node in nuke.selectedNodes():
    node['selected'].setValue(False)

# Pick two distinct names and select the matching nodes.
# random.sample replaces the original randint/remove loop: it cannot pick
# the same node twice, and min() keeps this from raising when fewer than
# two nodes were selected.
for name in random.sample(array, k=min(2, len(array))):
    nuke.toNode(name).setSelected(True)
array = []
Essentially I'm imagining if a node name contains chosen_nodes string, select these nodes.
This should be close to what you want!
# Re-select only the nodes whose name contains the pattern string.
pattern = 'chosen_nodes'
for candidate in nuke.selectedNodes():
    candidate.setSelected(False)
    if pattern not in candidate['name'].value():
        continue
    candidate.setSelected(True)
A slightly more complicated version:
def selectNodesWithFuzzyName(name, nodes=None):
    """
    Set the selected nodes to only those that match the passed name.

    Parameters
    ----------
    name : str
        Glob-style name of a node e.g. 'Grade*'
        (FIX: the docstring previously documented this parameter as `node`.)
    nodes : Optional[Iterable[nuke.Node]]
        If provided, operate on only these nodes. Otherwise
        the currently selected nodes will be used.
    """
    import fnmatch
    if nodes is None:
        nodes = nuke.selectedNodes()
    for node in nodes:
        # Deselect everything, then re-select only the glob matches.
        node.setSelected(False)
        if fnmatch.fnmatch(node['name'].value(), name):
            node.setSelected(True)

selectNodesWithFuzzyName('Grade*')

How to retrieve the created vertices of cmds.polyExtrude on Maya

I'm writing a script to change the position of the vertices created by an extrude command given a specific vector. But I can't find a way to get the newly generated vertices/faces/edges.
I tried looking in cmds.getAttr('polyExtrudeFace1') or the query mode of cmds.polyExtrudeFacet, but I can't find the right attribute/flag to get what I need.
I'm not sure if there is a nice way to get the new extruded component ids, but you can easily find them if you have a way to get the "before" state.
One other way would be to deactivate every construction node, re-enable the polyExtrudeFace nodes one by one to fill up a dict, and then re-enable everything.
Here is an example to select the latest vertices on an extruded object :
'''
This script only works on the last polyExtrudeFace node and on vertices.

Strategy: count vertices with the extrude active, temporarily disable the
extrude node, count again — the difference is the run of vertex ids the
extrude created (new components are appended at the end of the id range).
'''
# get the selected object (o=True returns the object, not components)
sel = cmds.ls(sl=True, o=True)
# get the extrude nodes from construction history, useful to create a dict
# with all polyExtrudeFace new component ids
extrudenodes = [e for e in cmds.listHistory(sel) if cmds.nodeType(e) == 'polyExtrudeFace']
# current vtx count (extrude still active)
current_vtx_nb = cmds.polyEvaluate(sel, v=1)
# disable a polyExtrude (nodeState 1 — presumably HasNoEffect; confirm in Maya docs)
cmds.setAttr("{}.nodeState".format(extrudenodes[0]), 1)
# get the previous vertex count, i.e. before the extrude
previous_vtx_nb = cmds.polyEvaluate(sel, v=1)
# re-enable it
cmds.setAttr("{}.nodeState".format(extrudenodes[0]), 0)
# get the range of newly created vertex ids
nb = current_vtx_nb - previous_vtx_nb
mrang = [current_vtx_nb-nb,current_vtx_nb]
# recreate the vtx selection strings, e.g. 'pCube1.vtx[12]'
out = ['{}.vtx[{}]'.format(sel[0], i) for i in range(*mrang)]
# select the vertices
cmds.select(out)
EDIT :
Here is an example of the dictionary-building loop:
import maya.cmds as cmds
'''
This script builds the vertices data loop.

It disables every polyExtrudeFace node in the history, then re-enables them
one by one (newest first), recording the vertex-id range each node adds into
`dataExtrude`.  Calling loopIncSel() repeatedly cycles the selection through
each extrude node's vertex set.

NOTE(review): written for Python 2 — `dataExtrude.keys()[count]` indexes the
keys directly; under Python 3 it would need list(dataExtrude.keys())[count].
'''
class Counter:
    # class-level counter shared by all instances; each construction bumps it,
    # which is what advances the selection cycle in loopIncSel()
    idCounter = 0
    def __init__(self):
        Counter.idCounter += 1

def loopIncSel():
    'Relaunch the command to loop through all keys of the dict.'
    if sorted(dataExtrude.keys()):
        # modulo wraps around so repeated calls cycle through the keys
        count = Counter().idCounter % len(dataExtrude.keys())
        k = dataExtrude.keys()[count]
        cmds.select(dataExtrude[k])

# get the selected object
sel = cmds.ls(sl=True, o=True)
# get the extrude nodes, useful to create a dict with all polyExtrudeFace new component ids
extrudenodes = [e for e in cmds.listHistory(sel) if cmds.nodeType(e) == 'polyExtrudeFace']
# dict data: maps each polyExtrudeFace node -> list of vertex names it created
dataExtrude = {}
# first pass: disable every extrude node
for n in extrudenodes:
    cmds.setAttr("{}.nodeState".format(n), 1)
# reverse the process to re-enable,
# note that if there is a node in between creating vertices and faces, it won't work
for n in extrudenodes[::-1]:
    # get the previous number (before this extrude is re-enabled)
    previous_vtx_nb = cmds.polyEvaluate(sel, v=1)
    # re-enable it
    cmds.setAttr("{}.nodeState".format(n), 0)
    # current vtx count (after re-enabling)
    current_vtx_nb = cmds.polyEvaluate(sel, v=1)
    # get the range of vertex ids this node added
    nb = current_vtx_nb - previous_vtx_nb
    mrang = [current_vtx_nb-nb,current_vtx_nb]
    # recreate the vtx selection strings
    dataExtrude[n] = ['{}.vtx[{}]'.format(sel[0], i) for i in range(*mrang)]
# select the vertices of one extrude, cycling on each call
# cmds.select(dataExtrude['polyExtrudeFace3'])
loopIncSel()
When applying cmds.polyExtrudeFacet onto a mesh, Maya will automatically select the new faces. Knowing this, it's easy to convert the face components to the new vertexes:
cmds.polySphere(name="pSphere1")  # Create a sphere to test with.
cmds.polyExtrudeFacet("pSphere1.f[10]")  # Extrude a random face.
# polyExtrudeFacet leaves the new faces selected, so filter `ls` down to
# face components and convert them to the vertices they contain.
sel = cmds.polyListComponentConversion(cmds.ls("*.f[*]", sl=True), fromFace=True, toVertex=True) # Convert faces to verts. Filter `ls` to only get face selections.
cmds.select(sel) # Select the newly created vertexes.

Looking for a better algorithm or data structure to improve conversion of connectivity from ID's to indices

I'm working with Python 3.6.2 and numpy.
I'm writing code to visualize a finite element model and results.
The visualization code requires the finite element mesh nodes and elements to be identified by indices (starting at zero, no gaps), but the input models are based on IDs and can have very large gaps in the ID space.
So I'm processing all of the nodes and elements and changing them to use indices instead of ID's.
The first step is to process the array of nodes and node coordinates. This comes to me sorted, so I don't specifically have to do anything with the coordinates — I just use the indices of the nodal coordinate array. But I do need to then redefine the connectivity of the elements to be index-based instead of ID-based.
To do this, I create a dictionary by iterating over the array of node ids and adding each node to the dictionary using it's ID as the key and its index as the value
In the following code fragment,
model.nodes is a dictionary containing all of the Node objects, keyed by their id
nodeCoords is a pre-allocated numpy array where I store the nodal coordinates for later use in visualization. It's the indices of this array that I need to use later to redefine my elements
nodeIdIndexMap is a dictionary that I populate using the Node ID as the key and the index of nodeCoords as the value
Code:
# Map each node ID to its row index in nodeCoords.
# BUG FIX: the original initialised `nodeindex` / `node_id_index_map` but
# then used `nodeIndex` / `nodeIdIndexMap` — a NameError. Names unified,
# and the manual counter replaced with enumerate.
nodeIdIndexMap = {}
for nodeIndex, (nid, node) in enumerate(sorted(model.nodes.items())):
    nodeCoords[nodeIndex] = node.xyz   # store coordinates at the index position
    nodeIdIndexMap[nid] = nodeIndex    # remember ID -> index for element remap
Then I iterate over all of the elements, looking up each element node ID in the dictionary, getting the index and replacing the ID with the index.
In the following code fragment,
tet4Elements is a dictionary containing all elements of type tet4, keyed using the element id
n1, n2, n3 and n4 are pre-allocated numpy arrays that hold the element nodes
element.nodes[n].nid gets the element node ID
n1[tet4Index] = nodeIdIndexMap[element.nodes[0].nid looks up the element node ID in the dictionary created in the previous fragment, returns the corresponding index and stores it in the numpy array
Code:
# Walk every tet4 element, translating its four node IDs into node indices.
for tet4Index, (eid, element) in enumerate(tet4Elements.items()):
    id[tet4Index] = eid
    elem_nodes = element.nodes  # hoist the attribute lookup for the 4 accesses
    n1[tet4Index] = nodeIdIndexMap[elem_nodes[0].nid]
    n2[tet4Index] = nodeIdIndexMap[elem_nodes[1].nid]
    n3[tet4Index] = nodeIdIndexMap[elem_nodes[2].nid]
    n4[tet4Index] = nodeIdIndexMap[elem_nodes[3].nid]
The above works, but it's slow... It takes about 16 seconds to process 6,500,000 tet4 elements (each tet4 element has four nodes, and each node ID has to be looked up in the dictionary, so that's 26 million dictionary lookups in a dictionary with 1,600,000 entries).
So the question is how to do this faster? At some point I'll move to C++ but for now I'm looking to improve performance in Python.
I'll be grateful for any ideas to improve performance.
Thanks,
Doug
With the numbers you are quoting and reasonable hardware (8GB ram) the mapping can be done in less than a second. The bad news is that getting the data out of the original dicts of objects takes 60 x longer at least with the mock objects I created.
# extract 29.2821946144104 map 0.4702422618865967
But maybe you can find some way of bulk querying your nodes and tets?
Code:
import numpy as np
from time import time
def mock_data(nn, nt, idf):
    """Build mock node and tet4 dictionaries for benchmarking.

    Parameters
    ----------
    nn : int
        Number of nodes.
    nt : int
        Number of tet4 elements.
    idf : int
        ID-spacing factor; gaps between consecutive IDs are drawn from
        [1, 2*idf), so a larger idf gives a sparser ID space.

    Returns
    -------
    (nodes, tet4s) : two dicts keyed by ID. `nodes` maps id -> node(nid, xyz)
    namedtuple; `tet4s` maps id -> tet4(nodes) namedtuple holding four node
    namedtuples.
    """
    import collections
    node = collections.namedtuple('node', 'nid xyz')
    tet4 = collections.namedtuple('tet4', 'nodes')
    # strictly increasing IDs with random gaps -> sparse but unique ID space
    nid = np.cumsum(np.random.randint(1, 2*idf, (nn,)))
    # FIX: this array was also named `nodes`, shadowing the dict built below
    coords = np.random.random((nn, 3))
    nodes = dict(zip(nid, map(node, nid, coords)))
    eid = np.cumsum(np.random.randint(1, 2*idf, (nt,)))
    tet4s = nid[np.random.randint(0, nn, (nt, 4))]
    tet4s = dict(zip(eid, map(tet4, map(lambda t: [nodes[ti] for ti in t], tet4s))))
    return nodes, tet4s
def f_extract(nodes, tet4s, limit=15*10**7):
    """Pull IDs, coordinates and connectivity out of the object dicts into
    flat numpy arrays — this is the slow, pure-Python part of the pipeline."""
    nid = np.array(list(nodes.keys()))
    ncoords = np.array([nd.xyz for nd in nodes.values()])
    tid = np.array(list(tet4s.keys()))
    tnodes = np.array([[member.nid for member in elem.nodes]
                       for elem in tet4s.values()])
    return nid, ncoords, tid, tnodes, limit
def f_lookup(nid, ncoords, tid, tnodes, limit):
    """Convert the ID-based connectivity in `tnodes` to index-based.

    Uses a dense uint32 lookup table when the max ID is small enough to
    afford one, otherwise falls back to sort + binary search.
    """
    nmx = nid.max()
    if nmx < limit:
        # dense table: one fancy-indexed pass translates every node ID
        table = np.empty((nmx + 1,), dtype=np.uint32)
        table[nid] = np.arange(len(nid), dtype=np.uint32)
        tnodes = table[tnodes]
        del table
    else:
        # ID space too large for a dense table: sort, then binary-search
        order = np.argsort(nid)
        nid = nid[order]
        ncoords = ncoords[order]
        tnodes = nid.searchsorted(tnodes)
    tmx = tid.max()
    if tmx < limit:
        # NOTE(review): this element-ID table is built but never used or
        # returned — kept only for timing parity with the original benchmark.
        tlookup = np.empty((tmx + 1,), dtype=np.uint32)
        tlookup[tid] = np.arange(len(tid), dtype=np.uint32)
    else:
        tidx = np.argsort(tid)
        tid = tid[tidx]
        tnodes = tnodes[tidx]
    return nid, ncoords, tid, tnodes
# Benchmark: build mock data, then time dict extraction vs. the numpy mapping.
raw = mock_data(1_600_000, 6_500_000, 16)
start = time()
arrays = f_extract(*raw)
mid = time()
f_lookup(*arrays)
end = time()
print('extract', mid - start, 'map', end - mid)

Categories

Resources