I have two functions that get the usage values of the HDD drives. I am using tkinter and I can show each graph with its button, but after I click one, I can't seem to remove the previous graph.
The objective is: if I click the "C Drive" button, the E graph is erased, and if I click the "E Drive" button, the C graph is erased.
#Disk E Storage
def hdd_e():
    usage_e = shutil.disk_usage("E:\\")
    total_space_e = usage_e[0]
    used_space_e = usage_e[1]
    free_space_e = usage_e[2]
    fig_e = matplotlib.figure.Figure(figsize=(50, 5), facecolor="#F0F0F0")
    canvas_e = FigureCanvasTkAgg(fig_e, master=tab3)
    ax_e = fig_e.add_subplot(111)
    ax_e.pie([total_space_e, used_space_e, free_space_e])
    ax_e.legend(["Total", "Used", "Free"])
    circle_e = matplotlib.patches.Circle((0, 0), 0)
    ax_e.add_artist(circle_e)
    canvas_e.get_tk_widget().pack()
    canvas_e.draw()
#Disk C Storage
def hdd_c():
    usage_c = shutil.disk_usage("C:\\")
    total_space_c = usage_c[0]
    used_space_c = usage_c[1]
    free_space_c = usage_c[2]
    fig_c = matplotlib.figure.Figure(figsize=(50, 5), facecolor="#F0F0F0")
    canvas_c = FigureCanvasTkAgg(fig_c, master=tab3)
    ax_c = fig_c.add_subplot(111)
    ax_c.pie([total_space_c, used_space_c, free_space_c])
    ax_c.legend(["Total", "Used", "Free"])
    circle_c = matplotlib.patches.Circle((0, 0), 0)
    ax_c.add_artist(circle_c)
    canvas_c.get_tk_widget().pack()
    canvas_c.draw()
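One possible way to get that behavior (a rough sketch under assumptions, not the only approach): keep a single module-level reference to whichever canvas is currently packed and destroy its Tk widget before drawing the chart for the other drive. tab3 is assumed to be the same parent widget used above, and the button wiring shown in the trailing comments is hypothetical.
import shutil
import matplotlib.figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

current_canvas = None  # the chart that is on screen right now, if any

def show_drive(path):
    global current_canvas
    if current_canvas is not None:
        current_canvas.get_tk_widget().destroy()   # remove the previous graph
    total, used, free = shutil.disk_usage(path)    # shutil returns (total, used, free)
    fig = matplotlib.figure.Figure(figsize=(5, 5), facecolor="#F0F0F0")
    ax = fig.add_subplot(111)
    ax.pie([total, used, free])
    ax.legend(["Total", "Used", "Free"])
    current_canvas = FigureCanvasTkAgg(fig, master=tab3)
    current_canvas.get_tk_widget().pack()
    current_canvas.draw()

# wired to the two buttons, e.g.:
# command=lambda: show_drive("C:\\")   on the "C Drive" button
# command=lambda: show_drive("E:\\")   on the "E Drive" button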
I have this code
joint_name = cmds.ls(sl=1)[0]
circle_name = cmds.circle(name = joint_name + "_CTL", nr=(1, 0, 0) )
group_name = cmds.group(name = joint_name + "_OFFSET")
cmds.select(joint_name, group_name)
temp_constraint = cmds.parentConstraint()
cmds.delete(temp_constraint)
cmds.select(circle_name, joint_name)
cmds.pointConstraint()
cmds.orientConstraint()
When you select a joint and run this code, you get a circle that controls that joint. Going down the hierarchy, you have to select each joint and then run the code again.
How could I give every joint in the chain its own controlling circle without having to select each joint in the outliner?
Run this on your chain of joints:
for x, joint_name in enumerate(cmds.ls(sl=1, dag=True, type='joint')):
    circle_name = cmds.circle(name='{}_CTL{:02d}'.format(joint_name, x), nr=(1, 0, 0))
    group_name = cmds.group(name='{}_OFFSET{:02d}'.format(joint_name, x))
    cmds.select(joint_name, group_name)
    temp_constraint = cmds.parentConstraint()
    cmds.delete(temp_constraint)
    cmds.select(circle_name, joint_name)
    cmds.pointConstraint()
    cmds.orientConstraint()
Note that instead of using select, you could feed pointConstraint the nodes directly:
cmds.pointConstraint(circle_name, joint_name, n='something')
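For example, the whole loop could pass explicit node names instead of relying on the current selection. A sketch along those lines (untested; it uses the fact that cmds.circle returns the transform as the first element of its result):
import maya.cmds as cmds

for x, joint_name in enumerate(cmds.ls(sl=1, dag=True, type='joint')):
    circle_name = cmds.circle(name='{}_CTL{:02d}'.format(joint_name, x), nr=(1, 0, 0))[0]
    group_name = cmds.group(circle_name, name='{}_OFFSET{:02d}'.format(joint_name, x))
    # snap the offset group onto the joint, then drop the temporary constraint
    cmds.delete(cmds.parentConstraint(joint_name, group_name))
    cmds.pointConstraint(circle_name, joint_name)
    cmds.orientConstraint(circle_name, joint_name)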
I'm trying to make a custom script that replicates/imitates the Maya Sculpt Geometry Tool. Basically I have two radio buttons, Push and Relax (which imitate the push and relax sculpt operations), and a value slider (which replicates the max displacement slider). The radio buttons and the reset button work perfectly, but I'm having problems coding the slider. Any help with this one? Thanks in advance.
Here is my code:
import maya.cmds as cmds
import maya.mel as mel
if cmds.window("cusWin", exists = True):
cmds.deleteUI("cusWin")
customwindow = cmds.window("cusWin",t= "Push/Relax", wh = (200, 117), s= False, mnb= False, mxb= False)
cmds.frameLayout( label='Push/Relax Modifier', borderStyle='in', cll= False)
cmds.columnLayout(adj = True, columnOffset= ("both", 3))
cmds.radioCollection()
cmds.radioButton(l = "Push", onc= "mel.eval('artUpdatePuttyOperation artPuttyCtx push ;')")
cmds.radioButton(l = "Relax", onc= "mel.eval('artUpdatePuttyOperation artPuttyCtx relax ;')")
cmds.separator(style= "none", h= 3)
DynFsgCol1 = 30
DynFsgCol2 = 50
DynFsgCol3 = 100
valSlider = cmds.floatSliderGrp(l = "Value", field = True, min = 0, max= 5, precision = 4, cw3= (DynFsgCol1, DynFsgCol2, DynFsgCol3 ))
cmds.separator(style= "none", h= 3)
cmds.rowColumnLayout(numberOfColumns=2, columnWidth=[(1,98),(2,100)], columnOffset=[(1,'left',1),(2,'right',95)])
cmds.button(l = "Apply", w= 92, c= 'slider()')
cmds.button(l = "Reset", w= 91, c= 'resetButton()')
cmds.showWindow( customwindow )
def slider():
    valueSlider = cmds.floatSliderGrp(valSlider, q= True, value= True)
    mel.eval('artPuttyCtx -e -maxdisp valueSlider `currentCtx`;')
def resetButton():
    mel.eval('resetTool artPuttyContext;')
There are two different things going on here.
First, by using the string form of the callbacks you lose control over the scope of your functions. It's better to pass the Python function objects directly. This version does what yours looks like it's intended to do, using callables as the callbacks:
import maya.cmds as cmds
import maya.mel as mel
if cmds.window("cusWin", exists = True):
cmds.deleteUI("cusWin")
customwindow = cmds.window("cusWin",t= "Push/Relax", wh = (200, 117), s= False, mnb= False, mxb= False)
cmds.frameLayout( label='Push/Relax Modifier', cll= False)
cmds.columnLayout(adj = True, columnOffset= ("both", 3))
cmds.radioCollection()
push = lambda _: cmds.artPuttyCtx(cmds.currentCtx(), e = True, mtm='push')
relax = lambda _: cmds.artPuttyCtx(cmds.currentCtx(), e = True, mtm='relax')
cmds.radioButton(l = "Push", onc= push)
cmds.radioButton(l = "Relax", onc=relax)
cmds.separator(style= "none", h= 3)
DynFsgCol1 = 30
DynFsgCol2 = 50
DynFsgCol3 = 100
valSlider = cmds.floatSliderGrp(l = "Value", field = True, min = 0, max= 5, precision = 4, cw3= (DynFsgCol1, DynFsgCol2, DynFsgCol3 ))
cmds.separator(style= "none", h= 3)
cmds.rowColumnLayout(numberOfColumns=2, columnWidth=[(1,98),(2,100)], columnOffset=[(1,'left',1),(2,'right',95)])
# put the defs here, where the name of the slider is known
def slider(*_):
    valueSlider = cmds.floatSliderGrp(valSlider, q=True, value=True)
    cmds.artPuttyCtx(cmds.currentCtx(), e=True, maxdisp=valueSlider)
def resetButton(*_):
    cmds.resetTool(cmds.currentCtx())
cmds.button(l = "Apply", w= 92, c= slider)
cmds.button(l = "Reset", w= 91, c= resetButton)
cmds.showWindow( customwindow )
The thing to note is that the order in which things are defined lets you reference the names of controls you've already made. (As an aside, you need to handle the nonsense arguments Maya passes to the button and slider callbacks.) It's also a good idea to clean up dangling MEL calls where you can; if you want to make this tool more complex in the future, it's much harder when you have to work in two languages at once. In this case artUpdatePuttyOperation looks like it's defined when the sculpt tool UI opens, so if you ran this script without opening that UI I don't think it would work correctly.
The second issue is that you're not explicitly activating an artPuttyCtx with setToolTo, so this won't work unless the user has already created the right context and made it active.
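One way to cover that case (a sketch, not taken from the answer above): make sure a putty context exists and is the active tool before showing the window. The 'artPuttyContext' name is borrowed from the resetTool call in the question's own script; assuming it is the context to reuse is my guess.
import maya.cmds as cmds

# Assumed setup step: create a sculpt/putty context if none exists yet,
# then make it the current tool so the artPuttyCtx edit calls above have
# a valid context to modify.
if not cmds.contextInfo('artPuttyContext', exists=True):
    cmds.artPuttyCtx('artPuttyContext')
cmds.setToolTo('artPuttyContext')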
I have two monitors, and when I watch movies on my main monitor I want the other one to be off.
I don't want to push the power button because it is hard to find in the dark.
This code turns off both screens:
import win32con, win32gui

class Mode(object):
    STAND_BY = 1
    TURN_ON = -1
    TURN_OFF = 2

SC_MONITORPOWER = 0xF170
win32gui.SendMessage(win32con.HWND_BROADCAST, win32con.WM_SYSCOMMAND, SC_MONITORPOWER, Mode.TURN_OFF)
I also tried to get a handle to my other screen this way:
monitors = win32api.EnumDisplayMonitors(None, None)
(hSecondMon, _, (_, _, secondRight, secondButtom)) = monitors[1]
win32gui.SendMessage(hSecondMon, win32con.WM_SYSCOMMAND, SC_MONITORPOWER, Mode.TURN_OFF)
but it didn't work.
However, I was able to change its color to black:
monitors = win32api.EnumDisplayMonitors(None, None)
(_, _, (_, _, primRight, _)) = monitors[0]
(hSecondMon, _, (_, _, secondRight, secondButtom)) = monitors[1]
hDeskDC = win32gui.CreateDC(win32api.GetMonitorInfo(hSecondMon)['Device'], None, None)
brush = win32gui.GetSysColorBrush(win32con.COLOR_BACKGROUND)
win32gui.FillRect(hDeskDC, (0, 0, secondRight - primRight, secondButtom), brush)
but that is not good enough, since as soon as my desktop background changes, the second screen is repainted over the black fill.
I'm trying to display a series of images (a CT scan) using numpy/VTK, as described in this sample code (http://www.vtk.org/Wiki/VTK/Examples/Python/vtkWithNumpy), but it doesn't work and I don't know why.
If someone could help me it would be kind.
Here's my code:
import vtk
import numpy as np
import os
import cv, cv2
import matplotlib.pyplot as plt
import PIL
import Image
DEBUG =True
directory="splitted_mri/"
w = 226
h = 186
d = 27
stack = np.zeros((w,d,h))
k=-1 #add the next picture in a differente level of depth/z-positions
for file in os.listdir(directory):
    k += 1
    img = directory + file
    im = Image.open(img)
    temp = np.asarray(im, dtype=int)
    stack[:,k,:] = temp
print stack.shape
#~ plt.imshow(test)
#~ plt.show()
print type(stack[10,10,15])
res = np.amax(stack)
res1 = np.amin(stack)
print res, type(res)
print res1, type(res1)
#~ for (x,y,z), value in np.ndenumerate(stack):
#~ stack[x,y,z]=np.require(stack[x,y,z],dtype=np.int16)
#~ print type(stack[x,y,z])
stack = np.require(stack,dtype=np.uint16)
print stack.dtype
if DEBUG : print stack.shape
dataImporter = vtk.vtkImageImport()
data_string = stack.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
dataImporter.SetDataScalarTypeToUnsignedChar()
dataImporter.SetNumberOfScalarComponents(1)
dataImporter.SetDataExtent(0, w-1, 0, 1, 0, h-1)
dataImporter.SetWholeExtent(0, w-1, 0, 1, 0, h-1)
essai = raw_input()
alphaChannelFunc = vtk.vtkPiecewiseFunction()
colorFunc = vtk.vtkColorTransferFunction()
for i in range(0, 255):
    alphaChannelFunc.AddPoint(i, 0.9)
    colorFunc.AddRGBPoint(i, i, i, i)
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunc)
#volumeProperty.ShadeOn()
volumeProperty.SetScalarOpacity(alphaChannelFunc)
# This class describes how the volume is rendered (through ray tracing).
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
# We can finally create our volume. We also have to specify the data for it, as well as how the data will be rendered.
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInputConnection(dataImporter.GetOutputPort())
# The class vtkVolume is used to pair the preaviusly declared volume as well as the properties to be used when rendering that volume.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# With almost everything else ready, its time to initialize the renderer and window, as well as creating a method for exiting the application
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)
# We add the volume to the renderer ...
renderer.AddVolume(volume)
# ... set background color to white ...
renderer.SetBackground(1, 1, 1)
# ... and set window size.
renderWin.SetSize(400, 400)
# A simple function to be called when the user decides to quit the application.
def exitCheck(obj, event):
    if obj.GetEventPending() != 0:
        obj.SetAbortRender(1)
# Tell the application to use the function as an exit check.
renderWin.AddObserver("AbortCheckEvent", exitCheck)
#to quit, press q
renderInteractor.Initialize()
# Because nothing will be rendered without any input, we order the first render manually before control is handed over to the main-loop.
renderWin.Render()
renderInteractor.Start()
I finally found out what was wrong.
Here's my new code:
import vtk
import numpy as np
import os
import matplotlib.pyplot as plt
import PIL
import Image
DEBUG =False
directory="splitted_mri/"
l = []
k=0 #add the next picture in a differente level of depth/z-positions
for file in os.listdir(directory):
    img = directory + file
    if DEBUG : print img
    l.append(img)
# the os.listdir function do not give the files in the right order
#so we need to sort them
l=sorted(l)
temp = Image.open(l[0])
h, w = temp.size
d = len(l)*5 #with our sample each images will be displayed 5times to get a better view
if DEBUG : print 'width, height, depth : ',w,h,d
stack = np.zeros((w,d,h),dtype=np.uint8)
for i in l:
    im = Image.open(i)
    temp = np.asarray(im, dtype=int)
    for i in range(5):
        stack[:,k+i,:] = temp
    k += 5
    #~ stack[:,k,:]= temp
    #~ k+=1
if DEBUG :
    res = np.amax(stack)
    print 'max value', res
    res1 = np.amin(stack)
    print 'min value', res1
#convert the stack in the right dtype
stack = np.require(stack,dtype=np.uint8)
if DEBUG : #check that the image has not been modified
    test = stack[:,0,:]
    plt.imshow(test, cmap='gray')
    plt.show()
if DEBUG : print 'stack shape & dtype' ,stack.shape,',',stack.dtype
dataImporter = vtk.vtkImageImport()
data_string = stack.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
dataImporter.SetDataScalarTypeToUnsignedChar()
dataImporter.SetNumberOfScalarComponents(1)
#vtk uses an array in the order : height, depth, width which is
#different of numpy (w,h,d)
w, d, h = stack.shape
dataImporter.SetDataExtent(0, h-1, 0, d-1, 0, w-1)
dataImporter.SetWholeExtent(0, h-1, 0, d-1, 0, w-1)
alphaChannelFunc = vtk.vtkPiecewiseFunction()
colorFunc = vtk.vtkColorTransferFunction()
for i in range(256):
    alphaChannelFunc.AddPoint(i, 0.2)
    colorFunc.AddRGBPoint(i, i/255.0, i/255.0, i/255.0)
# for our test sample, we set the black opacity to 0 (transparent) so as
#to see the sample
alphaChannelFunc.AddPoint(0, 0.0)
colorFunc.AddRGBPoint(0,0,0,0)
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunc)
#volumeProperty.ShadeOn()
volumeProperty.SetScalarOpacity(alphaChannelFunc)
# This class describes how the volume is rendered (through ray tracing).
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
# We can finally create our volume. We also have to specify the data for
# it, as well as how the data will be rendered.
volumeMapper = vtk.vtkVolumeRayCastMapper()
# function to reduce the spacing between each image
volumeMapper.SetMaximumImageSampleDistance(0.01)
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInputConnection(dataImporter.GetOutputPort())
# The class vtkVolume is used to pair the preaviusly declared volume as
#well as the properties to be used when rendering that volume.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# With almost everything else ready, its time to initialize the renderer and window,
# as well as creating a method for exiting the application
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)
# We add the volume to the renderer ...
renderer.AddVolume(volume)
# ... set background color to white ...
renderer.SetBackground(1, 1, 1)
# ... and set window size.
renderWin.SetSize(550, 550)
renderWin.SetMultiSamples(4)
# A simple function to be called when the user decides to quit the application.
def exitCheck(obj, event):
    if obj.GetEventPending() != 0:
        obj.SetAbortRender(1)
# Tell the application to use the function as an exit check.
renderWin.AddObserver("AbortCheckEvent", exitCheck)
#to quit, press q
renderInteractor.Initialize()
# Because nothing will be rendered without any input, we order the first
# render manually before control is handed over to the main-loop.
renderWin.Render()
renderInteractor.Start()
If you are OK with a solution that does not use VTK, you could use Matplotlib's imshow with interactive key navigation.
This tutorial and a companion viewer implementation show how:
https://www.datacamp.com/community/tutorials/matplotlib-3d-volumetric-data
https://github.com/jni/mpl-volume-viewer
and here an implementation for viewing RTdose files:
https://github.com/pydicom/contrib-pydicom/pull/19
See also:
https://github.com/napari/napari
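For example, a minimal slice viewer in the spirit of that tutorial might look like the sketch below. It assumes volume is a 3D numpy array indexed as (slice, row, column); the j/k key bindings are just an illustrative choice.
import numpy as np
import matplotlib.pyplot as plt

def view_volume(volume):
    """Show one slice at a time; press j / k to move through the stack."""
    fig, ax = plt.subplots()
    ax.index = volume.shape[0] // 2                 # start in the middle of the stack
    ax.imshow(volume[ax.index], cmap='gray')

    def on_key(event):
        if event.key == 'j':
            ax.index = (ax.index - 1) % volume.shape[0]
        elif event.key == 'k':
            ax.index = (ax.index + 1) % volume.shape[0]
        else:
            return
        ax.images[0].set_data(volume[ax.index])     # swap in the new slice
        fig.canvas.draw_idle()

    fig.canvas.mpl_connect('key_press_event', on_key)
    plt.show()

# e.g. with a random test volume of 27 slices:
view_volume(np.random.rand(27, 186, 226))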
I've gotten OpenCV working with Python and I can even detect a face through my webcam. What I really want to do, though, is detect movement and find the point in the middle of the blob of movement. The camshift sample is close to what I want, but I don't want to have to select which portion of the video to track. Bonus points for being able to predict the next frame.
Here's the code I have currently:
#!/usr/bin/env python
import cv
def is_rect_nonzero(r):
    (_, _, w, h) = r
    return (w > 0) and (h > 0)

class CamShiftDemo:

    def __init__(self):
        self.capture = cv.CaptureFromCAM(0)
        cv.NamedWindow( "CamShiftDemo", 1 )
        self.storage = cv.CreateMemStorage(0)
        self.cascade = cv.Load("/usr/local/share/opencv/haarcascades/haarcascade_mcs_upperbody.xml")
        self.last_rect = ((0, 0), (0, 0))

    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1)
        backproject_mode = False
        i = 0
        while True:
            i = (i + 1) % 12
            frame = cv.QueryFrame( self.capture )
            if i == 0:
                found = cv.HaarDetectObjects(frame, self.cascade, self.storage, 1.2, 2, 0, (20, 20))
                for p in found:
                    # print p
                    self.last_rect = (p[0][0], p[0][1]), (p[0][2], p[0][3])
                    print self.last_rect
            cv.Rectangle( frame, self.last_rect[0], self.last_rect[1], cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
            cv.ShowImage( "CamShiftDemo", frame )
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break

if __name__ == "__main__":
    demo = CamShiftDemo()
    demo.run()
Found a solution at "How do I track motion using OpenCV in Python?"
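For anyone landing here later, a rough sketch of the general idea (using the newer cv2 API rather than the legacy cv module above, and not necessarily the same code as the linked answer): difference consecutive frames, threshold the difference, and take the centroid of the motion mask from its image moments.
import cv2

cap = cv2.VideoCapture(0)
_, prev = cap.read()
prev = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    diff = cv2.absdiff(gray, prev)                       # pixels that changed between frames
    _, mask = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    m = cv2.moments(mask)
    if m['m00'] > 0:                                     # some motion was detected
        cx, cy = int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])
        cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)  # mark the centre of the motion blob
    cv2.imshow('motion', frame)
    prev = gray
    if cv2.waitKey(7) & 0xFF == 27:                      # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()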