Why is the output of the following code just a box? - python

import vtk
import pickle
from numpy import *
data_matrix = Ilog
dataImporter = vtk.vtkImageImport()
# The previously created array is converted to a string of chars and imported.
data_string = data_matrix.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
# The type of the newly imported data is set to unsigned char (uint8)
dataImporter.SetDataScalarTypeToUnsignedChar()
# Because the data only contains one scalar component (an intensity value, not RGB), the importer
# must be told this is the case.
dataImporter.SetNumberOfScalarComponents(1)
# The following two functions describe how the data is stored and the dimensions of the array it is stored in. For this
# case, the axes are of length 728, 728 and 25 and begin with the first element. For other data, this is probably not the case.
# I have to admit, however, that I honestly don't know the difference between SetDataExtent() and SetWholeExtent(), although
# VTK complains if both are not used.
dataImporter.SetDataExtent(0, 727, 0, 727, 0, 24)
dataImporter.SetWholeExtent(0, 727, 0, 727, 0, 24)
# The following class is used to store transparency-values for later retrieval. In our case, we want the value 0 to be
# completely opaque, whereas higher values are given different transparency-values to show how it works.
alphaChannelFunc = vtk.vtkPiecewiseFunction()
alphaChannelFunc.AddPoint(0, 1)
alphaChannelFunc.AddPoint(0.5, 0.9)
alphaChannelFunc.AddPoint(0.7, 0.9)
alphaChannelFunc.AddPoint(1, 0)
colorFunc = vtk.vtkColorTransferFunction()
colorFunc.AddRGBPoint(0, 1,1,1)
colorFunc.AddRGBPoint(0.2, 0.9, 0.9, 0.9)
colorFunc.AddRGBPoint(0.7, 0.2, 0.2, 0.2)
colorFunc.AddRGBPoint(1, 0, 0, 0)
# The previous two classes stored properties. Because we want to apply these properties to the volume we want to render,
# we have to store them in a class that stores volume properties.
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunc)
volumeProperty.SetScalarOpacity(alphaChannelFunc)
# This class describes how the volume is rendered (through ray tracing).
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
# We can finally create our volume. We also have to specify the data for it, as well as how the data will be rendered.
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInputConnection(dataImporter.GetOutputPort())
# The class vtkVolume is used to pair the previously declared volume as well as the properties to be used when rendering that volume.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# With almost everything else ready, it's time to initialize the renderer and window, as well as to create a method for exiting the application.
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)
# We add the volume to the renderer ...
renderer.AddVolume(volume)
# ... set the background color ...
renderer.SetBackground(0.5,0.5,0.7)
# ... and set window size.
renderWin.SetSize(800,800)
# A simple function to be called when the user decides to quit the application.
def exitCheck(obj, event):
    if obj.GetEventPending() != 0:
        obj.SetAbortRender(1)
# Tell the application to use the function as an exit check.
renderWin.AddObserver("AbortCheckEvent", exitCheck)
renderInteractor.Initialize()
# Because nothing will be rendered without any input, we order the first render manually before control is handed over to the main-loop.
renderWin.Render()
renderInteractor.Start()
Ilog is a logical matrix of size 728x728x25 whose cross-section is shown in the attached image; there, the red color signifies the value 1 and the blue color signifies the value 0.
But when the above code is run, the output is always just a box, as in the second image.
The matrix contains only zeros and ones. By the logic above, the values of one are given full transparency and the values of zero full opacity.
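For reference, a minimal sketch of how the scalars end up in the importer, assuming Ilog is a NumPy boolean array (the toy array below is illustrative, not the real data):
import numpy as np
# Hypothetical stand-in for Ilog: a 728x728x25 boolean array
Ilog = np.zeros((728, 728, 25), dtype=bool)
Ilog[200:500, 200:500, 5:20] = True
# Converting to uint8 makes the byte layout explicit: tobytes()/tostring()
# then yields one byte per voxel with values 0 and 1, which is what the
# unsigned-char importer and the transfer-function points at 0 and 1 see.
data_matrix = Ilog.astype(np.uint8)
data_string = data_matrix.tobytes()
print(len(data_string))  # 728 * 728 * 25 = 13,249,600 bytes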

Related

Why does Gmsh create more surfaces than expected when I use the "gmsh.model.occ.cut()" command?

I am trying to create a volume in Gmsh (using Python API) by cutting some small cylinders from a bigger one.
When I do that, I expect to have one surface for each cut region; instead, I get the result in the figure. I have highlighted in red the surfaces that give me the problem (some cut regions behave as expected); as you can see, instead of one surface I get two, and sometimes they aren't even equal.
gmsh creates more surfaces than expected:
So, my questions are:
Why does gmsh behave like that?
How can I fix this, as I need predictable behavior?
Below is the code I used to generate the geometry.
To work, the code requires some parameters such as core_height, core_inner_radius and core_outer_radius, plus the number of small cylinders and their radius.
gmsh.initialize(sys.argv)
#gmsh.initialize()
gmsh.clear()
gmsh.model.add("circle_extrusion")
inner_cyl_tag = 1
outer_cyl_tag = 2
inner_cyl = gmsh.model.occ.addCylinder(0,0,0, 0, 0, core_height, core_inner_radius, tag = inner_cyl_tag)
outer_cyl = gmsh.model.occ.addCylinder(0,0,0, 0, 0, core_height, core_outer_radius, tag = outer_cyl_tag)
core_tag = 3
cut1 = gmsh.model.occ.cut([(3,outer_cyl)],[(3,inner_cyl)], tag = core_tag)
#create a set of filled cylinders
#set position
angle_vector = np.linspace(0,2*np.pi,number_of_hp+1)
pos_x = hp_radial_position*np.cos(angle_vector)
pos_y = hp_radial_position*np.sin(angle_vector)
pos_z = 0.0
#cut one cylinder at the time and assign the new core tag
for ii in range(0,len(angle_vector)):
    old_core_tag = core_tag
    heat_pipe = gmsh.model.occ.addCylinder(pos_x[ii], pos_y[ii], pos_z, 0, 0, core_height,hp_outer_radius, tag =-1)
    core_tag = heat_pipe+1
    core = gmsh.model.occ.cut([(3,old_core_tag)],[(3,heat_pipe)], tag = core_tag)
gmsh.model.occ.synchronize()
#get volume entities and assign physical groups
volumes = gmsh.model.getEntities(dim=3)
solid_marker = 1
gmsh.model.addPhysicalGroup(volumes[0][0], [volumes[0][1]],solid_marker)
gmsh.model.setPhysicalName(volumes[0][0],solid_marker, "solid_volume")
#get surfaces entities and apply physical groups
surfaces = gmsh.model.getEntities(dim=2)
surface_markers= np.arange(1,len(surfaces)+1,1)
for ii in range(0,len(surfaces)):
    gmsh.model.addPhysicalGroup(2,[surfaces[ii][1]],tag = surface_markers[ii])
#We finally generate and save the mesh:
gmsh.model.mesh.generate(3)
gmsh.model.mesh.refine()
gmsh.model.mesh.refine()
gmsh.option.setNumber("Mesh.MshFileVersion", 2.2) #save in ASCII 2 format
gmsh.write(mesh_name+".msh")
# Launch the GUI to see the results:
#if '-nopopup' not in sys.argv:
# gmsh.fltk.run()
gmsh.finalize()
I do not think that you have additional surfaces in the sense of gmsh.model.occ surfaces. To me this looks like your volume mesh is sticking out of your surface mesh, i.e. the volume and surface meshes do not fit together.
Here is what I did to check your case:
First I added the following lines at the beginning of your code to get a minimum working example:
import gmsh
import sys
import numpy as np
inner_cyl_tag = 1
outer_cyl_tag = 2
core_height = 1
core_inner_radius = 0.1
core_outer_radius = 0.2
number_of_hp = 5
hp_radial_position = 0.1
hp_outer_radius = 0.05
What I get with this code is the following:
To visualize it like this, go to "Tools" --> "Options" --> "Mesh" and check "2D element faces", "3D element edges" and "3D element faces".
You can see that there are some purple triangles sticking out of the green/yellowish surface triangles of the inner surfaces.
You could try to visualize your case the same way and toggle "3D element faces" on and off a few times.
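If you prefer to set these display options from the Python API instead of the GUI, here is a hedged sketch (the option names are my assumption of what those GUI checkboxes map to, so please verify them against your gmsh version):
# Assumed display options corresponding to the GUI checkboxes mentioned above
gmsh.option.setNumber("Mesh.SurfaceFaces", 1)  # "2D element faces"
gmsh.option.setNumber("Mesh.VolumeEdges", 1)   # "3D element edges"
gmsh.option.setNumber("Mesh.VolumeFaces", 1)   # "3D element faces"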
So here is the solution for this behaviour; I did not know that gmsh behaves like this myself. It seems that when you create your mesh and refine it, the refinement is applied to the 2D surface mesh and the 3D volume mesh separately, which means that those two meshes are no longer connected after the refinement. What I did next was to try what happens if you create the 2D mesh only, refine it, and then create the 3D mesh, i.e.:
replace:
gmsh.model.mesh.generate(3)
gmsh.model.mesh.refine()
gmsh.model.mesh.refine()
by:
gmsh.model.mesh.generate(2)
gmsh.model.mesh.refine()
gmsh.model.mesh.refine()
gmsh.model.mesh.generate(3)
The result then looks like this:
I hope that this was actually your problem. But in the future it would be good if you could provide a minimal working example of code that we can copy-paste to get the same visualization you showed in your image.

Add a color gradient to Google Earth Engine Feature collection using Python

I am displaying a map of a Feature Collection on a sample Python Flask website I've created (not in the Earth Engine code tool). Let's say for example it's a map of piano stores and their annual revenue. I want a color-coded map of a Google Earth Engine FeatureCollection based on one key (in this case revenue) in the underlying GeoJSON but can't figure out how to do it. Here is what I've tried:
I assign the FeatureCollection:
piano_shops = ee.FeatureCollection('my/feature/collection')
I get the feature collection using getMapId and getTileUrl like this, but all features display as black:
url = ee.data.getTileUrl(piano_shops.getMapId(), 0, 0, 0)
I can change the color like this, but it changes the entire FeatureCollection to that one color:
url = ee.data.getTileUrl(piano_shops.getMapId({'color': 'red'}), 0, 0, 0)
I can filter the FeatureCollection like this:
piano_shops_filtered = piano_shops.filter(ee.Filter.gt('revenue', 100000))
url = ee.data.getTileUrl(piano_shops_filtered.getMapId({'color': 'red'}), 0, 0, 0)
But what I really want is two things:
A color gradient from red to green of 0 revenue to 1,000,000 revenue. It seems like you can do this on an Image using palette and bands but FeatureCollection doesn't have that.
Manually-specified color codes of, say, 'red': 0 to 250000, 'orange': 250000 to 500000, 'yellow': 500000 to 750000, 'green': >750000. It seems like for Images you can assign a list to 'palette' based on categories like this (https://developers.google.com/earth-engine/guides/image_visualization):
# Load 2012 MODIS land cover and select the IGBP classification.
cover = ee.Image('MODIS/051/MCD12Q1/2012_01_01').select('Land_Cover_Type_1')
# Define a palette for the 18 distinct land cover classes.
igbp_palette = [
'aec3d4', # water
'152106',
'225129',
'369b47',
'30eb5b',
'387242', # forest
'6a2325',
'c3aa69',
'b76031',
'd9903d',
'91af40', # shrub, grass
'111149', # wetlands
'cdb33b', # croplands
'cc0013', # urban
'33280d', # crop mosaic
'd7cdcc', # snow and ice
'f7e084', # barren
'6f6f6f' # tundra
]
# Define a map centered on the United States.
map_palette = folium.Map(location=[40.413, -99.229], zoom_start=5)
# Add the image layer to the map and display it. Specify the min and max labels
# and the color palette matching the labels.
map_palette.add_ee_layer(
cover, {'min': 0, 'max': 17, 'palette': igbp_palette}, 'IGBP classes')
display(map_palette)
In the Google Earth Engine Code Editor it seems like you can filter and add lots of layers, but I'm not sure that solution will work for my little web map: I want it to stay fast even if there happened to be a million piano stores (hypothetical), and it also assumes you're using folium, which I'm not. Has anyone found a solution for this?
I found the solution to the gradient part of my own question. The trick is Image().paint() (https://developers.google.com/earth-engine/apidocs/ee-image-paint). This page - https://developers.google.com/earth-engine/guides/image_visualization - explains how to do it:
To color the feature edges with values set from a property of the features, set the color parameter to the name of the property with numeric values
You have to create an empty image first, then paint it based on a numeric property, like "revenue". Here's how to do it specifically:
empty = ee.Image().byte() # just an empty "Image"
piano_shops = ee.FeatureCollection('my/feature/collection')
painted_pianos = empty.paint(piano_shops, 'revenue', 8)
# paint() parameters: the FeatureCollection, the property (must be numeric)
# that governs the color gradient/code, and the width (a number, or the
# name of a numeric property)
palette = ['FF0000', '00FF00', '0000FF']
url = ee.data.getTileUrl(
painted_pianos.getMapId({'palette': palette, 'max':1000000}), 0, 0, 0
)
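As a small follow-up to the snippet above (the values here are illustrative, not from the original answer): passing an explicit 'min' together with 'max' pins the low end of the gradient, so that 0 revenue maps to the first palette colour:
# red -> orange -> yellow -> green over revenue from 0 to 1,000,000
palette = ['FF0000', 'FFA500', 'FFFF00', '00FF00']
url = ee.data.getTileUrl(
    painted_pianos.getMapId({'min': 0, 'max': 1000000, 'palette': palette}),
    0, 0, 0
)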

Calculate new RGB Values with current RGB Values and brightness

My goal is to read out the current RGB values of my device (this part works) and to calculate new RGB values from a brightness value. The brightness value comes via MQTT and is between 0 and 255; RGB values also range from 0 to 255. Currently I get unusable RGB values, like 100023 and so on.
My code works with callbacks; each callback is set up for multiple devices. I have one callback with an if statement, which checks the payload and handles each case, like state = 0 or color = [245, 0, 30].
My first idea was to calculate a value called fac by mapping the range 0 to 255 to the range 0 to 100 and multiplying each RGB value by fac, but that gives the unusable output. In the current code I've added a bit of math to get usable values, but I have some thought errors in it.
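For reference, a minimal standalone sketch of the scaling idea just described (this is not the questioner's code; the helper name is made up). Mapping the brightness onto 0.0-1.0 and multiplying keeps every channel in the 0-255 range:
import math

def scale_color(rgb, brightness):
    """Scale an (r, g, b) tuple by a brightness value in the range 0-255."""
    factor = brightness / 255  # map 0-255 onto 0.0-1.0 and multiply
    return tuple(math.floor(c * factor) for c in rgb)

print(scale_color((245, 0, 30), 128))  # -> (122, 0, 15)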
Some minimal code:
elif 'brightness' in payload:  # brightness changed
    print(f"brightness changed: {payload['brightness']}")
    color = {}
    led_info = sdk.get_led_positions_by_device_index(device_index)
    current_color = sdk.get_led_colors_by_device_index(device_index, list(led_info))
    current_color2 = tuple(current_color.values())
    print(f"now current color is: {current_color2}")
    current_color3 = current_color2[1]
    print(type(current_color3))
    print(current_color3)
    fac = payload['brightness']
    print(fac)
    color['r'] = math.floor(current_color3[0] / fac)
    color['g'] = math.floor(current_color3[1] / fac)
    color['b'] = math.floor(current_color3[2] / fac)
    set_all_device_leds(device_index, color)
    state_payload["state"] = "ON"
    state_payload["brightness"] = payload["brightness"]
    state_payload["color"] = color
    print(state_payload["color"])
For a view of my full sources (if needed), here's the GitHub link: https://github.com/Tenn0/iCue2sACN/blob/67ab0a52eeb127d98c768228f7015326323a4a1c/src/main.py
(the function is at line 177)

Creating an ASCII art world map

I'd like to render an ASCII art world map given this GeoJSON file.
My basic approach is to load the GeoJSON into Shapely, transform the points using pyproj to Mercator, and then do a hit test on the geometries for each character of my ASCII art grid.
It looks (edit: mostly) OK when centered on the prime meridian:
But centered on New York City (lon_0=-74), it suddenly goes haywire:
I'm fairly sure I'm doing something wrong with the projections here. (And it would probably be more efficient to transform the ASCII map coordinates to lat/lon than transform the whole geometry, but I am not sure how.)
import functools
import json
import shutil
import sys
import pyproj
import shapely.geometry
import shapely.ops

# Load the map
with open('world-countries.json') as f:
    countries = []
    for feature in json.load(f)['features']:
        # buffer(0) is a trick for fixing polygons with overlapping coordinates
        country = shapely.geometry.shape(feature['geometry']).buffer(0)
        countries.append(country)
mapgeom = shapely.geometry.MultiPolygon(countries)

# Apply a projection
tform = functools.partial(
    pyproj.transform,
    pyproj.Proj(proj='longlat'),           # input: WGS84
    pyproj.Proj(proj='webmerc', lon_0=0),  # output: Web Mercator
)
mapgeom = shapely.ops.transform(tform, mapgeom)

# Convert to ASCII art
minx, miny, maxx, maxy = mapgeom.bounds
srcw = maxx - minx
srch = maxy - miny
dstw, dsth = shutil.get_terminal_size((80, 20))

for y in range(dsth):
    for x in range(dstw):
        pt = shapely.geometry.Point(
            (srcw*x/dstw) + minx,
            (srch*(dsth-y-1)/dsth) + miny  # flip vertically
        )
        if any(country.contains(pt) for country in mapgeom):
            sys.stdout.write('*')
        else:
            sys.stdout.write(' ')
    sys.stdout.write('\n')
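(As an aside on the idea, mentioned above the code, of transforming the ASCII map coordinates back to lat/lon instead of projecting the whole geometry: a hedged sketch of the inverse transform using the modern pyproj Transformer API, separate from the script above.)
from pyproj import Transformer
# Web Mercator -> WGS84, so each ASCII cell centre could be mapped back to lon/lat
to_lonlat = Transformer.from_crs("EPSG:3857", "EPSG:4326", always_xy=True)
lon, lat = to_lonlat.transform(0.0, 0.0)  # the centre of the projected map
print(lon, lat)  # approximately (0.0, 0.0)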
I made an edit at the bottom after discovering a new problem (why there is no Canada, and the unreliability of Shapely and pyproj).
Even though it does not exactly solve the problem, I believe this approach has more potential than using pyproj and Shapely, and in the future, if you do more ASCII art, it will give you more possibilities and flexibility. First I will list the pros and cons.
PS: Initially I wanted to find the problem in your code, but I had trouble running it, because pyproj was returning an error.
PROS
1) I was able to extract all points (Canada is really missing) and rotate the image
2) The processing is very fast, and therefore you can create animated ASCII art.
3) Printing is done all at once, without the need to loop
CONS (known issues, solvable)
1) This approach definitely does not translate geo-coordinates correctly - it is too flat; it should look more spherical
2) I didn't take the time to find a solution for filling the countries, so only the borders have '*'. This approach therefore needs an algorithm to fill the countries. I think it shouldn't be a problem, since the JSON file contains the countries separately.
3) You need two extra libraries besides numpy - opencv (you can use PIL instead) and colorama, because my example is animated and I needed to 'clean' the terminal by moving the cursor to (0,0) instead of using os.system('cls')
4) I only made it run in Python 3. It works in Python 2 too, but I get an error with sys.stdout.buffer
Change the terminal font size to the smallest setting so that the printed characters fit in the terminal. The smaller the font, the better the resolution.
The animation should look like the map is 'rotating'
I used a little bit of your code to extract the data. The steps are in the comments.
import json
import sys
import numpy as np
import colorama
import time
import cv2

# understand terminal_size as how many letters in the X axis and how many in the Y axis. Sorry, not a good name
if len(sys.argv) > 1:
    terminal_size = (int(sys.argv[1]), int(sys.argv[2]))
else:
    terminal_size = (230, 175)

with open('world-countries.json') as f:
    countries = []
    minimal = 0  # This can be dangerous. Expecting negative values
    maximal = 0  # Expecting bigger values than 0
    for feature in json.load(f)['features']:  # getting data - I pretend here that geo coordinates are actually indexes into my numpy array
        indexes = np.int16(np.array(feature['geometry']['coordinates'][0])*2)
        if indexes.min() < minimal:
            minimal = indexes.min()
        if indexes.max() > maximal:
            maximal = indexes.max()
        countries.append(indexes)

countries = (np.array(countries)+np.abs(minimal))  # Transform geo-coordinates to image coordinates
correction = np.abs(minimal)  # because geo-coordinates have negative values, I need to shift them to start at 0 on the x axis

colorama.init()

def move_cursor(x, y):
    print("\x1b[{};{}H".format(y+1, x+1))

move = 0  # 'rotate' the globe
for i in range(1000):
    image = np.zeros(shape=[maximal+correction+1, maximal+correction+1])  # creating a clean image
    move -= 1  # you need to rotate with negative values,
    # because negative ones are understood by numpy; positive ones will end up with an error
    for i in countries:  # VERY STRANGE: when parsing the json, some countries have a different JSON structure
        if len(i.shape) == 2:
            image[i[:, 1], i[:, 0]+move] = 255  # indexes that once were geo-coordinates now serve to position the countries in the image
        if len(i.shape) == 3:
            image[i[0][:, 1], i[0][:, 0]+move] = 255
    cut = np.where(image == 255)  # Bounding box
    if move == -1:  # creating the bounding box here - removing empty edges from the sides, top and bottom - we need space. This needs to be done only once
        max_x, min_x = cut[0].max(), cut[0].min()
        max_y, min_y = cut[1].max(), cut[1].min()
    new_image = image[min_x:max_x, min_y:max_y]  # the bounding box
    new_image = new_image[::-1]  # reverse, because the map is upside down
    new_image = cv2.resize(new_image, terminal_size)  # resize so it fits inside the terminal
    ascii = np.chararray(shape=new_image.shape).astype('|S4')  # create container for the ASCII image
    ascii[:, :] = ''  # chararray contains some random letters - dunno why... cleaning it
    ascii[:, -1] = '\n'  # because I print everything all at once, I am creating new lines at the end of the image
    new_image[:, -1] = 0  # at the end of the image there can be country borders which would overwrite the '\n' created one step above
    ascii[np.where(new_image > 0)] = '*'  # transforming the image array to a chararray. Better to say, anything that has a pixel value higher than 0 will be a star in the chararray mask
    move_cursor(0, 0)  # 'cleaning' the terminal for the new animation frame
    sys.stdout.buffer.write(ascii)  # print into terminal
    time.sleep(0.025)  # FPS
Maybe it would be good to explain the main algorithm in the code. I like to use numpy wherever I can. The whole idea is that I pretend the coordinates in the image, or whatever they may be (in your case geo-coordinates), are matrix indexes. I then have two matrices: the real image and a chararray as a mask. I take the indexes of the interesting pixels in the real image and, for the same indexes in the chararray mask, I assign any letter I want. Thanks to this, the whole algorithm doesn't need a single loop.
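A tiny self-contained illustration of that index-as-mask idea (toy data, not taken from the answer's code):
import numpy as np
image = np.zeros((5, 8))
rows = np.array([1, 2, 3])
cols = np.array([2, 4, 6])
image[rows, cols] = 255              # the "interesting" pixels
mask = np.full(image.shape, ' ', dtype='<U1')
mask[image > 0] = '*'                # same indexes, now as characters
print('\n'.join(''.join(row) for row in mask))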
About future possibilities
Imagine you also have information about the terrain (altitude). Let's say you somehow create a grayscale image of the world map where the gray shades express altitude. Such a grayscale image would have shape (x, y). You prepare a 3D matrix with shape [x, y, 256]. To each of the 256 layers in the 3D matrix you assign one letter (' ....;;;;###' and so on) that will express the shade.
When you have this prepared, you can take your grayscale image, where any pixel actually has 3 coordinates: x, y and the shade value. So you will have 3 arrays of indexes from your grayscale map image: x, y, shade. Your new chararray will simply be an extraction from the 3D matrix of layer letters, because:
# Preparation phase
x, y = grayscale.shape
matrix3d = np.chararray(shape=[x, y, 256])  # renamed from '3Dmatrix': identifiers cannot start with a digit
table = ' ......;;;;;;;######'  # shade ramp; extend it with more characters for finer shading
for i in range(256):
    matrix3d[:, :, i] = table[i * len(table) // 256]  # spread the 256 shade levels over the ramp
x_indexes, y_indexes = np.indices((x, y)).reshape(2, -1)  # every (row, column) pair, flattened
chararray_image = np.chararray(shape=[x, y])
# Ready to print
...
shades = grayscale.reshape(x*y)  # integer shade value (0-255) per pixel
chararray_image[:, :] = matrix3d[(x_indexes, y_indexes, shades)].reshape(x, y)
Because there is no loop in this process and you can print the chararray all at once, you can actually print a movie into the terminal at a high FPS.
For example, if you have footage of a rotating earth, you can make something like this (250*70 letters), render time 0.03658 s.
You can of course take it to the extreme and do super-resolution in your terminal, but the resulting frame time is not that good: 0.23157 s, which is approximately 4-5 FPS. It is interesting to note that the calculation itself is enormously fast, but the terminal simply cannot handle the printing, so the low FPS is due to the limitations of the terminal and not of the calculation: the calculation for this high resolution took 0.00693 s, which is 144 FPS.
BIG EDIT - contradicting some of the statements above
I accidentally opened the raw JSON file and found out that CANADA and RUSSIA are there with full, correct coordinates. I made the mistake of relying on the fact that we both didn't have Canada in the result, so I assumed my code was OK. Inside the JSON, the data has a different, NOT-UNIFIED structure. Russia and Canada use 'MultiPolygon', so you need to iterate over it.
What does that mean? Don't rely on Shapely and pyproj. Obviously they can't extract some countries, and if they can't do that reliably, you can't expect them to do anything more complicated.
After modifying the code, everything is all right.
CODE: This is how to load the file correctly
...
with open('world-countries.json') as f:
    countries = []
    minimal = 0
    maximal = 0
    for feature in json.load(f)['features']:  # getting data - I pretend here, that geo coordinates are actually indexes of my numpy array
        for k in range(len(feature['geometry']['coordinates'])):
            indexes = np.int64(np.array(feature['geometry']['coordinates'][k]))
            if indexes.min() < minimal:
                minimal = indexes.min()
            if indexes.max() > maximal:
                maximal = indexes.max()
            countries.append(indexes)
...

Change color depending on height in Mayavi iso_surface

Is it possible to change the colour of an iso-surface depending on the height of the points (in Python / Mayavi)?
I can create an iso-surface visualization with my script, but I don't know how to make the iso_surface change colour along the z axis, so that it will be, let's say, black at the bottom and white at the top of the plot.
I need this in order to make sense of the visualization when it is viewed from directly above the graph.
If you know any other way to achieve this, please let me know as well.
I only want to show one iso_surface plot.
I managed to do this by combining some code from the examples http://docs.enthought.com/mayavi/mayavi/auto/example_atomic_orbital.html#example-atomic-orbital and http://docs.enthought.com/mayavi/mayavi/auto/example_custom_colormap.html . Basically you must create a surface as in the atomic-orbital example and then make it change colour depending on x. You must create an array of values for x. My code (the relevant part) is:
#src.image_data.point_data.add_array(np.indices(list(self.data.shape)[self.nx,self.ny,self.nz])[2].T.ravel())
src.image_data.point_data.add_array(np.indices(list(self.data.shape))[0].T.ravel())
src.image_data.point_data.get_array(1).name = 'z'
# Make sure that the dataset is up to date with the different arrays:
src.image_data.point_data.update()
# We select the 'scalar' attribute, ie the norm of Phi
src2 = mlab.pipeline.set_active_attribute(src, point_scalars='scalar')
# Cut isosurfaces of the norm
contour = mlab.pipeline.contour(src2)
# contour.filter.contours=[plotIsoSurfaceContours]
# contour.filter.contours=[plotIsoSurfaceContours[0]]
min_c = min(contour.filter._data_min * 1.05,contour.filter._data_max)
max_c = max(contour.filter._data_max * 0.95,contour.filter._data_min)
plotIsoSurfaceContours = [ max(min(max_c,x),min_c) for x in plotIsoSurfaceContours ]
contour.filter.contours= plotIsoSurfaceContours
# Now we select the 'angle' attribute, ie the phase of Phi
contour2 = mlab.pipeline.set_active_attribute(contour, point_scalars='z')
# And we display the surface. The colormap is the current attribute: the phase.
# mlab.pipeline.surface(contour2, colormap='hsv')
xxx = mlab.pipeline.surface(contour2, colormap='gist_ncar')
colorbar = xxx.module_manager.scalar_lut_manager
colorbar.reverse_lut = True
lut = xxx.module_manager.scalar_lut_manager.lut.table.to_array()
lut[:,-1] = int(plotIsoSurfaceOpacity * 254)
xxx.module_manager.scalar_lut_manager.lut.table = lut
# mlab.colorbar(title='Phase', orientation='vertical', nb_labels=3)
self.data is my data, and for unknown reasons, if you want to set the opacity of your surface, you must reverse the LUT first and then set the opacity. The multiplication by 254 instead of 255 is done to avoid a possible bug in Mayavi.
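For context, a minimal self-contained sketch of the same idea, adapted from the linked atomic-orbital example (the toy data and variable names are illustrative, not the answerer's setup):
import numpy as np
from mayavi import mlab
# Toy scalar field on a 3D grid
x, y, z = np.mgrid[-2:2:50j, -2:2:50j, -2:2:50j]
values = np.exp(-(x**2 + y**2 + z**2))
src = mlab.pipeline.scalar_field(x, y, z, values)
# Attach the z coordinate of every point as a second point-data array named 'z'
src.image_data.point_data.add_array(z.T.ravel())
src.image_data.point_data.get_array(1).name = 'z'
src.image_data.point_data.update()
# Contour on the original scalars, then recolour the resulting surface by 'z'
scalars = mlab.pipeline.set_active_attribute(src, point_scalars='scalar')
contour = mlab.pipeline.contour(scalars)
contour_z = mlab.pipeline.set_active_attribute(contour, point_scalars='z')
mlab.pipeline.surface(contour_z, colormap='gist_ncar')
mlab.show()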
I hope this helps someone.
