I am trying to make a small application using PyQt5 and PyOpenGL. Everything works, but rendering takes far too long even with only one sphere. I have tried different routes to optimise the speed of the app, and right now I am using a simple QWindow with an OpenGL surface.
I managed to figure out that it is the context.swapBuffers call that takes a long time to complete: it varies between approx. 0.01s (which is fine) and 0.05s (which is way too long) when displaying one sphere with some shading and 240 vertices.
Now my questions are the following: Is this normal? If so, is there a way to speed this process up, or is it related to how PyQt works, since it is a Python wrapper around the library? Basically: is there any way for me to continue developing this program without needing to learn C++? It's quite a simple application that just needs to visualise some atomic structures and be able to manipulate them.
Is there another GUI toolkit I could use to have less overhead when working with OpenGL from PyOpenGL?
This is the method that does the rendering:
def renderNow(self):
    if not self.isExposed():
        return
    self.m_update_pending = False
    needsInitialize = False
    if self.m_context is None:
        self.m_context = QOpenGLContext(self)
        self.m_context.setFormat(self.requestedFormat())
        self.m_context.create()
        needsInitialize = True
    self.m_context.makeCurrent(self)
    if needsInitialize:
        self.m_gl = self.m_context.versionFunctions()
        self.m_gl.initializeOpenGLFunctions()
        self.initialize()
    self.render()
    self.m_context.swapBuffers(self)
    if self.m_animating:
        self.renderLater()
I am using OpenGL directly, without Qt's OpenGL wrappers; the surface format is given by:
fmt = QSurfaceFormat()
fmt.setVersion(4, 2)
fmt.setProfile(QSurfaceFormat.CoreProfile)
fmt.setSamples(4)
fmt.setSwapInterval(1)
QSurfaceFormat.setDefaultFormat(fmt)
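(For timing experiments, vsync can be disabled on this same format object before it is made the default; note that driver settings may still override the request.)

fmt.setSwapInterval(0)  # ask swapBuffers not to wait for the next display refresh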
Edit1:
Some more clarification on how my code works:
def render(self):
    t1 = time.time()
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    wtvMatrix = self.camera.get_wtv_mat()
    transformMatrix = matrices.get_projection_matrix(60, self.width() / self.height(), 0.1, 30, matrix=wtvMatrix)
    transformMatrixLocation = glGetUniformLocation(self.shader, "transformMatrix")
    glUniformMatrix4fv(transformMatrixLocation, 1, GL_FALSE, transformMatrix)
    eye_pos_loc = glGetUniformLocation(self.shader, "eye_world_pos0")
    glUniform3f(eye_pos_loc, self.camera.position[0], self.camera.position[1], self.camera.position[2])
    glDrawElementsInstanced(GL_TRIANGLES, self.num_vertices, GL_UNSIGNED_INT, None, self.num_objects)
    print("drawing took:{}".format(time.time() - t1))
    self.frame += 1
    t1 = time.time()
    self.m_context.swapBuffers(self)
    print('swapping buffers took:{}'.format(time.time() - t1))
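A side note on render() above: uniform locations are fixed once the shader program is linked, so the glGetUniformLocation lookups could be queried once in initialize() and reused every frame. A small sketch, reusing the names from render():

# in initialize(), after the shader program is linked:
self.transformMatrixLocation = glGetUniformLocation(self.shader, "transformMatrix")
self.eye_pos_loc = glGetUniformLocation(self.shader, "eye_world_pos0")

# in render(), reuse the cached locations:
glUniformMatrix4fv(self.transformMatrixLocation, 1, GL_FALSE, transformMatrix)
glUniform3f(self.eye_pos_loc, self.camera.position[0], self.camera.position[1], self.camera.position[2])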
This is the only drawElementsInstanced that I call. Shaders are set up as follows (sorry for the mess):
VERTEX_SHADER = compileShader("""#version 410
layout(location = 0) in vec3 vertex_position;
layout(location = 1) in vec3 vertex_colour;
layout(location = 2) in vec3 vertex_normal;
layout(location = 3) in mat4 model_mat;
layout(location = 7) in float mat_specular_intensity;
layout(location = 8) in float mat_specular_power;

uniform mat4 transformMatrix;
uniform vec3 eye_world_pos0;

out vec3 normal0;
out vec3 colour;
out vec3 world_pos;
out float specular_intensity;
out float specular_power;
out vec3 eye_world_pos;

void main () {
    colour = vertex_colour;
    normal0 = (model_mat * vec4(vertex_normal, 0.0)).xyz;
    world_pos = (model_mat * vec4(vertex_position, 1.0)).xyz;
    eye_world_pos = eye_world_pos0;
    specular_intensity = mat_specular_intensity;
    specular_power = mat_specular_power;
    gl_Position = transformMatrix * model_mat * vec4(vertex_position, 1.0);
}""", GL_VERTEX_SHADER)
FRAGMENT_SHADER = compileShader("""#version 410
in vec3 colour;
in vec3 normal0;
in vec3 world_pos;
in float specular_intensity;
in float specular_power;
in vec3 eye_world_pos;

out vec4 frag_colour;

struct directional_light {
    vec3 colour;
    float amb_intensity;
    float diff_intensity;
    vec3 direction;
};

uniform directional_light gdirectional_light;

void main () {
    vec4 ambient_colour = vec4(gdirectional_light.colour * gdirectional_light.amb_intensity, 1.0f);
    vec3 light_direction = -gdirectional_light.direction;
    vec3 normal = normalize(normal0);
    float diffuse_factor = dot(normal, light_direction);
    vec4 diffuse_colour = vec4(0, 0, 0, 0);
    vec4 specular_colour = vec4(0, 0, 0, 0);
    if (diffuse_factor > 0) {
        diffuse_colour = vec4(gdirectional_light.colour, 1.0f) * gdirectional_light.diff_intensity * diffuse_factor;
        vec3 vertex_to_eye = normalize(eye_world_pos - world_pos);
        vec3 light_reflect = normalize(reflect(gdirectional_light.direction, normal));
        float specular_factor = dot(vertex_to_eye, light_reflect);
        if (specular_factor > 0) {
            specular_factor = pow(specular_factor, specular_power);
            specular_colour = vec4(gdirectional_light.colour * specular_intensity * specular_factor, 1.0f);
        }
    }
    frag_colour = vec4(colour, 1.0) * (ambient_colour + diffuse_colour + specular_colour);
}""", GL_FRAGMENT_SHADER)
Now the code that I use when I want to rotate the scene is the following (the camera updates etc. are done in the usual way, as far as I know):
def mouseMoveEvent(self, event):
    dx = event.x() - self.lastPos.x()
    dy = event.y() - self.lastPos.y()
    self.lastPos = event.pos()
    if event.buttons() & QtCore.Qt.RightButton:
        self.camera.mouse_update(dx, dy)
    elif event.buttons() & QtCore.Qt.LeftButton:
        pass
    self.renderNow()
Some final info: all vertex data needed by the shaders is supplied through a VAO that I initialize and bind earlier, in the initialize method. The scene does not contain many objects; I'm just testing with an icosahedron subdivided twice to render a sphere. I also removed the duplicate vertices, but that did not change anything, since that really should not be the bottleneck anyway.
To answer some questions: I did try various OpenGL versions just for giggles, with no change; I tried without vsync, and nothing changed; I tried different sample counts, and nothing changed.
Edit2:
Might be a clue: swapBuffers takes around 0.015s most of the time, but when I start moving around a lot, it stutters and jumps up to 0.05s for some renders. Why is this happening? From what I understand, every render has to process all the data anyway?
Because of the way OpenGL works, the rendering commands you submit are sent to the GPU and executed asynchronously (frankly, even the process of sending them to the GPU is asynchronous). When you request to display the back buffer with a call to swapBuffers, the display driver must wait until the content of the back buffer finishes rendering (i.e. all previously issued commands finish executing), and only then can it swap the buffers.†
If you experience a low frame rate, then you should optimize your rendering code, that is, the work you submit to the GPU. Switching to C++ will not help you here (though it would be a great idea independently).
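One way to see how long the submitted work really takes, rather than where the driver happens to block, is to force synchronization before reading the clock. A rough diagnostic sketch (glFinish blocks until the GPU has executed everything queued; not something to leave in production code):

import time
from OpenGL.GL import glFinish

def timedRender(self):
    # diagnostic drop-in for the render() + swapBuffers() pair in renderNow()
    t0 = time.perf_counter()
    self.render()  # submits commands; returns before the GPU is done
    glFinish()     # block until every queued GL command has executed
    print("render + GPU execution: {:.4f}s".format(time.perf_counter() - t0))
    self.m_context.swapBuffers(self)  # should now be cheap, apart from vsync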
EDIT: You say that when you do nothing then your swapBuffers executes in 0.015 seconds, which is suspiciously ~1/60th of a second. It implies that your rendering code is efficient enough to render at 60 FPS and you have no reason to optimize it yet. What probably happens is that your call to renderNow() from mouseMoveEvent causes re-rendering the scene more than 60 times per second, which is redundant. Instead you should call renderLater() in mouseMoveEvent, and restructure your code accordingly.
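A minimal sketch of that restructuring, following the renderLater()/event() pattern from Qt's OpenGL Window example (it assumes the m_update_pending flag already used in renderNow(), the QtCore module already imported in your code, and QWindow.requestUpdate() from Qt 5.5+):

def renderLater(self):
    # post at most one coalesced update request instead of rendering synchronously
    if not self.m_update_pending:
        self.m_update_pending = True
        self.requestUpdate()

def event(self, event):
    # Qt delivers the coalesced request as an UpdateRequest event
    if event.type() == QtCore.QEvent.UpdateRequest:
        self.renderNow()
        return True
    return super().event(event)

With this in place, mouseMoveEvent only schedules a repaint, and the window is redrawn at most once per frame.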
NOTE: you call swapBuffers twice, once in render() and once in renderNow() immediately after.
DISCLAIMER: I'm not familiar with PyOpenGL.
† swapBuffers may also execute asynchronously, but even then, if the display driver swaps buffers faster than you can render, you will eventually block on the swapBuffers call.
Related
I am writing a 3D graphics toolkit for Python and PyQt5, using PyOpenGL. I am writing my own shaders to go with it, if that helps. What I am trying to do is go from using glBegin to using a Vertex Buffer Object (VBO). I have found the following on using VBOs:
http://www.songho.ca/opengl/gl_vbo.html - I could only scrape together a bit of information from this because it is in C/C++.
How to get VBOs to work with Python and PyOpenGL - This was in Python 2 and was fairly limited as a result.
I cannot, however, piece together what I need to take the vertices of each of my shape objects and compile them into a scene VBO. I also have no idea how the data in the array is laid out. My initGL and paintGL functions are below, as are my vertex and fragment shaders' GLSL code.
def initGL(self):
    self.vertProg = open(self.vertPath, 'r')
    self.fragProg = open(self.fragPath, 'r')
    self.vertCode = self.vertProg.read()
    self.fragCode = self.fragProg.read()
    self.vertShader = shaders.compileShader(self.vertCode, GL_VERTEX_SHADER)
    self.fragShader = shaders.compileShader(self.fragCode, GL_FRAGMENT_SHADER)
    self.shader = shaders.compileProgram(self.vertShader, self.fragShader)
#paintGL uses shape objects, such as cube() or mesh(). Shape objects require the following:
#a list named 'vertices' - This list is a list of points, from which edges and faces are drawn.
#a list named 'wires' - This list is a list of tuples which refer to vertices, dictating where to draw wires.
#a list named 'facets' - This list is a list of tuples which refer to vertices, dictating where to draw facets.
#a bool named 'render' - This bool is used to dictate whether or not to draw the shape.
#a bool named 'drawWires' - This bool is used to dictate whether wires should be drawn.
#a bool named 'drawFaces' - This bool is used to dictate whether facets should be drawn.
def paintGL(self):
    shaders.glUseProgram(self.shader)
    glLoadIdentity()
    gluPerspective(45, self.sizeX / self.sizeY, 0.1, 110.0)  #set perspective?
    glTranslatef(0, 0, self.zoomLevel)  #I used -10 instead of -2 in the PyGame version.
    glRotatef(self.rotateDegreeV + self.vOffset, 1, 0, 0)  #I used 2 instead of 1 in the PyGame version.
    glRotatef(self.rotateDegreeH, 0, 0, 1)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    for s in self.shapes:
        if s.drawWires:
            glBegin(GL_LINES)
            for w in s.wires:
                for v in w:
                    glVertex3fv(s.vertices[v])
            glEnd()
        if s.drawFaces:
            glBegin(GL_QUADS)
            for f in s.facets:
                for v in f:
                    glVertex3fv(s.vertices[v])
            glEnd()
Vertex shader:
#version 120
void main() {
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
Fragment shader:
#version 120
void main() {
    gl_FragColor = vec4(0, 1, 0, 1);
}
In the final form of this project, I want to have information in my buffer for vertex positions, color, and maybe even glow. (That will be achieved when I put this to ray marching eventually.) I also need a way to specify whether or not I should draw the wires and faces.
How do I set up and configure one or more VBOs to transfer all this information to the GPU and OpenGL?
Python 3.7.6, Windows 10
After researching a while longer, I decided to try less specific search terms. I eventually stumbled upon this site: https://www.metamost.com/opengl-with-python/
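The pattern that tutorial walks through boils down to something like the sketch below: put all vertex data into one NumPy array, upload it once into a VBO, describe the interleaved layout, and replace the per-vertex Python loops with a single draw call. The triangle data and attribute locations here are made up for illustration, and a context supporting VAOs (GL 3+) with matching generic in attributes in the vertex shader is assumed:

import ctypes
import numpy as np
from OpenGL.GL import *

# Interleaved layout, one row per vertex: x, y, z, r, g, b (hypothetical data).
vertices = np.array([
    -0.5, -0.5, 0.0,   1.0, 0.0, 0.0,
     0.5, -0.5, 0.0,   0.0, 1.0, 0.0,
     0.0,  0.5, 0.0,   0.0, 0.0, 1.0,
], dtype=np.float32)

# One-time set-up (e.g. at the end of initGL), with a current GL context:
vao = glGenVertexArrays(1)
glBindVertexArray(vao)
vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)

# Describe the layout: attribute 0 = position, attribute 1 = colour.
stride = 6 * vertices.itemsize  # six floats per vertex
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, ctypes.c_void_p(0))
glEnableVertexAttribArray(0)
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, stride, ctypes.c_void_p(3 * vertices.itemsize))
glEnableVertexAttribArray(1)

# Every frame (e.g. in paintGL): bind and draw, with no per-vertex Python loop.
glBindVertexArray(vao)
glDrawArrays(GL_TRIANGLES, 0, 3)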
It is easy to resize an entity in code:
self.transform = Qt3DCore.QTransform()
self.transform.setScale(1.5)
But I want to resize the entity dynamically: it should enlarge when I move the camera away from it and shrink when I approach with the camera. Is it possible to do this using a suitable shader?
I found this link, which contains code that I have added to my vertex shader:
in vec3 vertexPosition;
uniform mat4 modelViewProjection;

void main()
{
    float reciprScaleOnscreen = 0.005;
    float w = (modelViewProjection * vec4(0.0, 0.0, 0.0, 1.0)).w;
    w *= reciprScaleOnscreen;
    gl_Position = modelViewProjection * vec4(vertexPosition.xyz * w, 1.0);
}
So there is no need to scale entities in the program; it is simpler to use a shader. The trick works because the clip-space w of the entity's origin grows with its distance from the camera, so multiplying the vertex positions by w (times a constant) cancels the perspective divide and keeps the entity at a constant on-screen size.
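If a program-side version is ever needed anyway, a rough CPU equivalent could look like the sketch below (hypothetical names; assuming the PyQt3D package, it rescales the entity from the camera distance whenever the camera moves):

from PyQt5 import Qt3DCore, Qt3DRender  # requires PyQt3D

def update_scale(camera, transform, scale_per_unit_distance=0.005):
    # Scale proportionally to the camera-to-entity distance, so the
    # on-screen size stays roughly constant (the CPU analogue of the shader).
    distance = (camera.position() - transform.translation()).length()
    transform.setScale(scale_per_unit_distance * distance)

# Re-run whenever the camera moves, e.g.:
# camera.positionChanged.connect(lambda _: update_scale(camera, transform))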
The pyautogui scroll amount value 1 is too small and 2 is too big for a specific task I want to do. Is there a way to scroll somewhere in between? I tried 1.5, but it didn't work.
I'm on OSX 10.13 and I can certainly scroll with more precision than what pyautogui is doing, when using the trackpad.
This is an issue that has been annoying me, so I took a look at the pyautogui source code and was able to solve the problem. This will probably be quite a long answer; I'll try to explain every step in detail. Note that this only works for Mac. (scroll to the bottom if you want the answer, not the explanation)
First, here is the source code for the scroll function:
def _scroll(clicks, x=None, y=None):
    _vscroll(clicks, x, y)

def _vscroll(clicks, x=None, y=None):
    _moveTo(x, y)
    clicks = int(clicks)
    for _ in range(abs(clicks) // 10):
        scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
            None,  # no source
            Quartz.kCGScrollEventUnitLine,  # units
            1,  # wheelCount (number of dimensions)
            10 if clicks >= 0 else -10)  # vertical movement
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
    scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
        None,  # no source
        Quartz.kCGScrollEventUnitLine,  # units
        1,  # wheelCount (number of dimensions)
        clicks % 10 if clicks >= 0 else -1 * (-clicks % 10))  # vertical movement
    Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
Let's break this down:
1.
def _scroll(clicks, x=None, y=None):
    _vscroll(clicks, x, y)
This is just a wrapper for the _vscroll function, simple enough.
2.
The main thing to realise is that pyautogui, for Mac, uses Quartz Core Graphics; all it does is provide a simpler, more readable wrapper around the Quartz code.
With the scroll function, what it is doing is creating a scroll event:
scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent
And then posting it:
Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
Ignore the details of the posting; we won't be changing any of that.
At first glance this code seems to repeat itself, but the block after the for loop has a purpose: it sends the remainder (clicks % 10) that the loop's batches of ten don't cover. Since the rewrite below sends one event per click, the remainder block is no longer needed; I deleted it from my source code and everything works.
3.
So we are left with the following code (ignoring the mouse moveTo, which has nothing to do with the scrolling itself):
clicks = int(clicks)
for _ in range(abs(clicks) // 10):
    scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
        None,  # no source
        Quartz.kCGScrollEventUnitLine,  # units
        1,  # wheelCount (number of dimensions)
        10 if clicks >= 0 else -10)  # vertical movement
    Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
The format of a CGEventCreateScrollWheelEvent is the following:
Quartz.CGEventCreateScrollWheelEvent(source, units, wheelCount, scroll distance)
The source in this case is None, don't worry about that, and we are only dealing with 1 wheel, so wheelCount is 1.
What the source code is doing, therefore, is scrolling a distance of ±10 in Quartz.kCGScrollEventUnitLine units, which are your computer's units for one 'scroll'. It repeats this in a for loop for however many times you specify, because the system can bug out if too many scroll units are sent at once.
Therefore, the minimum one can scroll on pyautogui is one iteration of this loop, which sends one computer unit. The problem is that these units are too big for fine scrolling.
SOLUTION
We need to change the minimum value we can send. Currently it is 1 Quartz.kCGScrollEventUnitLine, but we can switch to the base (pixel) units by replacing the unit constant with a zero. I also see no need to floor-divide clicks (in range(abs(clicks) // 10)) and then send 10 scroll units at a time.
We can change these two parts, and remove the unnecessary repetition:
def _scroll(clicks, x=None, y=None):
    _vscroll(clicks, x, y)

def _vscroll(clicks, x=None, y=None):
    _moveTo(x, y)
    clicks = int(clicks)
    for _ in range(abs(clicks)):  # <---------------------------------
        scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
            None,  # no source
            0,  # units <---------------------------------------------
            1,  # wheelCount (number of dimensions)
            1 if clicks >= 0 else -1)  # vertical movement <-----------
        Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
If you don't feel comfortable editing the source code itself, you can use these functions in your code directly, skipping out the need for pyautogui. Just have pyobjc installed (which you'll have anyway if you use pyautogui), remove _moveTo(x, y) and the keyword arguments, and use the following imports:
from Quartz.CoreGraphics import CGEventCreateScrollWheelEvent, CGEventPost, kCGHIDEventTap
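Putting that together, a self-contained version might look like this (fine_scroll is just an illustrative name):

from Quartz.CoreGraphics import (CGEventCreateScrollWheelEvent,
                                 CGEventPost, kCGHIDEventTap)

def fine_scroll(clicks):
    # Scroll by small base units; the sign of clicks gives the direction.
    for _ in range(abs(clicks)):
        event = CGEventCreateScrollWheelEvent(
            None,                      # no source
            0,                         # base (pixel) units instead of lines
            1,                         # wheelCount (number of dimensions)
            1 if clicks >= 0 else -1)  # one small unit per event
        CGEventPost(kCGHIDEventTap, event)

fine_scroll(-5)  # five fine-grained downward scroll events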
I realise this answer is a bit late, but I came looking for answers to this problem and saw your question; When I solved the problem I thought I would share the knowledge.
I really struggled with this one, so I thought I'd post my solution for Windows.
After a quick pip install pywin32, I got access to the necessary win32api & win32con, among others.
NOTE: The last time I checked, pywin32 was only supported for:
Python :: 2.7
Python :: 3.5
Python :: 3.6
Python :: 3.7
import time
import win32api
import win32con


def scroll(clicks=0, delta_x=0, delta_y=0, delay_between_ticks=0):
    """
    Source: https://learn.microsoft.com/en-gb/windows/win32/api/winuser/nf-winuser-mouse_event?redirectedfrom=MSDN

    void mouse_event(
        DWORD dwFlags,
        DWORD dx,
        DWORD dy,
        DWORD dwData,
        ULONG_PTR dwExtraInfo
    );

    If dwFlags contains MOUSEEVENTF_WHEEL,
    then dwData specifies the amount of wheel movement.
    A positive value indicates that the wheel was rotated forward, away from the user;
    a negative value indicates that the wheel was rotated backward, toward the user.
    One wheel click is defined as WHEEL_DELTA, which is 120.

    :param clicks: number of wheel ticks; the sign gives the direction
    :param delta_x: horizontal mouse position passed to the event
    :param delta_y: vertical mouse position passed to the event
    :param delay_between_ticks: pause in seconds between ticks
    """
    if clicks > 0:
        increment = win32con.WHEEL_DELTA
    else:
        increment = win32con.WHEEL_DELTA * -1
    for _ in range(abs(clicks)):
        win32api.mouse_event(win32con.MOUSEEVENTF_WHEEL, delta_x, delta_y, increment, 0)
        time.sleep(delay_between_ticks)
Then, after defining
click_point = x_position, y_position
I use
pyautogui.moveTo(x=click_point[0], y=click_point[1], duration=0.25)
to make sure that my mouse is in the correct location, and then call the above scroll function:
scroll(clicks=-4, delay_between_ticks=0.1)
to scroll down 4 ticks with a 100 ms delay between ticks. (Note the keyword argument: passed positionally, the second parameter would be delta_x, not the delay.)
I am trying to time HoughCircles in Python and C++ to see if C++ gives an edge in processing time (intuitively it should!).
Versions
python: 3.6.4
gcc compiler: gcc (Ubuntu 5.4.0-6ubuntu1~16.04.9) 5.4.0 20160609
cmake : 3.5.1
opencv : 3.4.1
I actually installed OpenCV using Anaconda; surprisingly, the C++ version also worked with it.
The image I am using is given here:
Python code
import cv2
import time
import sys


def hough_transform(src, dp, minDist, param1=100, param2=100, minRadius=0, maxRadius=0):
    gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY)
    start_time = time.time()
    circles = cv2.HoughCircles(gray,
                               cv2.HOUGH_GRADIENT,
                               dp=dp,
                               minDist=minDist,
                               param1=param1,
                               param2=param2,
                               minRadius=minRadius,
                               maxRadius=maxRadius)
    end_time = time.time()
    print("Time taken for hough circle transform is : {}".format(end_time - start_time))
    # if circles is not None:
    #     circles = circles.reshape(circles.shape[1], circles.shape[2])
    # else:
    #     raise ValueError("ERROR!!!!!! circle not detected try tweaking the parameters or the min and max radius")
    #
    # a = input("enter 1 to visualize")
    # if int(a) == 1:
    #     for circle in circles:
    #         center = (circle[0], circle[1])
    #         radius = circle[2]
    #         cv2.circle(src, center, radius, (255, 0, 0), 5)
    #
    #     cv2.namedWindow("Hough circle", cv2.WINDOW_NORMAL)
    #     cv2.imshow("Hough circle", src)
    #     cv2.waitKey(0)
    #     cv2.destroyAllWindows()
    return


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError("usage: python hough_circle.py <path to image>")
    image = cv2.imread(sys.argv[1])
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    hough_transform(image, 1.7, 100, 50, 30, 690, 700)
C++ code
#include <iostream>
#include <opencv2/opencv.hpp>
#include <ctime>
using namespace std;
using namespace cv;
void hough_transform(Mat src, double dp, double minDist, double param1=100, double param2=100, int minRadius=0, int maxRadius=0 )
{
Mat gray;
cvtColor( src, gray, COLOR_RGB2GRAY);
vector<Vec3f> circles;
int start_time = clock();
HoughCircles( gray, circles, HOUGH_GRADIENT, dp, minDist, param1, param2, minRadius, maxRadius);
int end_time = clock();
cout<<"Time taken hough circle transform: "<<(end_time-start_time)/double(CLOCKS_PER_SEC)<<endl;
// cout<<"Enter 1 to visualize the image";
// int vis;
// cin>>vis;
// if (vis == 1)
// {
// for( size_t i = 0; i < circles.size(); i++ )
// {
// Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
// int radius = cvRound(circles[i][2]);
// circle( src, center, radius, Scalar(255,0,0), 5);
// }
// namedWindow( "Hough Circle", WINDOW_NORMAL);
// imshow( "Hough Circle", src);
// waitKey(0);
// destroyAllWindows();
// }
return;
}
int main(int argc, char** argv)
{
if( argc != 2 ){
cout<<"Usage hough_circle <path to image.jpg>";
return -1;
}
Mat image;
image = imread(argv[1]);
cvtColor(image,image,COLOR_BGR2RGB);
hough_transform(image,1.7,100,50,30,690,700);
return 0;
}
I was hoping for the C++ Hough transform to ace Python, but what happened was actually the opposite.
Python result:
C++ result:
Even though C++ ran the complete program ~2x faster, it is much slower in the Hough transform itself. Why is that? This is very counterintuitive. What am I missing here?
I wouldn't expect any difference between the two at all, to be honest. The Python library is more than likely a wrapper around the C++ library, meaning that once they get into the core of OpenCV they will have identical performance if compiled with the same optimisation flags.
The only slight slowdown I'd expect is Python getting to that point, and with so little Python code actually there, the difference is unlikely to be measurable. The fact that you're seeing it the other way around doesn't prove anything, as you're performing a single test and getting a difference of 0.2s, which could trivially be down to the hard disk seeking to the file to process.
I was actually comparing two different times: wall time and CPU time.
On Linux, C++'s clock() gives CPU time, while on Windows it gives wall time. When I changed my Python code to time.clock(), which likewise measures CPU time on Linux, both gave the same results.
As explained by @UKMonkey, the time to calculate the Hough transform in Python and C++ showed no difference at all. But running the entire program was almost 2.5 times faster in C++ (looped 100 times). Hands down to C++ :P
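For reference, in Python 3 the two clocks can be measured explicitly; time.clock() is deprecated since 3.3 and removed in 3.8. A small sketch with a placeholder workload:

import time

t_wall = time.perf_counter()  # wall-clock time
t_cpu = time.process_time()   # CPU time of this process only

_ = sum(i * i for i in range(10 ** 6))  # placeholder workload

print("wall time: {:.4f}s".format(time.perf_counter() - t_wall))
print("CPU time : {:.4f}s".format(time.process_time() - t_cpu))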
We recently encountered strange behavior in VTK (v7) when rendering a transparent object for the second time using depth peeling.
The first render looks fine, drawing the transparency as it should. After closing the render window and creating another one (same set-up), VTK shows an empty render, and the application/Python crashes after closing that window.
Problem Event Name: APPCRASH
Application Name: Test.exe
Application Version: 0.0.0.0
Application Timestamp: 57be97a5
Fault Module Name: nvoglv64.DLL
Fault Module Version: 9.18.13.2762
Fault Module Timestamp: 526ed933
Exception Code: c0000005
Exception Offset: 000000000062e180
I included a small example below; both Python (3.5) and C++ seem to behave similarly.
C++:
#include "vtkCylinderSource.h"
#include "vtkPolyDataMapper.h"
#include "vtkActor.h"
#include "vtkRenderer.h"
#include "vtkRenderWindow.h"
#include "vtkRenderWindowInteractor.h"
#include "vtkProperty.h"
#include "vtkCamera.h"
#include "vtkSmartPointer.h"
int main()
{
    // This creates a polygonal cylinder model with eight circumferential facets
    // (i.e., in practice an octagonal prism).
    vtkSmartPointer<vtkCylinderSource> cylinder =
        vtkSmartPointer<vtkCylinderSource>::New();
    cylinder->SetResolution(8);

    // The mapper is responsible for pushing the geometry into the graphics library.
    // It may also do color mapping, if scalars or other attributes are defined.
    vtkSmartPointer<vtkPolyDataMapper> cylinderMapper =
        vtkSmartPointer<vtkPolyDataMapper>::New();
    cylinderMapper->SetInputConnection(cylinder->GetOutputPort());

    // The actor is a grouping mechanism: besides the geometry (mapper), it
    // also has a property, transformation matrix, and/or texture map.
    // Here we set its color and rotate it around the X and Y axes.
    vtkSmartPointer<vtkActor> cylinderActor =
        vtkSmartPointer<vtkActor>::New();
    cylinderActor->SetMapper(cylinderMapper);
    cylinderActor->GetProperty()->SetColor(1.0000, 0.3882, 0.2784);
    cylinderActor->RotateX(30.0);
    cylinderActor->RotateY(-45.0);
    cylinderActor->GetProperty()->SetOpacity(0.5);

    // The renderer generates the image
    // which is then displayed on the render window.
    // It can be thought of as a scene to which the actor is added.
    vtkSmartPointer<vtkRenderer> renderer =
        vtkSmartPointer<vtkRenderer>::New();
    renderer->AddActor(cylinderActor);
    renderer->SetBackground(0.1, 0.2, 0.4);

    // Zoom in a little by accessing the camera and invoking its "Zoom" method.
    renderer->ResetCamera();
    renderer->GetActiveCamera()->Zoom(1.5);

    // The render window is the actual GUI window
    // that appears on the computer screen.
    vtkSmartPointer<vtkRenderWindow> renderWindow =
        vtkSmartPointer<vtkRenderWindow>::New();
    renderWindow->SetSize(200, 200);
    renderWindow->AddRenderer(renderer);

    // 1. Use a render window with alpha bits (as initial value is 0 (false)):
    renderWindow->SetAlphaBitPlanes(true);
    // 2. Force to not pick a framebuffer with a multisample buffer
    //    (as initial value is 8):
    renderWindow->SetMultiSamples(0);
    // 3. Choose to use depth peeling (if supported) (initial value is 0 (false)):
    renderer->SetUseDepthPeeling(true);
    // 4. Set depth peeling parameters
    //    - Set the maximum number of rendering passes (initial value is 4):
    renderer->SetMaximumNumberOfPeels(100);
    //    - Set the occlusion ratio (initial value is 0.0, exact image):
    renderer->SetOcclusionRatio(0.1);

    // The render window interactor captures mouse events
    // and will perform appropriate camera or actor manipulation
    // depending on the nature of the events.
    vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor =
        vtkSmartPointer<vtkRenderWindowInteractor>::New();
    renderWindowInteractor->SetRenderWindow(renderWindow);

    // This starts the event loop and as a side effect causes an initial render.
    renderWindowInteractor->Start();
    // This creates a polygonal cylinder model with eight circumferential facets
    // (i.e., in practice an octagonal prism).
    vtkSmartPointer<vtkCylinderSource> cylinder2 =
        vtkSmartPointer<vtkCylinderSource>::New();
    cylinder2->SetResolution(8);

    // The mapper is responsible for pushing the geometry into the graphics library.
    // It may also do color mapping, if scalars or other attributes are defined.
    vtkSmartPointer<vtkPolyDataMapper> cylinderMapper2 =
        vtkSmartPointer<vtkPolyDataMapper>::New();
    cylinderMapper2->SetInputConnection(cylinder2->GetOutputPort());

    // The actor is a grouping mechanism: besides the geometry (mapper), it
    // also has a property, transformation matrix, and/or texture map.
    // Here we set its color and rotate it around the X and Y axes.
    vtkSmartPointer<vtkActor> cylinderActor2 =
        vtkSmartPointer<vtkActor>::New();
    cylinderActor2->SetMapper(cylinderMapper2);
    cylinderActor2->GetProperty()->SetColor(1.0000, 0.3882, 0.2784);
    cylinderActor2->RotateX(30.0);
    cylinderActor2->RotateY(-45.0);
    cylinderActor2->GetProperty()->SetOpacity(0.5);

    // The renderer generates the image
    // which is then displayed on the render window.
    // It can be thought of as a scene to which the actor is added.
    vtkSmartPointer<vtkRenderer> renderer2 =
        vtkSmartPointer<vtkRenderer>::New();
    renderer2->AddActor(cylinderActor2);
    renderer2->SetBackground(0.1, 0.2, 0.4);

    // Zoom in a little by accessing the camera and invoking its "Zoom" method.
    renderer2->ResetCamera();
    renderer2->GetActiveCamera()->Zoom(1.5);

    // The render window is the actual GUI window
    // that appears on the computer screen.
    vtkSmartPointer<vtkRenderWindow> renderWindow2 =
        vtkSmartPointer<vtkRenderWindow>::New();
    renderWindow2->SetSize(200, 200);
    renderWindow2->AddRenderer(renderer2);

    // 1. Use a render window with alpha bits (as initial value is 0 (false)):
    renderWindow2->SetAlphaBitPlanes(true);
    // 2. Force to not pick a framebuffer with a multisample buffer
    //    (as initial value is 8):
    renderWindow2->SetMultiSamples(0);
    // 3. Choose to use depth peeling (if supported) (initial value is 0 (false)):
    renderer2->SetUseDepthPeeling(true);
    // 4. Set depth peeling parameters
    //    - Set the maximum number of rendering passes (initial value is 4):
    renderer2->SetMaximumNumberOfPeels(100);
    //    - Set the occlusion ratio (initial value is 0.0, exact image):
    renderer2->SetOcclusionRatio(0.1);

    // The render window interactor captures mouse events
    // and will perform appropriate camera or actor manipulation
    // depending on the nature of the events.
    vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor2 =
        vtkSmartPointer<vtkRenderWindowInteractor>::New();
    renderWindowInteractor2->SetRenderWindow(renderWindow2);

    // This starts the event loop and as a side effect causes an initial render.
    renderWindowInteractor2->Start();

    return 0;
}
and Python:
import vtk
###
# First Render
###
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
ren.SetBackground([1., 1., 1.])
ren.SetUseDepthPeeling(1)
ren.SetOcclusionRatio(0.1)
ren.SetMaximumNumberOfPeels(100)
renWin.SetMultiSamples(0)
renWin.SetAlphaBitPlanes(1)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create source
source = vtk.vtkCylinderSource()
source.SetCenter(0, 0, 0)
source.SetRadius(5.0)
source.SetHeight(7.0)
source.SetResolution(100)
source.Update()
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(source.GetOutput())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetOpacity(0.5)
# assign actor to the renderer
ren.AddActor(actor)
ren.ResetCamera()
# enable user interface interactor
iren.Initialize()
renWin.Render()
# print(ren)
iren.Start()
# close_window(iren)
# del renWin, ren
###
# Second Render
###
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
ren.SetBackground([1., 1., 1.])
ren.SetUseDepthPeeling(1)
ren.SetOcclusionRatio(0.1)
ren.SetMaximumNumberOfPeels(100)
renWin.SetMultiSamples(0)
renWin.SetAlphaBitPlanes(1)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create source
source = vtk.vtkCylinderSource()
source.SetCenter(0, 0, 0)
source.SetRadius(5.0)
source.SetHeight(7.0)
source.SetResolution(100)
source.Update()
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(source.GetOutput())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetOpacity(0.5)
# assign actor to the renderer
ren.AddActor(actor)
ren.ResetCamera()
# enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()