When I run a PyOpenGL program, I get an error.
I searched the web, but everything I found says it is a PyOpenGL version problem, and I am already using the latest version.
Traceback (most recent call last):
File "C:/Users/TheUser/Desktop/MyPytonDen/ThinMatrixOpenGl/engineTester/MainGameLoop.py", line 10, in
from ThinMatrixOpenGl.renderEngine.MasterRenderer import MasterRendererClass
File "C:\Users\TheUser\Desktop\MyPytonDen\ThinMatrixOpenGl\renderEngine\MasterRenderer.py", line 10, in
class MasterRendererClass:
File "C:\Users\TheUser\Desktop\MyPytonDen\ThinMatrixOpenGl\renderEngine\MasterRenderer.py", line 11, in MasterRendererClass
shader = StaticShaderClass()
File "C:\Users\TheUser\Desktop\MyPytonDen\ThinMatrixOpenGl\shaders\staticShader.py", line 22, in init
super().init(self.VERTEX_FILE, self.FRAGMENT_FILE)
File "C:\Users\TheUser\Desktop\MyPytonDen\ThinMatrixOpenGl\shaders\shaderProgram.py", line 13, in init
self.Vertex_Shader_Id = Load_Shader(vertex_file, GL_VERTEX_SHADER)
File "C:\Users\TheUser\Desktop\MyPytonDen\ThinMatrixOpenGl\shaders\shaderProgram.py", line 84, in Load_Shader
Shader_Id = glCreateShader(type_of_shader)
File "C:\Users\TheUser\AppData\Local\Programs\Python\Python38-32\lib\site-packages\OpenGL\platform\baseplatform.py", line 423, in call
raise error.NullFunctionError(
OpenGL.error.NullFunctionError: Attempt to call an undefined function glCreateShader, check for bool(glCreateShader) before calling
Process finished with exit code 1
I checked the PyOpenGL source code (not that I had meddled with it in the first place), and it looks fine.
For some reason, StaticShader now refuses to initialize.
Before I made some changes to my program it was working just fine, and the same code still works in another project.
Even though I never touched the shader code, it gives me this error.
What exactly is this, and how can I handle it?
By the way, this popped up while I was updating the render algorithm, although that change was not a big one.
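(As the error message itself suggests, bool(glCreateShader) can be used to check whether the entry point is available, which on Windows requires a current GL context. A minimal diagnostic sketch, assuming for illustration that pygame is what creates the window; adjust it to whatever windowing library the project actually uses:)

# hypothetical check: glCreateShader only resolves once a GL context is current
import pygame
from OpenGL.GL import glCreateShader

print(bool(glCreateShader))   # expected False before any window/context exists
pygame.init()
pygame.display.set_mode((640, 480), pygame.OPENGL | pygame.DOUBLEBUF)
print(bool(glCreateShader))   # expected True once the GL context is current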
class StaticShaderClass(ShaderProgramClass):
    VERTEX_FILE = "../shaders/vertexShader.txt"
    FRAGMENT_FILE = "../shaders/fragmentShader.txt"

    location_transformation_matrix: int
    location_projection_matrix: int
    location_view_matrix: int
    location_light_position: int
    location_light_color: int
    location_shine_damper: int
    location_reflectivity: int

    def __init__(self):
        super().__init__(self.VERTEX_FILE, self.FRAGMENT_FILE)

    def Bind_Attributes(self):
        super().Bind_Attribute(0, "position")
        super().Bind_Attribute(1, "texture_coord")
        super().Bind_Attribute(2, "normal")

    def GetAllUniformLocation(self):
        self.location_transformation_matrix = super().GetUniformLocation("transformation_matrix")
        self.location_projection_matrix = super().GetUniformLocation("projection_matrix")
        self.location_view_matrix = super().GetUniformLocation("view_matrix")
        self.location_light_position = super().GetUniformLocation("light_position")
        self.location_light_color = super().GetUniformLocation("light_color")
        self.location_shine_damper = super().GetUniformLocation("shine_damper")
        self.location_reflectivity = super().GetUniformLocation("reflectivity")

    def Load_Shine_Variables(self, damper, reflectivity):
        Load_Float(self.location_shine_damper, damper)
        Load_Float(self.location_reflectivity, reflectivity)

    def Load_Transformation_Matrix(self, matrix: Matrix44):
        super().Load_Matrix(self.location_transformation_matrix, matrix)

    def Load_Projection_Matrix(self, projection: Matrix44):
        super().Load_Matrix(self.location_projection_matrix, projection)

    def Load_view_Matrix(self, camera: CameraClass):
        view_matrix = Maths.Create_view_Matrix(camera)
        super().Load_Matrix(self.location_view_matrix, view_matrix)

    def Load_Light(self, light: Light):
        Load_Vector(self.location_light_position, light.position)
        Load_Vector(self.location_light_color, light.color)
class ShaderProgramClass(ABC):
    Program_Id: int
    Vertex_Shader_Id: int
    Fragment_Shader_Id: int

    def __init__(self, vertex_file: str, fragment_file: str):
        self.Vertex_Shader_Id = Load_Shader(vertex_file, GL_VERTEX_SHADER)
        self.Fragment_Shader_Id = Load_Shader(fragment_file, GL_FRAGMENT_SHADER)
        self.Program_Id = glCreateProgram()
        glAttachShader(self.Program_Id, self.Vertex_Shader_Id)
        glAttachShader(self.Program_Id, self.Fragment_Shader_Id)
        self.Bind_Attributes()
        glLinkProgram(self.Program_Id)
        # glGetProgramInfoLog(self.Program_Id)
        glValidateProgram(self.Program_Id)
        self.GetAllUniformLocation()

    def Start(self):
        glUseProgram(self.Program_Id)

    def Clean_up(self):
        self.Stop()
        glDetachShader(self.Program_Id, self.Vertex_Shader_Id)
        glDetachShader(self.Program_Id, self.Fragment_Shader_Id)
        glDeleteShader(self.Vertex_Shader_Id)
        glDeleteShader(self.Fragment_Shader_Id)
        glDeleteProgram(self.Program_Id)

    @abstractmethod
    def Bind_Attributes(self):
        pass

    def Bind_Attribute(self, attribute: int, variable_name: str):
        glBindAttribLocation(self.Program_Id, attribute, variable_name)

    @staticmethod
    def Stop():
        glUseProgram(0)

    @abstractmethod
    def GetAllUniformLocation(self):
        pass

    def GetUniformLocation(self, uniform_name: str):
        return glGetUniformLocation(self.Program_Id, uniform_name)

    @staticmethod
    def Load_Matrix(location, matrix):
        matrix = np.array(matrix, dtype=np.float32)
        # the matrix's data type may need to be changed to float later
        glUniformMatrix4fv(location, 1, False, matrix)


def Load_Float(location: int, value: float):
    glUniform1f(location, value)


def Load_Vector(location: int, vector: Vector3):
    glUniform3f(location, vector.x, vector.y, vector.z)


def Load_Boolean(location: int, value: bool):
    to_load = 0
    if value:
        to_load = 1
    glUniform1f(location, to_load)
def Load_Shader(file: str, type_of_shader: int):
    try:
        src = ""
        with open(file, "r") as f:
            text = f.readlines()
            for i in text:
                src += str(i)
    except OSError:
        raise FileNotFoundError("the file does not exist or could not be read for some reason")
    Shader_Id = glCreateShader(type_of_shader)
    print(Shader_Id)
    glShaderSource(Shader_Id, src)
    glCompileShader(Shader_Id)
    if glGetShaderiv(Shader_Id, GL_COMPILE_STATUS) == GL_FALSE:
        print(glGetShaderInfoLog(Shader_Id))
        print("could not compile shader!")
    return Shader_Id
#version 400 core
in vec3 position;
in vec2 texture_coord;
in vec3 normal;
out vec2 pass_texture_coord;
out vec3 surface_normal;
out vec3 to_light_vector;
out vec3 to_camera_vector;
uniform mat4 transformation_matrix;
uniform mat4 projection_matrix;
uniform mat4 view_matrix;
uniform vec3 light_position;
void main(){
    vec4 world_position = transformation_matrix * vec4(position, 1.0f);
    gl_Position = projection_matrix * view_matrix * world_position;
    pass_texture_coord = texture_coord;
    surface_normal = (transformation_matrix * vec4(normal, 0.0)).xyz;
    to_light_vector = light_position - world_position.xyz;
    to_camera_vector = (inverse(view_matrix) * vec4(0.0, 0.0, 0.0, 1.0)).xyz - world_position.xyz;
}
Let me quote from Python class attributes are evaluated on declaration:
In Python, class attributes are evaluated and put into memory when the class is defined (or imported).
A valid and current OpenGL context is required for each OpenGL instruction, such as for creating the shader program. Therefore, if the shader program is stored in a class attribute and the class is defined or imported before the OpenGL window and context are created, the shader program cannot be generated.
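A minimal sketch of the kind of change this implies, moving the shader out of the class body and into the instance (names follow the code above; the rest of MasterRendererClass is omitted):

class MasterRendererClass:
    # was: shader = StaticShaderClass()   # class attribute, evaluated at import time,
    #                                     # i.e. before any GL context exists

    def __init__(self):
        # evaluated only when an instance is created, which can be done
        # after the display/context has been set up in the main game loop
        self.shader = StaticShaderClass()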
Related
I am trying to resolve a problem with an asynchronous WebSocket in Python. I don't know why I get the error log below when I use the asynchronous approach, but my code works fine when I don't use it.
error log
C:\ProgramData\Anaconda3\lib\site-packages\numpy\__init__.py:148: UserWarning: mkl-service package failed to import, therefore Intel(R) MKL initialization ensuring its correct out-of-the box operation under condition when Gnu OpenMP had already been loaded by Python process is not assured. Please install mkl-service package, see http://github.com/IntelPython/mkl-service
from . import _distributor_init
[2022-12-15 12:51:26,677][root][INFO] - Connected to localhost object_detection mysql database
Error executing job with overrides: []
Traceback (most recent call last):
File "e:\alfa_beta\etl-python\object_counting\object_counting.py", line 102, in object_counter
File "e:\alfa_beta\etl-python\object_counting\object_counting.py", line 102, in object_counter
obj_counting.async_runner()
File "e:\alfa_beta\etl-python\object_counting\object_counting.py", line 41, in async_runner
asyncio.run(self.vehicle_async())
File "C:\ProgramData\Anaconda3\lib\asyncio\runners.py", line 44, in run
return loop.run_until_complete(main)
File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 647, in run_until_complete
return future.result()
File "e:\alfa_beta\etl-python\object_counting\object_counting.py", line 62, in vehicle_async
async with create_connection(self.ws_connection) as async_ws_connection:
AttributeError: __aenter__
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
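For context: async with requires the object it is given to implement the asynchronous context manager protocol (__aenter__ and __aexit__); the AttributeError above means the object returned by create_connection does not. A minimal illustration of the protocol, with hypothetical names:

class AsyncConnection:
    """Hypothetical example of what async with expects."""

    async def __aenter__(self):
        # open the connection here and return the object bound by "as"
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # close the connection here, even if the body raised
        return False

# usage: async with AsyncConnection() as conn: ...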
hydra config
mode: object_counter

# internal database (mysql)
local_db:
  database: object_detection
  enabled: true
  host: localhost
  password: ""
  port: 3306
  user: root
  db_type: mysql

# service camera
service:
  vehichle_counting:
    table: history_vehicles
    ip_cam: 123.23.322.10
    ws_connect: ws://123.23.322.24:324/
    ws_host: 123.23.322.24
    ws_port: 324
    ws_prefix:
    json_data: 3242
    unknown: 42
    video_stream: 0222
Main code that runs the async process, object_counting.py:
from omegaconf import DictConfig
from pyrootutils import setup_root
from types import SimpleNamespace
from websocket import create_connection, enableTrace
import dill as pickle
import asyncio
import hydra
import json
root = setup_root(
    search_from=__file__,
    indicator=[".git", "pyproject.toml"],
    pythonpath=True,
    dotenv=True,
)
# internal package
import websocket_data as wsd
from src.config.database_config import Database
from src.infra import time_infra as ABTime
# from object_counting import websocket_data as wsd
# TODO multi processing
class ObjectCounting:
    def __init__(self, config: DictConfig, ip_camera: str, ws_connection: str):
        self.config = config
        self.ip_camera = ip_camera
        self.ws_connection = ws_connection  # websocket

    def async_runner(self):
        """
        async_runner will help us to run the asynchronous process in object detection;
        for now it will run vehicle_async
        """
        asyncio.run(self.vehicle_async())

    async def vehicle_async(self):
        """
        vehicle_async is the asynchronous process for vehicle detection.
        It will take data from WebSocket with ANPR type and push the license
        plate number with vehicle type of each caught vehicle to MYSQL database
        """
        # database
        self.local_db = Database(
            self.config.local_db.host,
            self.config.local_db.port,
            self.config.local_db.user,
            self.config.local_db.password,
            self.config.local_db.database,
            self.config.local_db.db_type,
        ) if self.config.local_db.enabled else None
        self.push_table = self.config.service.vehichle_counting_cam_1.table
        # enableTrace(True)
        async with create_connection(self.ws_connection) as async_ws_connection:
            while True:
                try:
                    json_data = json.loads(await async_ws_connection.recv(),
                                           object_hook=lambda d: SimpleNamespace(**d))
                    if json_data.type == "anpr":
                        raw_data = wsd.WebsocketData(json_data.type,
                                                     json_data.payload,
                                                     json_data.img)
                        payload_ws = raw_data.payload[0]
                        result_json = payload_ws.resjson
                        image_ws = raw_data.img
                        if result_json != '':
                            print("\ncatch!")
                            print('\nPayload: {}'.format(payload_ws))
                            # push_table = "history_vehicle"
                            response = {"camera_id": 123,
                                        "camera_name": "test_camera",
                                        "plate_number": payload_ws.label,
                                        "vehicle_type": payload_ws.vehicle_type,
                                        "datetime": ABTime.ab_timestamp(),
                                        "image": image_ws,
                                        }
                            self.local_db.push_data(self.push_table, response, self.local_db.db_type)
                            # TODO push data to mysql
                except Exception as e:
                    print("error :", e)


if __name__ == "__main__":
    @hydra.main(config_path=root / "config", config_name="object_counter", version_base=None)
    def object_counter(hydra_config: DictConfig):
        hydra_service = hydra_config.service.vehichle_counting_cam_1
        ip_camera = hydra_service.ip_cam
        ws_connection = hydra_service.ws_connect
        obj_counting = ObjectCounting(hydra_config, ip_camera, ws_connection)
        obj_counting.async_runner()

    object_counter()
Parsing classes that convert the WebSocket data into Python objects:
from typing import List
class Box:
    xmin: int
    ymin: int
    xmax: int
    ymax: int

    def __init__(self, xmin: int, ymin: int, xmax: int, ymax: int) -> None:
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax


class Candidate:
    score: float
    plate: str

    def __init__(self, score: float, plate: str) -> None:
        self.score = score
        self.plate = plate


class Color:
    color: str
    score: float

    def __init__(self, color: str, score: float) -> None:
        self.color = color
        self.score = score


class ModelMake:
    make: str
    model: str
    score: float

    def __init__(self, make: str, model: str, score: float) -> None:
        self.make = make
        self.model = model
        self.score = score


class Orientation:
    orientation: str
    score: float

    def __init__(self, orientation: str, score: float) -> None:
        self.orientation = orientation
        self.score = score


class Region:
    code: str
    score: float

    def __init__(self, code: str, score: float) -> None:
        self.code = code
        self.score = score


class Vehicle:
    score: float
    type: str
    box: Box

    def __init__(self, score: float, type: str, box: Box) -> None:
        self.score = score
        self.type = type
        self.box = box


class Resjson:
    box: Box
    plate: str
    region: Region
    score: float
    candidates: List[Candidate]
    dscore: float
    vehicle: Vehicle
    model_make: List[ModelMake]
    color: List[Color]
    orientation: List[Orientation]

    def __init__(self, box: Box, plate: str, region: Region, score: float, candidates: List[Candidate], dscore: float, vehicle: Vehicle, model_make: List[ModelMake], color: List[Color], orientation: List[Orientation]) -> None:
        self.box = box
        self.plate = plate
        self.region = region
        self.score = score
        self.candidates = candidates
        self.dscore = dscore
        self.vehicle = vehicle
        self.model_make = model_make
        self.color = color
        self.orientation = orientation


class Payload:
    label: str
    vehicle_type: str
    resjson: Resjson

    def __init__(self, label: str, vehicle_type: str, resjson: Resjson) -> None:
        self.label = label
        self.vehicle_type = vehicle_type
        self.resjson = resjson


class WebsocketData:
    type: str
    payload: List[Payload]
    img: str

    def __init__(self, type: str, payload: List[Payload], img: str) -> None:
        self.type = type
        self.payload = payload
        self.img = img
I'm trying to implement a neuron model with the Hodgkin-Huxley formalism on my RTX 2080 Ti with PyCUDA.
The code is quite large, so I won't put all of it here.
The first part of my class sets the number of neurons, creates all the variables on the GPU, and computes the block and grid sizes from the number of neurons (one neuron per thread):
class Inter_PC:
    def __init__(self, ):
        self.NbODEs = 25
        self.NbCells = int(1024 * 1)
        self.init_vector()
        self.init_vector_param()
        self.Create_GPU_SourceModule()
        BLOCK_SIZE = 1024
        self.grid = (int(self.NbCells / BLOCK_SIZE), 1)
        self.block = (BLOCK_SIZE, 1, 1)
In the functions init_vector and init_vector_param, I put the vectors used to compute the ODE results on the GPU:
def init_vector(self):
    self.Vs_PC_dydx1 = self.put_vect_on_GPU(np.zeros((self.NbCells), dtype=np.float32))
    self.Vs_PC_dydx2 = self.put_vect_on_GPU(np.zeros((self.NbCells), dtype=np.float32))
    self.Vs_PC_dydx3 = self.put_vect_on_GPU(np.zeros((self.NbCells), dtype=np.float32))
    self.Vs_PC_dydx4 = self.put_vect_on_GPU(np.zeros((self.NbCells), dtype=np.float32))
    self.Vs_PC_y = self.put_vect_on_GPU(np.zeros((self.NbCells), dtype=np.float32))
    self.Vs_PC_yt = self.put_vect_on_GPU(np.zeros((self.NbCells), dtype=np.float32))
    ...

def init_vector_param(self):
    self.E_leak = self.put_vect_on_GPU(np.ones((self.NbCells), dtype=np.float32) * -65)
    self.E_Na = self.put_vect_on_GPU(np.ones((self.NbCells), dtype=np.float32) * 55)
    ...

def put_vect_on_GPU(self, Variable):
    Variable_gpu = cuda.mem_alloc(Variable.nbytes)
    cuda.memcpy_htod(Variable_gpu, Variable)
    return Variable_gpu
In the function Create_GPU_SourceModule, I create kernels to use on the GPU.
def Create_GPU_SourceModule(self):
    self.mod = SourceModule("""
        #include <math.h>

        __global__ void m_inf_PC(float *V_PC, float *res)
        {
            int idx = threadIdx.x + blockDim.x * blockIdx.x;
            res[idx] = 1.0 / ( 1. * exp(-(V_PC[idx] + 40.) / 3.));
        }

        __global__ void h_inf_PC(float *V_PC, float *res)
        {
            int idx = threadIdx.x + blockDim.x * blockIdx.x;
            res[idx] = 1.0 / ( 1. * exp((V_PC[idx] + 45.) / 3.));
        }
        ...
I have a function that updates all my variables in an RK4 solver:
def setParameters(self):
    func = self.mod.get_function("set_vect_val")
    func(self.Vs_PC_y, self.E_leak, block=self.block, grid=self.grid)
    func = self.mod.get_function("set_vect_val")
    func(self.Vd_PC_y, self.E_leak, block=self.block, grid=self.grid)
    func = self.mod.get_function("h_inf_PC")
    func(self.Vs_PC_y, self.h_s_PC_y, block=self.block, grid=self.grid)
    func = self.mod.get_function("m_KDR_inf_PC")
    func(self.Vs_PC_y, self.m_KDR_s_PC_y, block=self.block, grid=self.grid)
    func = self.mod.get_function("m_m_inf_PC")
    func(self.Vs_PC_y, self.m_s_PC_y, block=self.block, grid=self.grid)
    func = self.mod.get_function("m_m_inf_PC")
    func(self.Vd_PC_y, self.m_d_PC_y, block=self.block, grid=self.grid)
    func = self.mod.get_function("h_inf_PC")
    func(self.Vd_PC_y, self.h_d_PC_y, block=self.block, grid=self.grid)
When I run the code I get this error:
Traceback (most recent call last):
File "C:/Users/maxime/Desktop/SESAME/PycharmProjects/Modele_Micro3/Class_PyrCell_GPU.py", line 1668, in <module>
Vm = PC.rk4_Time(30000)
File "C:/Users/maxime/Desktop/SESAME/PycharmProjects/Modele_Micro3/Class_PyrCell_GPU.py", line 1637, in rk4_Time
self.updateParameters()
File "C:/Users/maxime/Desktop/SESAME/PycharmProjects/Modele_Micro3/Class_PyrCell_GPU.py", line 998, in updateParameters
func = self.mod.get_function("h_inf_PC")
File "C:\Python389\lib\site-packages\pycuda\compiler.py", line 326, in get_function
return self.module.get_function(name)
pycuda._driver.LogicError: cuModuleGetFunction failed: an illegal memory access was encountered
PyCUDA WARNING: a clean-up operation failed (dead context maybe?)
cuMemFree failed: an illegal memory access was encountered
What I don't understand is that the error does not occur the first time I use the kernel h_inf_PC: it happens on the 13th line of the function setParameters, even though I already call the same kernel on line 5 of that function. If I comment out the kernel call that causes the issue (h_inf_PC), the error moves to another kernel call, but not necessarily the next one.
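(Worth noting when reading this kind of failure: CUDA kernel launches are asynchronous, so an illegal memory access in an earlier launch is often only reported at a later, unrelated call. A hedged debugging sketch, assuming the self.mod/self.block/self.grid attributes above, is to synchronize after each launch so the error surfaces at the launch that caused it:)

import pycuda.driver as cuda

def checked_launch(self, kernel_name, *args):
    # hypothetical method to add to the class: launch a kernel and block until
    # it finishes, so an illegal access is reported at the launch that caused it
    func = self.mod.get_function(kernel_name)
    func(*args, block=self.block, grid=self.grid)
    cuda.Context.synchronize()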
Thank you for your time reading this; it has been bugging me for a long time.

I'm using Python 3.8 and ctypes to call a DLL. In most cases, ctypes works like a charm.
One function from the C DLL outputs a list of names, i.e.
int get_date(char* names[]) // each name has length 10, with size of 5
In Python, I call it successfully like this:
string_buffers = [ctypes.create_string_buffer(10) for i in range(5)]
char_array = (ctypes.c_char_p*5)(*map(ctypes.addressof, string_buffers))
handler.get_date(char_array)
But when I try to factor out a function that creates such an array:
def create_char_p_array(length: int, size: int):
    string_buffers = [create_string_buffer(length) for i in range(size)]
    ia = (c_char_p*size)(*map(addressof, string_buffers))
    return ia
char_array = create_char_p_array(10,5)
handler.get_date(char_array)  ### failed, crashed, or returned random values ###
So to narrow the issue down, I moved the map(addressof, ...) call out of the function, and it magically works:
def create_char_p_array(length: int, size: int):
    string_buffers = [create_string_buffer(length) for i in range(size)]
    return string_buffers
sbrs = create_char_p_array(10,5)
char_array = (c_char_p*5)(*map(addressof, sbrs ))
handler.get_date(char_array) # Success !
Is it because the buffers created by create_string_buffer inside the function scope are wiped out once create_char_p_array returns?
Thank you for your time, much appreciated.
Here's what I came up with. addressof doesn't keep a reference to the object it operates on, so the buffers can be garbage collected; cast does keep one.
test.c:
#include <string.h>

__declspec(dllexport) int get_date(char* names[])
{
    strcpy_s(names[0],10,"One");
    strcpy_s(names[1],10,"Two");
    strcpy_s(names[2],10,"Three");
    strcpy_s(names[3],10,"Four");
    strcpy_s(names[4],10,"Five");
    return 5;
}
test.py:
from ctypes import *

dll = CDLL('./x')
dll.get_date.argtypes = POINTER(c_char_p),
dll.get_date.restype = c_int

def create_char_p_array(length: int, size: int):
    return (c_char_p*size)(*[cast(create_string_buffer(length), c_char_p) for _ in range(size)])

a = create_char_p_array(10, 5)
dll.get_date(a)
for s in a:
    print(s)
Output:
b'One'
b'Two'
b'Three'
b'Four'
b'Five'
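An alternative, if you specifically want to keep the writable create_string_buffer objects around (for example to reuse them), is to return them together with the pointer array so they are not garbage collected when the helper returns; a sketch under that assumption:

from ctypes import create_string_buffer, c_char_p, addressof

def create_char_p_array(length: int, size: int):
    # keeping a reference to the buffers is what prevents them from being freed
    buffers = [create_string_buffer(length) for _ in range(size)]
    array = (c_char_p * size)(*map(addressof, buffers))
    return array, buffers

# usage: arr, bufs = create_char_p_array(10, 5); dll.get_date(arr)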
Below is an example of PyCUDA code with the "kernel" code included in it (via SourceModule):
import pycuda
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
import threading
import numpy
class GPUThread(threading.Thread):
    def __init__(self, number, some_array):
        threading.Thread.__init__(self)
        self.number = number
        self.some_array = some_array

    def run(self):
        self.dev = cuda.Device(self.number)
        self.ctx = self.dev.make_context()
        self.array_gpu = cuda.mem_alloc(some_array.nbytes)
        cuda.memcpy_htod(self.array_gpu, some_array)
        test_kernel(self.array_gpu)
        print "successful exit from thread %d" % self.number
        self.ctx.pop()
        del self.array_gpu
        del self.ctx

def test_kernel(input_array_gpu):
    mod = SourceModule("""
        __global__ void f(float * out, float * in)
        {
            int idx = threadIdx.x;
            out[idx] = in[idx] + 6;
        }
        """)
    func = mod.get_function("f")
    output_array = numpy.zeros((1,512))
    output_array_gpu = cuda.mem_alloc(output_array.nbytes)
    func(output_array_gpu,
         input_array_gpu,
         block=(512,1,1))
    cuda.memcpy_dtoh(output_array, output_array_gpu)
    return output_array

cuda.init()
some_array = numpy.ones((1,512), dtype=numpy.float32)
num = cuda.Device.count()

gpu_thread_list = []
for i in range(num):
    gpu_thread = GPUThread(i, some_array)
    gpu_thread.start()
    gpu_thread_list.append(gpu_thread)
I would like to use the same approach, but instead of a "kernel code" I would like to make multiple calls to an external function, i.e. a classical function defined in my main program which takes as arguments different parameters shared by the whole main program. Is that possible?
People who have practiced Matlab may know the function arrayfun, where B = arrayfun(func, A) is a vector of the results obtained by applying the function func to each element of the vector A.
Actually, it is a version of what is commonly called the map function: I would like to do the same, but in a GPU/PyCUDA version.
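(For the elementwise case, PyCUDA already ships a map-like helper, ElementwiseKernel; a minimal sketch, under the assumption that the per-element operation can be expressed in CUDA C rather than as a Python function:)

import numpy as np
import pycuda.autoinit                      # creates a context on the default device
import pycuda.gpuarray as gpuarray
from pycuda.elementwise import ElementwiseKernel

# "map" x -> x*x + 6 over a GPU array; the operation is written in CUDA C
square_plus_six = ElementwiseKernel(
    "float *out, float *in",
    "out[i] = in[i] * in[i] + 6.0f",
    "square_plus_six")

a_gpu = gpuarray.to_gpu(np.arange(512, dtype=np.float32))
out_gpu = gpuarray.empty_like(a_gpu)
square_plus_six(out_gpu, a_gpu)
print(out_gpu.get()[:5])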
Update 1
Sorry, I forgot to explain at the beginning of my post what I mean by an external, classical function. Below is an example of such a function, used in the main section:
def integ(I1):
    function_A = aux_fun_LU(way, ecs, I1[0], I1[1])
    integrale_A = 0.25*delta_x*delta_y*np.sum(function_A[0:-1, 0:-1] + function_A[1:, 0:-1] + function_A[0:-1, 1:] + function_A[1:, 1:])

def g():
    for j in range(6*i, 6*i+6):
        for l in range(j, 6*i+6):
            yield j, l

## apply the integ function to the g() generator.
## Here I am using the simple map function (no parallelization)
if __name__ == '__main__':
    map(integ, g())
Update 2
Maybe a solution would be to call the external function from kernel code, benefiting at the same time from the GPU's power to make many kernel calls. But how do I deal with the value returned by this external function and get it back into the main program?
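(On getting a value back into the main program: a __device__ function can only be called from a __global__ kernel, and the usual pattern is for that kernel to write its results into an output buffer which is then copied back to the host. A minimal sketch, with a made-up integrand standing in for aux_fun_LU:)

import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
from pycuda.compiler import SourceModule

mod = SourceModule("""
__device__ float integrand(float x)          // stand-in for the external function
{
    return x * x;
}

__global__ void eval_integrand(float *out, float *in)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    out[idx] = integrand(in[idx]);
}
""")
eval_integrand = mod.get_function("eval_integrand")

x = np.linspace(0.0, 1.0, 512).astype(np.float32)
y = np.empty_like(x)
eval_integrand(cuda.Out(y), cuda.In(x), block=(512, 1, 1), grid=(1, 1))
print(y.sum())                                # reduce on the host once the data is back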
Update 3
Below is what I have tried:
# Class GPUThread
class GPUThread(threading.Thread):
    def __init__(self, number, some_array):
        threading.Thread.__init__(self)
        self.number = number
        self.some_array = some_array

    def run(self):
        self.dev = cuda.Device(self.number)
        self.ctx = self.dev.make_context()
        self.array_gpu = cuda.mem_alloc(some_array.nbytes)
        cuda.memcpy_htod(self.array_gpu, some_array)
        test_kernel(self.array_gpu)
        print "successful exit from thread %d" % self.number
        self.ctx.pop()
        del self.array_gpu
        del self.ctx

def test_kernel(input_array_gpu):
    mod1 = SourceModule("""
        __device__ void integ1(int *I1)
        {
            function_A = aux_fun_LU(way, ecs, I1[0], I1[1]);
            integrale_A = 0.25*delta_x*delta_y*np.sum(function_A[0:-1, 0:-1] + function_A[1:, 0:-1] + function_A[0:-1, 1:] + function_A[1:, 1:]);
        }""")
    func1 = mod1.get_function("integ1")
    # Calling function
    func1(input_array_gpu)

# Define couples (i,j) to build Fisher matrix
def g1():
    for j in range(6*i, 6*i+6):
        for l in range(j, 6*i+6):
            yield j, l

# Cuda init
if __name__ == '__main__':
    cuda.init()

    # Input gTotal lists
    some_array1 = np.array(list(g1()))
    print 'some_array1 = ', some_array1

    # Parameters for cuda
    num = cuda.Device.count()

    gpu_thread_list = []
    for i in range(num):
        gpu_thread = GPUThread(i, some_array1)
        #gpu_thread = GPUThread(i, eval("some_array"+str(j)))
        gpu_thread.start()
        gpu_thread_list.append(gpu_thread)
I get the following error when I execute it:
Traceback (most recent call last):
File "/Users/mike/anaconda2/envs/py2cuda/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "Example_GPU.py", line 1232, in run
self.array_gpu = cuda.mem_alloc(some_array.nbytes)
NameError: global name 'some_array' is not defined
I can't see what's wrong with the variable 'some_array' and the line
self.array_gpu = cuda.mem_alloc(some_array.nbytes)
What can I try next?
I'm trying to make a python wrapper for AutoIt using ctypes.
Here is my problem:
For example, the prototype for AU3_WinGetTitle is:
void AU3_WinGetTitle(LPCWSTR szTitle, LPCWSTR szText, LPWSTR szRetText, int nBufSize);
I'm using the following code to call the function:
import ctypes
from ctypes.wintypes import *

AUTOIT = ctypes.windll.LoadLibrary("AutoItX3.dll")

def win_get_title(title, text="", buf_size=200):
    AUTOIT.AU3_WinGetTitle.argtypes = (LPCWSTR, LPCWSTR, LPWSTR, INT)
    AUTOIT.AU3_WinGetTitle.restypes = None
    rec_text = LPWSTR()
    AUTOIT.AU3_WinGetTitle(LPCWSTR(title), LPCWSTR(text),
                           ctypes.cast(ctypes.byref(rec_text), LPWSTR),
                           INT(buf_size))
    res = rec_text.value
    return res

print win_get_title("[CLASS:Notepad]")
I'm getting an exception after running this code:
res = rec_text.value
ValueError: invalid string pointer 0x680765E0
szRetText is used to receive the output text, so you need to pass a writable buffer of nBufSize characters (created here with create_unicode_buffer) rather than an empty LPWSTR:
import ctypes
from ctypes.wintypes import *

AUTOIT = ctypes.windll.LoadLibrary("AutoItX3.dll")

def win_get_title(title, text="", buf_size=200):
    # AUTOIT.AU3_WinGetTitle.argtypes = (LPCWSTR, LPCWSTR, LPWSTR, INT)
    # AUTOIT.AU3_WinGetTitle.restypes = None
    rec_text = ctypes.create_unicode_buffer(buf_size)
    AUTOIT.AU3_WinGetTitle(LPCWSTR(title), LPCWSTR(text),
                           rec_text, INT(buf_size))
    res = rec_text.value.rstrip()
    return res

print win_get_title("[CLASS:Notepad]")