I want to use the scipy.optimize.minimize function. The objective function calls into a DLL, which requires ctypes arrays. The goal is to vary the inputs in the ctypes array in order to optimize a specific output, which is also a ctypes array (see the code below).
import os
import ctypes
import tkinter as tk
from PIL import ImageTk
from tkinter import filedialog
import numpy as np
from scipy.optimize import minimize
dll = ctypes.cdll.LoadLibrary(library)
LoadModelDef = dll.addModelDef(model)
nrExperiments = 1
nrin = dll.getNumInputs(LoadModelDef)
PDBL2ARR = ctypes.c_double * nrin * nrExperiments
inputs = PDBL2ARR()
inputs_init = PDBL2ARR()
def evaluaterel(library, Model, InputArray):
    nrExp = len(InputArray)
    DBL2ARR = ctypes.c_double * nrExp
    outputs = DBL2ARR()
    for i in range(2, 13):
        Name = outputName(Model, i)
        library.evalVBA(Model, InputArray, nrExp, i, outputs)
        for i in range(nrExp):
            Value = str(outputs[i])
            # text = label.cget("text") + '\n' + str(Name) + ' ' + str(Value)
            # label.configure(text=text)
    return outputs
data = np.array([line.split()[-1] for line in open("DATA.txt")], dtype=np.float64)
for i in range(nrExperiments):
    for j in range(nrin):
        inputs_init[i][j] = 0

for i in range(nrExperiments):
    for j in range(nrin):
        inputs[i][j] = data[j]
solution=minimize(evaluaterel(dll,LoadModelDef,inputs),inputs_init,method='SLSQP')
print(solution)
File "c:\app\python27\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
return function(*(wrapper_args + args))
TypeError: 'c_double_Array_1' object is not callable
According to [SciPy.Docs]: scipy.optimize.minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None), the 1st argument (fun) should be a callable (a function, in your case). But you're calling the function yourself when passing it, so what you actually pass is the function's return value.
Modify your code (faulty line) to:
solution = minimize(evaluaterel, inputs_init, args=(dll, LoadModelDef, inputs), method="SLSQP")
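Note that minimize always passes the current parameter vector as the first positional argument, followed by everything in args, and it expects a scalar return value. So evaluaterel's signature has to be adapted accordingly; a minimal sketch under those assumptions (copying the guess into the ctypes array before the DLL call and reducing the outputs to a single number):

def evaluaterel(x, library, Model, InputArray):
    # x: current guess from minimize; copy it into the ctypes 2-D array
    for j in range(nrin):
        InputArray[0][j] = x[j]
    nrExp = len(InputArray)
    outputs = (ctypes.c_double * nrExp)()
    library.evalVBA(Model, InputArray, nrExp, 2, outputs)  # output index 2, as above
    return outputs[0]  # minimize needs a single scalar objective value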
My code is the following:
from coopr.pyomo import *
import numpy as np
from scipy.optimize import minimize
import math
model = ConcreteModel()
model.days = RangeSet(1, 31)  # model.time
T = model.days
M_b1_O_stored_T = Var(T, bounds=(0, None))

def obj_rule(model):
    return sum(M_b1_O_stored_T[i] for i in model.days)

model.funcobj = Objective(rule=obj_rule, sense=maximize)
It shows the following error: ValueError: Error retrieving component IndexedVar[1]: The component has not been constructed.
Can anyone please help me with this? The constraints don't raise this problem, but the objective function does.
Welcome to the site...
You neglected to put your variable "into the model" with the model. prefix. Note my fix below in both the declaration and in your objective function.
from pyomo.environ import *
# from coopr.pyomo import *
# import numpy as np
# from scipy.optimize import minimize
# import math
model = ConcreteModel()
model.days = RangeSet(1, 31)  # model.time
# T = model.days
model.M_b1_O_stored_T = Var(model.days, bounds=(0, None))

def obj_rule(model):
    return sum(model.M_b1_O_stored_T[i] for i in model.days)

model.funcobj = Objective(rule=obj_rule, sense=maximize)
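With the variable attached to the model, the objective now constructs. A quick sanity check, as a sketch: it assumes the glpk solver is installed, and the capacity bound of 10 below is hypothetical, only there to keep the maximization bounded.

# hypothetical bound so the maximization has a finite optimum
model.cap = Constraint(model.days, rule=lambda m, d: m.M_b1_O_stored_T[d] <= 10)

opt = SolverFactory('glpk')  # assumes glpk is installed
opt.solve(model)
model.funcobj.display()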
I am trying to run a NEAT algorithm using this python implementation. This is the original file from the library that is relevant for my question:
from neat.graphs import feed_forward_layers
class FeedForwardNetwork(object):
    def __init__(self, inputs, outputs, node_evals):
        self.input_nodes = inputs
        self.output_nodes = outputs
        self.node_evals = node_evals
        self.values = dict((key, 0.0) for key in inputs + outputs)

    def activate(self, inputs):
        if len(self.input_nodes) != len(inputs):
            raise RuntimeError("Expected {0:n} inputs, got {1:n}".format(len(self.input_nodes), len(inputs)))

        for k, v in zip(self.input_nodes, inputs):
            self.values[k] = v

        for node, act_func, agg_func, bias, response, links in self.node_evals:
            node_inputs = []
            for i, w in links:
                node_inputs.append(self.values[i] * w)
            s = agg_func(node_inputs)
            self.values[node] = act_func(bias + response * s)

        return [self.values[i] for i in self.output_nodes]

    @staticmethod
    def create(genome, config):
        """ Receives a genome and returns its phenotype (a FeedForwardNetwork). """

        # Gather expressed connections.
        connections = [cg.key for cg in genome.connections.values() if cg.enabled]

        layers = feed_forward_layers(config.genome_config.input_keys, config.genome_config.output_keys, connections)
        node_evals = []
        for layer in layers:
            for node in layer:
                inputs = []
                node_expr = []  # currently unused
                for conn_key in connections:
                    inode, onode = conn_key
                    if onode == node:
                        cg = genome.connections[conn_key]
                        inputs.append((inode, cg.weight))
                        node_expr.append("v[{}] * {:.7e}".format(inode, cg.weight))

                ng = genome.nodes[node]
                aggregation_function = config.genome_config.aggregation_function_defs.get(ng.aggregation)
                activation_function = config.genome_config.activation_defs.get(ng.activation)
                node_evals.append((node, activation_function, aggregation_function, ng.bias, ng.response, inputs))

        return FeedForwardNetwork(config.genome_config.input_keys, config.genome_config.output_keys, node_evals)
Since I evaluate the performance of my neural networks on a large dataset, I wanted to speed up the activate method using numba's jit. In order not to fall back into numba's object mode, I had to rewrite the activate method (and hence also the fields of the FeedForwardNetwork class) using only datatypes supported by numba. This is what I came up with (create is the same as before):
from neat.graphs import feed_forward_layers
from neat.six_util import itervalues
import numba
from numba import jit, njit
from numba.typed import List, Dict
import numpy as np
import math

@jit(nopython=True)
def activate(input_nodes, output_nodes, node_evals_node, node_evals_bias, node_evals_resp, node_evals_ins_nodes, node_evals_ins_conns, values, inputs):
    for i in range(input_nodes.size):
        values[input_nodes[i]] = inputs[i]

    for node in range(len(node_evals_node)):
        s = 0
        for pred in range(len(node_evals_ins_nodes[node])):
            s += values[node_evals_ins_nodes[node][pred]] * node_evals_ins_conns[node][pred]
        values[node_evals_node[node]] = math.tanh(node_evals_bias[node] + node_evals_resp[node] * s)

    return [values[output_nodes[i]] for i in range(output_nodes.size)]

class FeedForwardNetwork(object):
    def __init__(self, inputs, outputs, node_evals):
        self.input_nodes = np.array(inputs)
        self.output_nodes = np.array(outputs)

        # NODE_EVALS decomposition
        self.node_evals_node = np.reshape(np.array(node_evals)[:, 0:1], (len(node_evals),)).astype(np.int64)
        self.node_evals_bias = np.reshape(np.array(node_evals)[:, 3:4], (len(node_evals),)).astype(np.float64)
        self.node_evals_resp = np.reshape(np.array(node_evals)[:, 4:5], (len(node_evals),)).astype(np.float64)

        temp = np.array(node_evals)[:, 5:6]
        self.node_evals_ins_nodes = List()
        self.node_evals_ins_conns = List()
        for node in range(temp.size):
            l = List()
            m = List()
            for predecessor in range(len(temp[node])):
                l.append(temp[0][node][predecessor][0])
                m.append(temp[0][node][predecessor][1])
            self.node_evals_ins_nodes.append(l)
            self.node_evals_ins_conns.append(m)

        self.values = Dict()
        # Set types of dict
        self.values[0] = float(1)
        self.values.pop(0)
This is the code in which I call the create and activate methods:
def eval_single_genome(genome, config, thread_id, result):
    net = neat.nn.FeedForwardNetwork.create(genome, config)
    error_sum = 0
    for i, row in PRICES.iterrows():
        prediction = feed_forward.activate(net.input_nodes, net.output_nodes, net.node_evals_node, net.node_evals_bias, net.node_evals_resp, net.node_evals_ins_nodes, net.node_evals_ins_conns, net.values, np.array([0]))
        error_sum += (prediction - PRICES.iloc[i]['open']) ** 2
    result[thread_id] = error_sum
The code compiles and runs without errors or warnings, which (as far as I've understood) indicates that numba should be able to optimize my implementation. But adding or removing the @jit(nopython=True) decorator doesn't change the runtime at all.
Did I overlook something? Or is there just nothing that numba can improve in my case?
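One thing worth ruling out first (an assumption about the benchmark, not about the code): the first call to a @jit(nopython=True) function includes the JIT compilation itself, so a measurement that includes the first call can hide any speedup. A small helper that times only warmed-up calls:

import time

def avg_call_time(fn, *args, **kwargs):
    """Average runtime of fn, excluding numba's one-off compilation cost."""
    fn(*args, **kwargs)  # warm-up call triggers compilation
    repeats = 1000
    t0 = time.perf_counter()
    for _ in range(repeats):
        fn(*args, **kwargs)
    return (time.perf_counter() - t0) / repeats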
I show below an example of pycuda code with the "kernel" code included in the script itself (via SourceModule):
import pycuda
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
import threading
import numpy
class GPUThread(threading.Thread):
    def __init__(self, number, some_array):
        threading.Thread.__init__(self)
        self.number = number
        self.some_array = some_array

    def run(self):
        self.dev = cuda.Device(self.number)
        self.ctx = self.dev.make_context()
        self.array_gpu = cuda.mem_alloc(some_array.nbytes)
        cuda.memcpy_htod(self.array_gpu, some_array)
        test_kernel(self.array_gpu)
        print "successful exit from thread %d" % self.number
        self.ctx.pop()
        del self.array_gpu
        del self.ctx

def test_kernel(input_array_gpu):
    mod = SourceModule("""
        __global__ void f(float * out, float * in)
        {
            int idx = threadIdx.x;
            out[idx] = in[idx] + 6;
        }
        """)
    func = mod.get_function("f")
    output_array = numpy.zeros((1, 512))
    output_array_gpu = cuda.mem_alloc(output_array.nbytes)
    func(output_array_gpu,
         input_array_gpu,
         block=(512, 1, 1))
    cuda.memcpy_dtoh(output_array, output_array_gpu)
    return output_array

cuda.init()
some_array = numpy.ones((1, 512), dtype=numpy.float32)
num = cuda.Device.count()
gpu_thread_list = []
for i in range(num):
    gpu_thread = GPUThread(i, some_array)
    gpu_thread.start()
    gpu_thread_list.append(gpu_thread)
I would like to use the same approach, but instead of a "kernel code", I would like to make multiple parallel calls to an external function, i.e. a classical function defined in my main program that takes as arguments parameters shared by the whole main program. Is that possible?
People who have used Matlab may know the function arrayfun, where B = arrayfun(func, A) is the vector of results obtained by applying the function func to each element of the vector A.
Essentially, it is a version of what is commonly called the map function: I would like to do the same, but with a GPU/pycuda version.
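For reference, the closest thing pycuda itself offers to such a map is ElementwiseKernel; note that the per-element operation must still be written as a CUDA C expression, not as a plain Python function. A minimal sketch, reproducing the "+ 6" kernel from above:

import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.elementwise import ElementwiseKernel
import numpy

# GPU analogue of map/arrayfun: out[i] = in[i] + 6 for every i
add_six = ElementwiseKernel(
    "float *out, float *in",
    "out[i] = in[i] + 6",
    "add_six")

a_gpu = gpuarray.to_gpu(numpy.ones(512, dtype=numpy.float32))
out_gpu = gpuarray.empty_like(a_gpu)
add_six(out_gpu, a_gpu)
print out_gpu.get()[:5]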
Update 1
Sorry, I forgot to say at the beginning of my post what I mean by an external, classical function. Below is an example of such a function, used in the main section:
def integ(I1):
    function_A = aux_fun_LU(way, ecs, I1[0], I1[1])
    integrale_A = 0.25*delta_x*delta_y*np.sum(function_A[0:-1, 0:-1] + function_A[1:, 0:-1] + function_A[0:-1, 1:] + function_A[1:, 1:])

def g():
    for j in range(6*i, 6*i+6):
        for l in range(j, 6*i+6):
            yield j, l

## Apply the integ function to the g() generator.
## Here I am using the plain map function (no parallelization).
if __name__ == '__main__':
    map(integ, g())
Update 2
Maybe a solution would be to call the external function from kernel code, thereby also benefiting from the GPU's power for a large number of parallel calls. But how do I deal with the value returned by this external function, to get it back into the main program?
Update 3
Below is what I have tried:
# Class GPUThread
class GPUThread(threading.Thread):
    def __init__(self, number, some_array):
        threading.Thread.__init__(self)
        self.number = number
        self.some_array = some_array

    def run(self):
        self.dev = cuda.Device(self.number)
        self.ctx = self.dev.make_context()
        self.array_gpu = cuda.mem_alloc(some_array.nbytes)
        cuda.memcpy_htod(self.array_gpu, some_array)
        test_kernel(self.array_gpu)
        print "successful exit from thread %d" % self.number
        self.ctx.pop()
        del self.array_gpu
        del self.ctx

def test_kernel(input_array_gpu):
    mod1 = SourceModule("""
        __device__ void integ1(int *I1)
        {
            function_A = aux_fun_LU(way, ecs, I1[0], I1[1]);
            integrale_A = 0.25*delta_x*delta_y*np.sum(function_A[0:-1, 0:-1] + function_A[1:, 0:-1] + function_A[0:-1, 1:] + function_A[1:, 1:]);
        }""")
    func1 = mod1.get_function("integ1")
    # Calling function
    func1(input_array_gpu)

# Define couples (i,j) to build Fisher matrix
def g1():
    for j in range(6*i, 6*i+6):
        for l in range(j, 6*i+6):
            yield j, l

# Cuda init
if __name__ == '__main__':
    cuda.init()

    # Input gTotal lists
    some_array1 = np.array(list(g1()))
    print 'some_array1 = ', some_array1

    # Parameters for cuda
    num = cuda.Device.count()
    gpu_thread_list = []
    for i in range(num):
        gpu_thread = GPUThread(i, some_array1)
        # gpu_thread = GPUThread(i, eval("some_array"+str(j)))
        gpu_thread.start()
        gpu_thread_list.append(gpu_thread)
I get the following error when I run it:
Traceback (most recent call last):
  File "/Users/mike/anaconda2/envs/py2cuda/lib/python2.7/threading.py", line 801, in __bootstrap_inner
    self.run()
  File "Example_GPU.py", line 1232, in run
    self.array_gpu = cuda.mem_alloc(some_array.nbytes)
NameError: global name 'some_array' is not defined
I can't see what's wrong with the variable 'some_array' and the line
self.array_gpu = cuda.mem_alloc(some_array.nbytes)
What can I try next?
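For reference, the traceback itself points at a likely cause (separate from the GPU-mapping question): run() reads the bare name some_array, which only resolved in the first example because an array of exactly that name existed at module level; here the array is called some_array1, hence the NameError. run() should use the instance attribute stored by __init__; a sketch of the corrected method:

def run(self):
    self.dev = cuda.Device(self.number)
    self.ctx = self.dev.make_context()
    # use the instance attribute set in __init__, not a global name
    self.array_gpu = cuda.mem_alloc(self.some_array.nbytes)
    cuda.memcpy_htod(self.array_gpu, self.some_array)
    test_kernel(self.array_gpu)
    print "successful exit from thread %d" % self.number
    self.ctx.pop()
    del self.array_gpu
    del self.ctx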
I have a custom class in my Python code that handles k-means clustering. The class takes some arguments to customize the clustering; however, when subtracting two values from a list filled by the class, I get the following error:
Traceback (most recent call last):
  File "/home/dev/PycharmProjects/KMeans/KMeansApplication.py", line 22, in <module>
    application()
  File "/home/dev/PycharmProjects/KMeans/KMeansApplication.py", line 16, in application
    opt_num_clusters = cluster_calculator.calculate_optimum_clusters()
  File "/home/dev/PycharmProjects/KMeans/ClusterCalculator.py", line 19, in calculate_optimum_clusters
    self.init_opt_line()
  File "/home/dev/PycharmProjects/KMeans/ClusterCalculator.py", line 33, in init_opt_line
    self. m = (self.sum_squared_dist[0] - self.sum_squared_dist[1]) / (1 - self.calc_border)
TypeError: unsupported operand type(s) for -: 'KMeans' and 'KMeans'
Here is the code of my custom class:
import KMeansClusterer
from math import sqrt, fabs
from matplotlib import pyplot as plp

class ClusterCalculator:
    m = 0
    b = 0
    sum_squared_dist = []
    derivates = []
    distances = []
    line_coordinates = []

    def __init__(self, calc_border, data):
        self.calc_border = calc_border
        self.data = data

    def calculate_optimum_clusters(self):
        self.calculate_squared_dist()
        self.init_opt_line()
        self.calc_distances()
        self.calc_line_coordinates()
        opt_clusters = self.get_optimum_clusters()
        print("Evaluated", opt_clusters, "as optimum number of clusters")
        return opt_clusters

    def calculate_squared_dist(self):
        for k in range(1, self.calc_border):
            kmeans = KMeansClusterer.KMeansClusterer(k, self.data)
            self.sum_squared_dist.append(kmeans.calc_custom_params(self.data, k))

    def init_opt_line(self):
        # here the error is thrown
        self.m = (self.sum_squared_dist[0] - self.sum_squared_dist[1]) / (1 - self.calc_border)
        self.b = (1 * self.sum_squared_dist[0] - self.calc_border * self.sum_squared_dist[0]) / (1 - self.calc_border)

    def calc_y_value(self, x_calc):
        return self.m * x_calc + self.b

    def calc_line_coordinates(self):
        for i in range(1, self.calc_border):
            self.line_coordinates.append(self.calc_y_value(i))

    def calc_distances(self):
        for i in range(1, self.calc_border):
            self.distances.append(sqrt(fabs(self.calc_y_value(i))))
        print("For border", self.calc_border, ", calculated the following distances: \n", self.distances)

    def get_optimum_clusters(self):
        return self.distances.index((max(self.distances)))

    def plot_results(self):
        plp.plot(range(1, self.calc_border), self.sum_squared_dist, "bx-")
        plp.plot(range(1, self.calc_border), self.line_coordinates, "bx-")
        plp.xlabel("Number of clusters")
        plp.ylabel("Sum of squared distances")
        plp.show()
I append the KMeansClusterer as well, because sum_squared_dist is filled with values from there:
from sklearn.cluster import KMeans
from matplotlib import pyplot as plp

class KMeansClusterer:
    def __init__(self, clusters, data):
        self.clusters = clusters
        self.data = data

    def cluster(self):
        kmeans = KMeans(n_clusters=self.cluster(), random_state=0).fit(self.data)
        print("Clustered", len(kmeans.labels_), "GTINs")
        for i, cluster_center in enumerate(kmeans.cluster_centers_):
            plp.plot(cluster_center, label="Center {0}".format(i))
        plp.legend(loc="best")
        plp.show()

    def calc_custom_params(self, data_frame, clusters):
        kmeans = KMeans(n_clusters=clusters, random_state=0).fit(data_frame)
        return kmeans

    def cluster_without_plot(self):
        return KMeans(n_clusters=self.cluster(), random_state=0).fit(self.data)
I cannot imagine why '-' should be unsupported: I am trying to subtract two list values that I assumed were integers, and 1 and an integer variable.
Python cannot automatically subtract instances of a custom class. You need to implement the __sub__ method on your class for Python to know how to subtract these objects. You can find the full operator reference here: https://docs.python.org/3/library/operator.html
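For illustration, a minimal sketch of what implementing the operator looks like (on a toy class; this is not a suggestion that subtracting KMeans estimators is meaningful):

class Inertia:
    def __init__(self, value):
        self.value = value

    def __sub__(self, other):
        # invoked for: self - other
        return Inertia(self.value - other.value)

print((Inertia(5) - Inertia(3)).value)  # prints 2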
KMeans.fit() returns the fitted estimator itself, which means calc_custom_params() returns a class instance. So your list sum_squared_dist does not contain integers; its elements are objects of the sklearn.cluster.KMeans class.
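Given that, the likely intended fix (an assumption about the goal, but consistent with the elbow-style math in init_opt_line) is to store the fitted model's inertia_, which sklearn defines as the sum of squared distances of samples to their closest cluster center:

def calc_custom_params(self, data_frame, clusters):
    kmeans = KMeans(n_clusters=clusters, random_state=0).fit(data_frame)
    # inertia_ is a plain float, so the '-' in init_opt_line works
    return kmeans.inertia_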
I'm trying to make a python wrapper for AutoIt using ctypes.
Here is my problem:
e.g. the prototype for AU3_WinGetTitle is:
void AU3_WinGetTitle(LPCWSTR szTitle, LPCWSTR szText, LPWSTR szRetText, int nBufSize);
I'm using the following code to call the function:
import ctypes
from ctypes.wintypes import *
AUTOIT = ctypes.windll.LoadLibrary("AutoItX3.dll")
def win_get_title(title, text="", buf_size=200):
    AUTOIT.AU3_WinGetTitle.argtypes = (LPCWSTR, LPCWSTR, LPWSTR, INT)
    AUTOIT.AU3_WinGetTitle.restypes = None
    rec_text = LPWSTR()
    AUTOIT.AU3_WinGetTitle(LPCWSTR(title), LPCWSTR(text),
                           ctypes.cast(ctypes.byref(rec_text), LPWSTR),
                           INT(buf_size))
    res = rec_text.value
    return res

print win_get_title("[CLASS:Notepad]")
I'm getting an exception when running this code:
res = rec_text.value
ValueError: invalid string pointer 0x680765E0
szRetText is an output buffer: the caller has to allocate it and pass it in. Use ctypes.create_unicode_buffer instead of a bare LPWSTR:
import ctypes
from ctypes.wintypes import *
AUTOIT = ctypes.windll.LoadLibrary("AutoItX3.dll")
def win_get_title(title, text="", buf_size=200):
    # AUTOIT.AU3_WinGetTitle.argtypes = (LPCWSTR, LPCWSTR, LPWSTR, INT)
    # AUTOIT.AU3_WinGetTitle.restypes = None
    rec_text = ctypes.create_unicode_buffer(buf_size)
    AUTOIT.AU3_WinGetTitle(LPCWSTR(title), LPCWSTR(text),
                           rec_text, INT(buf_size))
    res = rec_text.value.rstrip()
    return res

print win_get_title("[CLASS:Notepad]")
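One more detail in both versions above, as a side note: the ctypes attribute for the return type is restype, not restypes; assigning the misspelled attribute is silently accepted and simply has no effect. The explicit prototype would be:

AUTOIT.AU3_WinGetTitle.argtypes = (LPCWSTR, LPCWSTR, LPWSTR, INT)
AUTOIT.AU3_WinGetTitle.restype = None  # note: 'restype', not 'restypes'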