Fitting 2 experimental datasets using scipy - chemical reaction - python

Long time lurking, first time posting.
I am working with a chemical system that is detected only for a certain period of time, so I will have both the reaction and the decay of the signal. The equations are given by:
Derivative(GL, t) = -k*GL - GL/a
Derivative(GM, t) = k*GL - GM/b
I have managed to fit my data using the symfit package (the image below, "Chemical reaction and fitting using symfit", gives an idea of the system). However, since I will need to do a Monte Carlo simulation, I need to fit my data using scipy.
I have tried to define the equations in this way:
def f(C, xdata):
    GL = ydataScaled
    GM = ydataScaled2
    dGLdt = -k*GL - GL/a
    dGMdt = k*GL - GM/b
    return [dGLdt, dGMdt]
However, I am not able to fit the data with either optimize.minimize or odeint. What would be the right approach in this case to fit two datasets in y that share some parameters?
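For reference, a common scipy-only pattern for this kind of problem (a minimal sketch under assumed array names, not the code from this post) is to integrate the ODE system with odeint inside a residual function and hand the stacked residuals of both signals to scipy.optimize.least_squares, so that k, a and b are shared by construction:

import numpy as np
from scipy.integrate import odeint
from scipy.optimize import least_squares

def rhs(C, t, k, a, b):
    # right-hand side of the coupled system for [GL, GM]
    GL, GM = C
    return [-k*GL - GL/a, k*GL - GM/b]

def residuals(params, t_data, gl_data, gm_data):
    k, a, b = params
    sol = odeint(rhs, [1.0, 0.0], t_data, args=(k, a, b))
    # stack the residuals of both measured signals
    return np.concatenate([sol[:, 0] - gl_data, sol[:, 1] - gm_data])

# t_data, gl_data, gm_data stand in for xdata, ydataScaled, ydataScaled2;
# the starting values below are purely illustrative.
# fit = least_squares(residuals, x0=[0.1, 10.0, 10.0], args=(t_data, gl_data, gm_data))
# k_fit, a_fit, b_fit = fit.x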
Full code:
import nmrglue as ng
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy import integrate
from scipy.optimize import curve_fit
from scipy.integrate import odeint
from symfit import variables, parameters, Fit, ODEModel, Derivative, D, exp, sin, Model, cos, integrate
# read in the bruker formatted data
dic,data = ng.bruker.read_pdata('/opt/topspin4.1.0/NMR/2021_09_27_Glutamine/90/pdata/1')
#Bruker to NMRPipe data
C = ng.convert.converter()
C.from_bruker(dic, data)
pdic, ppdata = C.to_pipe()
#process the spectrum
ZF_Number = 16384
ppdata = ng.proc_base.di(ppdata) # discard the imaginaries
show = ppdata[2] #show the spectra number X
# determine the ppm scale
udic = ng.bruker.guess_udic(dic, data)
uc = ng.fileiobase.uc_from_udic(udic)
ppm_scale = uc.ppm_scale()
ppms = uc.ppm_scale()
#Plot the spectra
fig1 = plt.figure()
bx = fig1.add_subplot(111)
bx.plot(ppms, show)
plt.xlabel('Chemical Shift (ppm)')
plt.ylabel('Intensity')
First = 0
End = 80
#Integration for every i in the range
Area = []
Area2 = []
Area3 = [] #noise measurement, using the same chemical shift length as the product peak.
#limits = [(176, 180), (180, 183)]
for i in range(First, End):
    Area.append(ng.analysis.integration.integrate(ppdata[i], uc, (177.15, 177.80), unit = "ppm", noise_limits = None, norm_to_range = None, calibrate = 1.0))
NP_Area = np.asarray(Area)
for i in range(First, End):
    Area2.append(ng.analysis.integration.integrate(ppdata[i], uc, (180.80, 181.10), unit = "ppm", noise_limits = None, norm_to_range = None, calibrate = 1.0))
NP_Area2 = np.asarray(Area2)
for i in range(First, End):
    Area3.append(ng.analysis.integration.integrate(ppdata[i], uc, (20.0, 20.3), unit = "ppm", noise_limits = None, norm_to_range = None, calibrate = 1.0))
NP_Area3 = np.asarray(Area3)
#Plot the buildUP
fig2 = plt.figure()
cx = fig2.add_subplot(111)
cx.plot(NP_Area)
cx.plot(NP_Area2)
plt.xlabel('Time (seconds)')
plt.ylabel('Intensity')
#Fitting
d1 = dic['acqus']['D'][1]
xdata = (np.arange(First, End) - First)*d1
ydata = NP_Area[:,0]
ydata2 = NP_Area2[:,0]
ydataScaled = ydata/max(ydata) #normalized to the initial value of the Glu signal to compensate for any variations in the polarization level
ydataScaled2 = ydata2/max(ydata) # same as above
#GL, GM, t = variables('GL, GM, t')
a, b, k = parameters('a, b, k')
# Define the equation considering the enzymatic reaction Gl -> Gm with the HP decay.
def f(C, xdata):
    GL = ydataScaled
    GM = ydataScaled2
    dGLdt = -k*GL - GL/a
    dGMdt = k*GL - GM/b
    return [dGLdt, dGMdt]

C0 = [1, 0]
popt, pcov = sp.optimize.minimize(f, xdata, args = (ydataScaled, ydataScaled2))
And the error:
runfile('/Users/karensantos/Desktop/Codes/Stack_question.py', wdir='/Users/karensantos/Desktop/Codes')
2
(512, 32768)
float64
/opt/anaconda3/lib/python3.8/site-packages/nmrglue/fileio/convert.py:68: UserWarning: Incompatible dtypes, conversion not recommended
warn("Incompatible dtypes, conversion not recommended")
Traceback (most recent call last):
File "/Users/karensantos/Desktop/Codes/Stack_question.py", line 112, in <module>
popt, pcov = sp.optimize.minimize(f, xdata, args = (ydataScaled, ydataScaled2))
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_minimize.py", line 612, in minimize
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py", line 1101, in _minimize_bfgs
sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps,
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py", line 261, in _prepare_scalar_function
sf = ScalarFunction(fun, x0, args, grad, hess,
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 76, in __init__
self._update_fun()
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 166, in _update_fun
self._update_fun_impl()
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 73, in update_fun
self.f = fun_wrapped(self.x)
File "/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_differentiable_functions.py", line 70, in fun_wrapped
return fun(x, *args)
TypeError: f() takes 2 positional arguments but 3 were given

RuntimeError: Given groups=1, weight of size [64, 1, 4, 4], expected input[256, 3, 32, 32] to have 1 channels, but got 3 channels instead

Could you help me fix the above error? If I load the MNIST dataset, no error pops up. The error has to do with the dimensions of the other datasets (CIFAR-10, FMNIST, and so on), and the code cannot be run on those sets. Any help appreciated.
# noinspection PyUnresolvedReferences
import os
# imports
# noinspection PyUnresolvedReferences
import pickle
from time import time
from torchvision import datasets, transforms
from torchvision.utils import save_image
import site
site.addsitedir('/content/gw_gan/model')
from loss import gwnorm_distance, loss_total_variation, loss_procrustes
from model_cnn import Generator, Adversary
from model_cnn import weights_init_generator, weights_init_adversary
# internal imports
from utils import *
# get arguments
args = get_args()
# system preferences
seed = np.random.randint(100)
torch.set_default_dtype(torch.double)
np.random.seed(seed)
torch.manual_seed(seed)
# settings
batch_size = 256
z_dim = 100
lr = 0.0002
ngen = 3
beta = args.beta
lam = 0.5
niter = 10
epsilon = 0.005
num_epochs = args.num_epochs
cuda = args.cuda
channels = args.n_channels
id1 = args.id
model = 'gwgan_{}_eps_{}_tv_{}_procrustes_{}_ngen_{}_channels_{}_{}' \
.format(args.data, epsilon, lam, beta, ngen, channels, id1)
save_fig_path = 'out_' + model
if not os.path.exists(save_fig_path):
    os.makedirs(save_fig_path)
# data import
dataloader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data/cifar10', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))])),
batch_size=batch_size, drop_last=True, shuffle=True)
# print example images
save_image(next(iter(dataloader))[0][:25],
os.path.join(save_fig_path, 'real.pdf'), nrow=5, normalize=True)
# define networks and parameters
generator = Generator(output_dim=channels)
adversary = Adversary(input_dim=channels)
# weight initialisation
generator.apply(weights_init_generator)
adversary.apply(weights_init_adversary)
if cuda:
    generator = generator.cuda()
    adversary = adversary.cuda()
# create optimizer
g_optimizer = torch.optim.Adam(generator.parameters(), lr, betas=(0.5, 0.99))
# zero gradients
generator.zero_grad()
c_optimizer = torch.optim.Adam(adversary.parameters(), lr, betas=(0.5, 0.99))
# zero gradients
adversary.zero_grad()
# sample for plotting
num_test_samples = batch_size
z_ex = torch.randn(num_test_samples, z_dim)
if cuda:
    z_ex = z_ex.cuda()
loss_history = list()
loss_tv = list()
loss_orth = list()
loss_og = 0
is_hist = list()
for epoch in range(num_epochs):
    t0 = time()
    for it, (image, _) in enumerate(dataloader):
        train_c = ((it + 1) % (ngen + 1) == 0)
        x = image.double()
        if cuda:
            x = x.cuda()
        # sample random number z from Z
        z = torch.randn(image.shape[0], z_dim)
        if cuda:
            z = z.cuda()
        if train_c:
            for q in generator.parameters():
                q.requires_grad = False
            for p in adversary.parameters():
                p.requires_grad = True
        else:
            for q in generator.parameters():
                q.requires_grad = True
            for p in adversary.parameters():
                p.requires_grad = False
        # result generator
        g = generator.forward(z)
        # result adversary
        f_x = adversary.forward(x)
        f_g = adversary.forward(g)
        # compute inner distances
        D_g = get_inner_distances(f_g, metric='euclidean', concat=False)
        D_x = get_inner_distances(f_x, metric='euclidean', concat=False)
        # distance matrix normalisation
        D_x_norm = normalise_matrices(D_x)
        D_g_norm = normalise_matrices(D_g)
        # compute normalized gromov-wasserstein distance
        loss, T = gwnorm_distance((D_x, D_x_norm), (D_g, D_g_norm),
                                  epsilon, niter, loss_fun='square_loss',
                                  coupling=True, cuda=cuda)
        if train_c:
            # train adversary
            loss_og = loss_procrustes(f_x, x.view(x.shape[0], -1), cuda)
            loss_to = -loss + beta * loss_og
            loss_to.backward()
            # parameter updates
            c_optimizer.step()
            # zero gradients
            reset_grad(generator, adversary)
        else:
            # train generator
            loss_t = loss_total_variation(g)
            loss_to = loss + lam * loss_t
            loss_to.backward()
            # parameter updates
            g_optimizer.step()
            # zero gradients
            reset_grad(generator, adversary)
    # plotting
    # get generator example
    g_ex = generator.forward(z_ex)
    g_plot = g_ex.cpu().detach()
    # plot result
    save_image(g_plot.data[:25],
               os.path.join(save_fig_path, 'g_%d.pdf' % epoch),
               nrow=5, normalize=True)
    fig1, ax = plt.subplots(1, 3, figsize=(15, 5))
    ax0 = ax[0].imshow(T.cpu().detach().numpy(), cmap='RdBu_r')
    colorbar(ax0)
    ax1 = ax[1].imshow(D_x.cpu().detach().numpy(), cmap='Blues')
    colorbar(ax1)
    ax2 = ax[2].imshow(D_g.cpu().detach().numpy(), cmap='Blues')
    colorbar(ax2)
    ax[0].set_title(r'$T$')
    ax[1].set_title(r'inner distances of $D$')
    ax[2].set_title(r'inner distances of $G$')
    plt.tight_layout(h_pad=1)
    fig1.savefig(os.path.join(save_fig_path, '{}_ccc.pdf'.format(
        str(epoch).zfill(3))), bbox_inches='tight')
    loss_history.append(loss)
    loss_tv.append(loss_t)
    loss_orth.append(loss_og)
    plt.close('all')
# plot loss history
fig2 = plt.figure(figsize=(2.4, 2))
ax2 = fig2.add_subplot(111)
ax2.plot(loss_history, 'k.')
ax2.set_xlabel('Iterations')
ax2.set_ylabel(r'$\overline{GW}_\epsilon$ Loss')
plt.tight_layout()
plt.grid()
fig2.savefig(save_fig_path + '/loss_history.pdf')
fig3 = plt.figure(figsize=(2.4, 2))
ax3 = fig3.add_subplot(111)
ax3.plot(loss_tv, 'k.')
ax3.set_xlabel('Iterations')
ax3.set_ylabel(r'Total Variation Loss')
plt.tight_layout()
plt.grid()
fig3.savefig(save_fig_path + '/loss_tv.pdf')
fig4 = plt.figure(figsize=(2.4, 2))
ax4 = fig4.add_subplot(111)
ax4.plot(loss_orth, 'k.')
ax4.set_xlabel('Iterations')
ax4.set_ylabel(r'$R_\beta(f_\omega(X), X)$ Loss')
plt.tight_layout()
plt.grid()
fig4.savefig(save_fig_path + '/loss_orth.pdf')
The error displays:
Traceback (most recent call last):
File "/content/gw_gan/main_gwgan_cnn.py", line 160, in <module>
f_x = adversary.forward(x)
File "/content/gw_gan/model/model_cnn.py", line 62, in forward
x = self.conv(input)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py", line 117, in forward
input = module(input)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py", line 423, in forward
return self._conv_forward(input, self.weight)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/conv.py", line 420, in _conv_forward
self.padding, self.dilation, self.groups)
RuntimeError: Given groups=1, weight of size [64, 1, 4, 4], expected input[256, 3, 32, 32] to have 1 channels, but got 3 channels instead
This is for an application of a generative model where the networks are CNNs. The code is taken from main_gwgan_cnn at https://github.com/bunnech/gw_gan, where a GAN is proposed to learn from incomparable spaces and produce results.
You have to set --n_channels, otherwise args.n_channels will default to 1, as seen here. The example given there is for FMNIST, which has a single channel.
Since you are running on CIFAR, you should set it to 3 because there are three channels.
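For illustration only (a self-contained sketch, not code from the repository): the error message's weight of size [64, 1, 4, 4] means the first convolution was built with in_channels=1, which cannot consume a [256, 3, 32, 32] CIFAR batch, while in_channels=3 can:

import torch
import torch.nn as nn

x = torch.randn(256, 3, 32, 32)             # a CIFAR-10-sized batch: 3 channels

conv_1ch = nn.Conv2d(1, 64, kernel_size=4)  # weight shape [64, 1, 4, 4] -> fails on x
conv_3ch = nn.Conv2d(3, 64, kernel_size=4)  # weight shape [64, 3, 4, 4] -> works

# conv_1ch(x)  # RuntimeError: expected input ... to have 1 channels, but got 3 channels
print(conv_3ch(x).shape)                    # torch.Size([256, 64, 29, 29])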

Python data fitting using curve_fit

(This task uses the jupyter notebook system)
This is not
Fit the Higgs mass - given a fitter(xvalues, data, init) function below, write a function fitfunc(...) that describes the combined background and signal model to fit the data. Create two pictures:
(a) plot the data with cross markers ('+' symbol) and the best fit curve as red line on the first plot and
(b) draw the residuals with cross markers on the second plot where residuals are defined as the difference between best fit model and pure background model, see below.
The fit function is composed of a background model with 3 parameters
𝑏(𝑚)=𝐴 * exp(𝑏1(𝑚−105.5)+𝑏2(𝑚−105.5)^2)
The signal is added to the background and its model is
𝑠(𝑚)=𝑅/(𝜎√(2𝜋)) * exp(−(𝑚−𝜇)^2/(2𝜎^2))
The equations are not an issue; it is easy to put them into code, as I have done below:
# YOUR CODE HERE
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def fitfunc(m, mu, sigma, R, A, b1, b2):
    tb1 = b1 * (m - 105.5)
    tb2 = b2 * ((m-105.5)**2)
    b = A * np.exp(tb1 + tb2)
    ts1 = R / (sigma * np.sqrt(2 * np.pi))
    ts2 = -(((m - mu)**2) / (2 * (sigma**2)))
    s = ts1 * np.exp(ts2)
    tot = b + s
    return tot
#
def fitter(xval, yval, initial):
    ''' function to fit the given data using a 'fitfunc' TBD.
    The curve_fit function is called. Only the best fit values
    are returned to be utilized in a main script.
    '''
    best, _ = curve_fit(fitfunc, xval, yval, p0=initial)
    return best
# Use functions with script below for plotting parts (a) and (b)
The fitter method was already provided, so I don't think it is to be changed.
This is my code for plotting the results:
# start value parameter definitions, see equations for s(m) and b(m).
# init[0] = mu
# init[1] = sigma
# init[2] = R
# init[3] = A
# init[4] = b1
# init[5] = b2
init = (125.8, 1.4, 470.0, 5000.0, -0.04, -1.5e-4)
xvalues = np.arange(start=105.5, stop=160.5, step=1)
data = np.array([4780, 4440, 4205, 4150, 3920, 3890, 3590, 3460, 3300, 3200, 3000,
2950, 2830, 2700, 2620, 2610, 2510, 2280, 2330, 2345, 2300, 2190,
2080, 1990, 1840, 1830, 1730, 1680, 1620, 1600, 1540, 1505, 1450,
1410, 1380, 1380, 1250, 1230, 1220, 1110, 1110, 1080, 1055, 1050,
940, 920, 950, 880, 870, 850, 800, 820, 810, 770, 760])
# YOUR CODE HERE
def main():
    arr = np.ndarray(init)
    fitt = fitfunc(xvalues, init[0], init[1], init[2], init[3], init[4], init[5])
    def plota(xval, yval):
        fig = plt.figure()
        axis1 = fig.add_axes([0.12, 0.1, 0.85, 0.85])
        axis1.plot(xval, yval, marker="+", color="red")
        axis1.set_title("Combined", size=12)
        axis1.set_xlabel("Mass [GeV]", size=12)
        plt.show()
        return
    plota(xvalues, fitt)
    plota(xvalues, fitter(xvalues, fitt, arr))
main()
In this second block, my code starts after the "#YOUR CODE HERE", the rest was already provided.
At the end, the first call of plota() plots a curve of the data points found, and the second call is my attempt at drawing the "best fit curve" asked for in (a). The first call plots just fine but is not what the question is asking for. This gives a type error: "'float' object cannot be interpreted as an integer". I tried rounding these to integers as well, and I get this error instead: "fitfunc() missing 6 required positional arguments: 'mu', 'sigma', 'R', 'A', 'b1', and 'b2'". I think I am on the right lines with the second call, but I don't know what the third parameter of the fitter method is supposed to be. Looking through the notes I have been provided, it says that it is supposed to be some sort of initial guess, but I don't know what that would have to be.
As for part (b), I am not sure how I would get the residuals, I think I can just iterate through the "best" array returned from the fitter method and calculate b(m) values separately and subtract, but I am unsure of the wording of the question.
Thank you for any help.
TypeError Traceback (most recent call last)
<ipython-input-2-30fd8d6062a3> in <module>
27 plota(xvalues, fitt)
28 plota(xvalues, fitter(xvalues, fitt, arr))
---> 29 main()
30
<ipython-input-2-30fd8d6062a3> in main()
26 return
27 plota(xvalues, fitt)
---> 28 plota(xvalues, fitter(xvalues, fitt, arr))
29 main()
30
<ipython-input-1-ac8e97799a28> in fitter(xval, yval, initial)
22 are returned to be utilized in a main script.
23 '''
---> 24 best, _ = curve_fit(fitfunc, xval, yval, p0=initial)
25 return best
26
C:\ProgramData\Anaconda3\lib\site-packages\scipy\optimize\minpack.py in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
750 # Remove full_output from kwargs, otherwise we're passing it in twice.
751 return_full = kwargs.pop('full_output', False)
--> 752 res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
753 popt, pcov, infodict, errmsg, ier = res
754 cost = np.sum(infodict['fvec'] ** 2)
C:\ProgramData\Anaconda3\lib\site-packages\scipy\optimize\minpack.py in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
381 if not isinstance(args, tuple):
382 args = (args,)
--> 383 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
384 m = shape[0]
385
C:\ProgramData\Anaconda3\lib\site-packages\scipy\optimize\minpack.py in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
24 def _check_func(checker, argname, thefunc, x0, args, numinputs,
25 output_shape=None):
---> 26 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
27 if (output_shape is not None) and (shape(res) != output_shape):
28 if (output_shape[0] != 1):
C:\ProgramData\Anaconda3\lib\site-packages\scipy\optimize\minpack.py in func_wrapped(params)
456 if transform is None:
457 def func_wrapped(params):
--> 458 return func(xdata, *params) - ydata
459 elif transform.ndim == 1:
460 def func_wrapped(params):
TypeError: fitfunc() missing 6 required positional arguments: 'mu', 'sigma', 'R', 'A', 'b1', and 'b2'
I think you're close but for two things:
values for b1 and b2 > 0 can lead to Infinities in the exponents
the return values from curve_fit are the best parameter values, not the best fit. You'll have to calculate that yourself.
You also probably want to fit the data array, right? I think this might be what you're looking for
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def fitfunc(m, mu, sigma, R, A, b1, b2):
    """comment about Higgs mass here"""
    tb1 = b1 * (m - 105.5)
    tb2 = b2 * ((m-105.5)**2)
    b = A * np.exp(tb1 + tb2)
    ts1 = R / (sigma * np.sqrt(2 * np.pi))
    ts2 = -(((m - mu)**2) / (2 * (sigma**2)))
    s = ts1 * np.exp(ts2)
    tot = b + s
    return tot
xvalues = np.arange(start=105.5, stop=160.5, step=1)
data = np.array([4780, 4440, 4205, 4150, 3920, 3890, 3590, 3460, 3300, 3200, 3000,
2950, 2830, 2700, 2620, 2610, 2510, 2280, 2330, 2345, 2300, 2190,
2080, 1990, 1840, 1830, 1730, 1680, 1620, 1600, 1540, 1505, 1450,
1410, 1380, 1380, 1250, 1230, 1220, 1110, 1110, 1080, 1055, 1050,
940, 920, 950, 880, 870, 850, 800, 820, 810, 770, 760])
# start value parameter definitions, see equations for s(m) and b(m).
# init[0] = mu
# init[1] = sigma
# init[2] = R
# init[3] = A
# init[4] = b1
# init[5] = b2
init = np.array([125.8, 2, 470, 5000., -0.05, -0.001])
init_fit = fitfunc(xvalues, *init)
best, _ = curve_fit(fitfunc, xvalues, data, p0=init)
print(best)
best_fit = fitfunc(xvalues, *best)
plt.plot(xvalues, data, color='red', marker='+', label='data')
plt.plot(xvalues, init_fit, color='black', label='init')
plt.plot(xvalues, best_fit, color='blue', label='fit')
plt.gca().set_title("Combined", size=12)
plt.gca().set_xlabel("Mass [GeV]", size=12)
plt.legend()
plt.show()
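For part (b) of the exercise, which the snippet above does not cover, one possible sketch of the residuals (defined in the task as best-fit model minus the pure background model) reuses best, xvalues and fitfunc from the code above; the other variable names are my own:

mu_f, sigma_f, R_f, A_f, b1_f, b2_f = best
# pure background b(m) evaluated with the fitted parameters
background = A_f * np.exp(b1_f*(xvalues - 105.5) + b2_f*(xvalues - 105.5)**2)
residuals = fitfunc(xvalues, *best) - background

plt.plot(xvalues, residuals, linestyle='', marker='+')
plt.gca().set_xlabel("Mass [GeV]", size=12)
plt.gca().set_title("Residuals (fit - background)", size=12)
plt.show()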
If you'll allow, I'd also suggest using lmfit (http://lmfit.github.io/lmfit-py/) (disclosure: I am one of the authors) for this. Using this library, the code above with curve_fit would transform to
from lmfit import Model

h_model = Model(fitfunc)
params = h_model.make_params(mu=125.8, sigma=2, R=470,
                             A=5000, b1=-0.05, b2=-0.001)
result = h_model.fit(data, params, m=xvalues)
print(result.fit_report())
plt.plot(xvalues, data, color='red', marker='+', label='data')
plt.plot(xvalues, result.init_fit, color='black', label='init')
plt.plot(xvalues, result.best_fit, color='blue', label='fit')
plt.gca().set_title("Combined", size=12)
plt.gca().set_xlabel("Mass [GeV]", size=12)
plt.legend()
plt.show()
Note here that with lmfit, Parameters are named using your function arguments. In lmfit all parameters can have bounds, so you could do something like
params['b1'].max = 0.0
to ensure that b1 stays negative. You can also fix any of the parameter values, and there are many other features.
The printed report for this fit would include estimates of uncertainties and correlations as well as fit statistics:
[[Model]]
Model(fitfunc)
[[Fit Statistics]]
# fitting method = leastsq
# function evals = 100
# data points = 55
# variables = 6
chi-square = 106329.424
reduced chi-square = 2169.98824
Akaike info crit = 428.183028
Bayesian info crit = 440.227027
[[Variables]]
mu: 125.940465 +/- 0.34609625 (0.27%) (init = 125.8)
sigma: 1.52638256 +/- 0.37354633 (24.47%) (init = 2)
R: 677.016219 +/- 163.585050 (24.16%) (init = 470)
A: 4660.71073 +/- 24.3437093 (0.52%) (init = 5000)
b1: -0.04279037 +/- 7.7658e-04 (1.81%) (init = -0.05)
b2: 1.7476e-04 +/- 1.7587e-05 (10.06%) (init = -0.001)
[[Correlations]] (unreported correlations are < 0.100)
C(b1, b2) = -0.952
C(A, b1) = -0.775
C(sigma, R) = 0.655
C(A, b2) = 0.650
C(R, b1) = -0.492
C(R, b2) = 0.445
C(sigma, b1) = -0.317
C(sigma, b2) = 0.287
C(R, A) = 0.230
C(sigma, A) = 0.146
and the plot would look something like
I modified your code to run, so your init array has changed for me here.
"""."""
# YOUR CODE HERE
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def fitfunc(m, mu, sigma, R, A, b1, b2):
    """."""
    tb1 = b1 * (m - 105.5)
    tb2 = b2 * ((m-105.5)**2)
    b = A * np.exp(tb1 + tb2)
    ts1 = R / (sigma * np.sqrt(2 * np.pi))
    ts2 = -(((m - mu)**2) / (2 * (sigma**2)))
    s = ts1 * np.exp(ts2)
    tot = b + s
    return tot

def fitter(xval, yval, initial):
    """
    Function to fit the given data using a 'fitfunc' TBD.
    The curve_fit function is called. Only the best fit values
    are returned to be utilized in a main script.
    """
    best, _ = curve_fit(fitfunc, xval, yval, p0=initial)
    return best
# Use functions with script below for plotting parts (a) and (b)
# start value parameter definitions, see equations for s(m) and b(m).
# init[0] = mu
# init[1] = sigma
# init[2] = R
# init[3] = A
# init[4] = b1
# init[5] = b2
init = (126, 2, 470, 5000, 1, 5)
xvalues = np.arange(start=105.5, stop=160.5, step=1)
data = np.array([4780, 4440, 4205, 4150, 3920, 3890, 3590, 3460, 3300, 3200, 3000,
2950, 2830, 2700, 2620, 2610, 2510, 2280, 2330, 2345, 2300, 2190,
2080, 1990, 1840, 1830, 1730, 1680, 1620, 1600, 1540, 1505, 1450,
1410, 1380, 1380, 1250, 1230, 1220, 1110, 1110, 1080, 1055, 1050,
940, 920, 950, 880, 870, 850, 800, 820, 810, 770, 760])
def main():
    """."""
    arr = np.ndarray(init)
    fitt = fitfunc(xvalues, init[0], init[1], init[2], init[3], init[4], init[5])
    def plota(xval, yval):
        fig = plt.figure()
        axis1 = fig.add_axes([0.12, 0.1, 0.85, 0.85])
        axis1.plot(xval, yval, marker="+", color="red")
        axis1.set_title("Combined", size=12)
        axis1.set_xlabel("Mass [GeV]", size=12)
        plt.show()
        return
    plota(xvalues, fitt)
    plota(xvalues, fitter(xvalues, fitt, arr))
main()
Note the indentation on main is off by 1 tab/space grouping.

shapes (401,1) and (401,1) not aligned: 1 (dim 1) != 401 (dim 0)

I am implementing the one-vs-all classifier; however, I got the error "shapes (401,1) and (401,1) not aligned: 1 (dim 1) != 401 (dim 0)", and the traceback is below:
Traceback (most recent call last):
File "<ipython-input-1-682bb50c2435>", line 1, in <module>
runfile('/Users/alvin/Documents/GitDemo/ML_Basic_Imple/Coursera_ML_Python/ex3/Multi_classify_oneVSall.py', wdir='/Users/alvin/Documents/GitDemo/ML_Basic_Imple/Coursera_ML_Python/ex3')
File "/Users/alvin/Documents/tools/anaconda3/lib/python3.6/site-packages/spyder/utils/site/sitecustomize.py", line 710, in runfile
execfile(filename, namespace)
File "/Users/alvin/Documents/tools/anaconda3/lib/python3.6/site-packages/spyder/utils/site/sitecustomize.py", line 101, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/Users/alvin/Documents/GitDemo/ML_Basic_Imple/Coursera_ML_Python/ex3/Multi_classify_oneVSall.py", line 124, in <module>
trained_theta = training_OnevsAll_theta(X,y,10,0.1)
File "/Users/alvin/Documents/GitDemo/ML_Basic_Imple/Coursera_ML_Python/ex3/Multi_classify_oneVSall.py", line 119, in training_OnevsAll_theta
theta,cost = opt_Cost(initial_theta,X,y,lamada)
File "/Users/alvin/Documents/GitDemo/ML_Basic_Imple/Coursera_ML_Python/ex3/Multi_classify_oneVSall.py", line 96, in opt_Cost
res = optimize.fmin_bfgs(LR_Costfunction, theta, fprime=Gradient, args=(X,y,lamada) )
File "/Users/alvin/Documents/tools/anaconda3/lib/python3.6/site-packages/scipy/optimize/optimize.py", line 859, in fmin_bfgs
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
File "/Users/alvin/Documents/tools/anaconda3/lib/python3.6/site-packages/scipy/optimize/optimize.py", line 934, in _minimize_bfgs
old_fval, old_old_fval, amin=1e-100, amax=1e100)
File "/Users/alvin/Documents/tools/anaconda3/lib/python3.6/site-packages/scipy/optimize/optimize.py", line 765, in _line_search_wolfe12
**kwargs)
File "/Users/alvin/Documents/tools/anaconda3/lib/python3.6/site-packages/scipy/optimize/linesearch.py", line 97, in line_search_wolfe1
derphi0 = np.dot(gfk, pk)
ValueError: shapes (401,1) and (401,1) not aligned: 1 (dim 1) != 401 (dim 0)
Could you find any problem in my code below?
Thank you for your patience!
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.io
import scipy.misc
import matplotlib.cm as cm # Used to display images in a specific colormap
import random
from scipy.special import expit
datapath = 'data/ex3data1.mat'
data = scipy.io.loadmat(datapath)
X = data['X']
y = data['y']
print(X.shape)
print(y.shape)
def _display_data():
    all_fig = np.zeros((10*20,10*20))
    index_of_samples = random.sample(range(X.shape[0]),100)
    row, col = 0, 0
    for i in index_of_samples:
        if col == 10:
            row += 1
            col = 0
        fig = X[i].reshape(20,20).T
        all_fig[row * 20:(row+1)*20,col * 20:(col+1)*20] = fig
        col += 1
    plt.figure(figsize=(8,8))
    img = scipy.misc.toimage(all_fig)
    plt.imshow(img, cmap = plt.cm.gray_r)

_display_data()
# ============ Part 2a: Vectorize Logistic Regression ============
def hpy_sigmod_fucntion(X_inter,theta_inter):
    return expit(np.dot(X_inter,theta_inter))

def LR_Costfunction(theta_inter,X_inter,y,lamada=0.):
    m = X_inter.shape[0]
    hyp = hpy_sigmod_fucntion(X_inter,theta_inter)
    reg = np.dot(theta_inter.T,theta_inter) * (lamada / (2 * m))
    J = np.dot(y.T,np.log(hyp))+np.dot((1 - y.T),np.log(1 - hyp))
    return J + reg

def Gradient(theta_inter,X_inter,y,lamada=0.):
    m = X_inter.shape[0]
    hyp = hpy_sigmod_fucntion(X_inter,theta_inter)
    hyp = np.asarray(hyp).reshape(hyp.shape[0],1)
    h_y = hyp - y # 5000 * 1
    reg = theta_inter[1:] * (lamada / m)
    reg = np.asarray(reg).reshape(reg.shape[0],1)
    grad = (1 / m) * np.dot(X_inter.T,h_y) # 401 * 1
    grad[1:] = grad[1:] + reg
    return grad # 401 * 1

def opt_Cost(theta,X,y,lamada=0.):
    from scipy import optimize
    res = optimize.fmin_bfgs(LR_Costfunction, theta, fprime=Gradient, args=(X,y,lamada) )
    return result[0], result[1]
I think the function below may be what causes the problem.
Are there any restrictions when using the fmin functions?
def training_OnevsAll_theta(X,y,num_labels,lamada=0.):
    m = X.shape[0]
    n = X.shape[1]
    all_theta = np.zeros((num_labels,n+1))
    X = np.hstack((np.ones((m,1)),X))
    for c in range(num_labels):
        print("Training theta for class %d" %c)
        initial_theta = np.zeros((n+1,1))
        theta,cost = opt_Cost(initial_theta,X,y,lamada)
        all_theta[c] = theta
    print("Finished!")
trained_theta = training_OnevsAll_theta(X,y,10,0.1)
Thank you!
Aha, I found the answer in "matrices are not aligned Error: Python SciPy fmin_bfgs".
Actually, an incorrectly shaped input gradient makes the problem occur, so I followed that answer and added the code below before 'return grad':
grad = np.ndarray.flatten(grad)
And it works!
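To spell out what the fix relies on (a small illustration of my own, reusing the Gradient function and n from the code above): scipy's BFGS-based optimizers work on 1-D arrays, so both the starting point passed to fmin_bfgs and the gradient returned by fprime should have shape (n,), not (n, 1):

import numpy as np

initial_theta = np.zeros(n + 1)     # shape (401,) instead of np.zeros((n+1, 1))

def Gradient_flat(theta_inter, X_inter, y, lamada=0.):
    # illustrative wrapper, equivalent to adding the flatten() before 'return grad'
    return np.ndarray.flatten(Gradient(theta_inter, X_inter, y, lamada))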

CUDA API error on Python with Numba

I'm kind of new to numba and was trying to speed up my Monte Carlo method with it. I'm currently working on Ubuntu 14.04 with a GeForce 950M. The CUDA version is 8.0.61.
When I try to run the following code, I get a memory-associated error from the CUDA API.
Code:
@cuda.jit
def SIR(rng_states, y, particles, weight, beta, omega, gamma,
        greater, equal, phi, phi_sub):
    # thread/block index for accessing data
    tx = cuda.threadIdx.x  # Thread id in a 1D block = particle index
    ty = cuda.blockIdx.x   # Block id in a 1D grid = event index
    bw = cuda.blockDim.x   # Block width, i.e. number of threads per block = particle number
    pos = tx + ty * bw     # computed flattened index inside the array
    # get current event y_t
    y_current = y[ ty ]
    # get number of time steps
    tn = y_current.size
    # iterator over timestep
    for i in range(1, tn):
        # draw samples
        sirModule_sample_draw(rng_states, particles[ty][i-1], beta,
                              omega, particles[ty][i])
        # get weight
        sirModule_weight(particles[ty][i], particles[ty][i-1], weight[ty][i-1],
                         weight[ty][i], y_current[i], beta, omega, gamma)
        # normalize weight
        weight_sum = arr_sum(weight[ty][i])
        arr_div(weight[ty][i], weight_sum)
        # calculate tau
        sirModule_tau(particles[ty][i], beta, omega, phi, phi_sub)
        # update greater and equal
        greater[ty][i] = greater[ty][i-1]*dot(weight[ty][i-1], phi)
        equal[ty][i] = greater[ty][i-1]*dot(weight[ty][i-1], phi_sub)

def main():
    beta = 1
    omega = 1
    gamma = 2
    pn = 100
    event_number = 50
    timestep = 100
    y = np.ones((event_number, timestep), dtype = np.int8)
    particles = cuda.to_device(np.zeros((event_number, timestep, pn), dtype = np.float32))
    weight = cuda.to_device(np.ones((event_number, timestep, pn), dtype = np.float32))
    greater = cuda.to_device(np.ones((event_number, timestep), dtype = np.float32))
    equal = cuda.to_device(np.ones((event_number, timestep), dtype = np.float32))
    phi = cuda.to_device(np.zeros(particles[0][0].size, dtype = np.float32))
    phi_sub = cuda.to_device(np.zeros(particles[0][0].size, dtype = np.float32))
    rng_states = create_xoroshiro128p_states(pn, seed=1)
    start = timer()
    SIR[event_number, pn](rng_states, y, particles, weight, beta,
                          omega, gamma, greater, equal, phi, phi_sub)
    vectoradd_time = timer() - start
    print("sirModule1 took %f seconds" % vectoradd_time)

if __name__ == '__main__':
    main()
Then I get
numba.cuda.cudadrv.driver.CudaAPIError: [715] Call to cuMemcpyDtoH results in UNKNOWN_CUDA_ERROR
numba.cuda.cudadrv.driver.CudaAPIError: [715] Call to cuMemFree results in UNKNOWN_CUDA_ERROR
errors....
Did anybody face the same problem? I checked online and some suggest that the problem arises from the WDDM TDR, but I thought that's only for Windows, right?
The following is the missing part of the code.
import numpy as np
import numba as nb
from timeit import default_timer as timer
from matplotlib import pyplot as pt
import math
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_normal_float32
"""
Look up table for factorial
"""
LOOKUP_TABLE = cuda.to_device(np.array([
1, 1, 2, 6, 24, 120, 720, 5040, 40320,
362880, 3628800, 39916800, 479001600,
6227020800, 87178291200, 1307674368000,
20922789888000, 355687428096000, 6402373705728000,
121645100408832000, 2432902008176640000], dtype='int64'))
"""
arr_sum - sum element in array
"""
@cuda.jit(device=True)
def arr_sum(arr):
    result = 0
    for i in range(arr.size):
        result = result + arr[i]
    return result
"""
dot - dot product of arr1 and arr2
"""
@cuda.jit(device=True)
def dot(arr1, arr2):
    result = 0
    for i in range(arr1.size):
        result = arr1[i]*arr2[i] + result
    return result
"""
arr_div - divide element in array
"""
@cuda.jit(device=True)
def arr_div(arr, div):
    thread_id = cuda.threadIdx.x
    arr[thread_id] = arr[thread_id]/div
"""
SIR module (sample_draw) - module drawing sample for time t (rampling model)
"""
@cuda.jit(device=True)
def sirModule_sample_draw(rng_states, inp, beta, omega, out):
    """Find a value less than 1 from nomral distribution"""
    thread_id = cuda.threadIdx.x
    # draw candidate sample from normal distribution and store
    # when less than 1
    while True:
        candidate = inp[thread_id] + beta + omega * xoroshiro128p_normal_float32(rng_states, thread_id)
        if candidate < 1:
            out[thread_id] = candidate
            break
"""
SIR module (weight calculation) - weight calculation method
"""
@cuda.jit(device=True)
def sirModule_weight(current, previous, weight, out, y, beta, omega, gamma):
    thread_id = cuda.threadIdx.x
    PI = 3.14159265359
    # calculate the pdf/pmf of given state
    Z = ( current[thread_id] - ( previous[ thread_id ] + beta ) ) / omega
    p1_div_p3 = 1.0 / 2.0 * ( 1.0 + math.erf( Z ) )
    mu = math.log( 1 + math.exp( gamma * current[ thread_id ] ) )
    p2 = math.exp( mu ) * mu**y / LOOKUP_TABLE[ y ]
    out[thread_id] = weight[thread_id]*p2*p1_div_p3
"""
SIR module (phi distribution calculator)
"""
@cuda.jit(device=True)
def sirModule_tau(current, beta, omega, phi, phi_sub):
    thread_id = cuda.threadIdx.x
    # calculate phi distribution and subtract from 1
    Z = ( 1 - ( current[ thread_id ] + beta ) ) / omega
    phi[ thread_id ] = 1.0 / 2.0 * ( 1.0 + math.erf( Z ) )
    phi_sub[ thread_id ] = 1 - phi[ thread_id ]
But these are the device functions. Could they be a source of the problem?
As for the error, I get the following error message, where line 207 in my code is where I call the SIR kernel.
Traceback (most recent call last):
File "CUDA_MonteCarlo_Testesr.py", line 214, in <module>
main()
File "CUDA_MonteCarlo_Testesr.py", line 207, in main
omega, gamma, greater, equal, phi, phi_sub)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/compiler.py", line 703, in __call__
cfg(*args)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/compiler.py", line 483, in __call__
sharedmem=self.sharedmem)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/compiler.py", line 585, in _kernel_call
wb()
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/compiler.py", line 600, in <lambda>
retr.append(lambda: devary.copy_to_host(val, stream=stream))
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/devicearray.py", line 198, in copy_to_host
_driver.device_to_host(hostary, self, self.alloc_size, stream=stream)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 1597, in device_to_host
fn(host_pointer(dst), device_pointer(src), size, *varargs)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 288, in safe_cuda_api_call
self._check_error(fname, retcode)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 323, in _check_error
raise CudaAPIError(retcode, msg)
numba.cuda.cudadrv.driver.CudaAPIError: [715] Call to cuMemcpyDtoH results in UNKNOWN_CUDA_ERROR
Traceback (most recent call last):
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/utils.py", line 647, in _exitfunc
f()
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/utils.py", line 571, in __call__
return info.func(*info.args, **(info.kwargs or {}))
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 1099, in deref
mem.free()
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 1013, in free
self._finalizer()
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/utils.py", line 571, in __call__
return info.func(*info.args, **(info.kwargs or {}))
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 863, in core
deallocations.add_item(dtor, handle, size=bytesize)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 519, in add_item
self.clear()
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 530, in clear
dtor(handle)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 288, in safe_cuda_api_call
self._check_error(fname, retcode)
File "/home/ryan/anaconda3/envs/py53/lib/python3.5/site-packages/numba/cuda/cudadrv/driver.py", line 323, in _check_error
raise CudaAPIError(retcode, msg)
numba.cuda.cudadrv.driver.CudaAPIError: [715] Call to cuMemFree results in UNKNOWN_CUDA_ERROR
I think there may be 2 problems.
I'm not sure your use of LOOKUP_TABLE = cuda.to_device(...) outside of main is valid. I guess you are trying to create a device array, but I think you should be using numba.cuda.device_array() for that.
You don't seem to be transferring the array y to the device properly for use.
When I make those two changes, the code seems to run without CUDA runtime error for me:
# cat t1.py
import numpy as np
import numba as nb
from timeit import default_timer as timer
# from matplotlib import pyplot as pt
import math
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_normal_float32
"""
Look up table for factorial
"""
"""
arr_sum - sum element in array
"""
@cuda.jit(device=True)
def arr_sum(arr):
    result = 0
    for i in range(arr.size):
        result = result + arr[i]
    return result
"""
dot - dot product of arr1 and arr2
"""
@cuda.jit(device=True)
def dot(arr1, arr2):
    result = 0
    for i in range(arr1.size):
        result = arr1[i]*arr2[i] + result
    return result
"""
arr_div - divide element in array
"""
@cuda.jit(device=True)
def arr_div(arr, div):
    thread_id = cuda.threadIdx.x
    arr[thread_id] = arr[thread_id]/div
"""
SIR module (sample_draw) - module drawing sample for time t (rampling model)
"""
@cuda.jit(device=True)
def sirModule_sample_draw(rng_states, inp, beta, omega, out):
    """Find a value less than 1 from nomral distribution"""
    thread_id = cuda.threadIdx.x
    # draw candidate sample from normal distribution and store
    # when less than 1
    while True:
        candidate = inp[thread_id] + beta + omega * xoroshiro128p_normal_float32(rng_states, thread_id)
        if candidate < 1:
            out[thread_id] = candidate
            break
"""
SIR module (weight calculation) - weight calculation method
"""
@cuda.jit(device=True)
def sirModule_weight(current, previous, weight, out, y, beta, omega, gamma, lt):
    thread_id = cuda.threadIdx.x
    PI = 3.14159265359
    # calculate the pdf/pmf of given state
    Z = ( current[thread_id] - ( previous[ thread_id ] + beta ) ) / omega
    p1_div_p3 = 1.0 / 2.0 * ( 1.0 + math.erf( Z ) )
    mu = math.log( 1 + math.exp( gamma * current[ thread_id ] ) )
    p2 = math.exp( mu ) * mu**y / lt[ y ]
    out[thread_id] = weight[thread_id]*p2*p1_div_p3
"""
SIR module (phi distribution calculator)
"""
@cuda.jit(device=True)
def sirModule_tau(current, beta, omega, phi, phi_sub):
    thread_id = cuda.threadIdx.x
    # calculate phi distribution and subtract from 1
    Z = ( 1 - ( current[ thread_id ] + beta ) ) / omega
    phi[ thread_id ] = 1.0 / 2.0 * ( 1.0 + math.erf( Z ) )
    phi_sub[ thread_id ] = 1 - phi[ thread_id ]

@cuda.jit
def SIR(rng_states, y, particles, weight, beta, omega, gamma,
        greater, equal, phi, phi_sub, lt):
    # thread/block index for accessing data
    tx = cuda.threadIdx.x  # Thread id in a 1D block = particle index
    ty = cuda.blockIdx.x   # Block id in a 1D grid = event index
    bw = cuda.blockDim.x   # Block width, i.e. number of threads per block = particle number
    pos = tx + ty * bw     # computed flattened index inside the array
    # get current event y_t
    y_current = y[ ty ]
    # get number of time steps
    tn = y_current.size
    # iterator over timestep
    for i in range(1, tn):
        # draw samples
        sirModule_sample_draw(rng_states, particles[ty][i-1], beta,
                              omega, particles[ty][i])
        # get weight
        sirModule_weight(particles[ty][i], particles[ty][i-1], weight[ty][i-1], weight[ty][i], y_current[i], beta, omega, gamma, lt)
        # normalize weight
        weight_sum = arr_sum(weight[ty][i])
        arr_div(weight[ty][i], weight_sum)
        # calculate tau
        sirModule_tau(particles[ty][i], beta, omega, phi, phi_sub)
        # update greater and equal
        greater[ty][i] = greater[ty][i-1]*dot(weight[ty][i-1], phi)
        equal[ty][i] = greater[ty][i-1]*dot(weight[ty][i-1], phi_sub)

def main():
    beta = 1
    omega = 1
    gamma = 2
    pn = 100
    event_number = 50
    timestep = 100
    LOOKUP_TABLE = cuda.to_device(np.array([
        1, 1, 2, 6, 24, 120, 720, 5040, 40320,
        362880, 3628800, 39916800, 479001600,
        6227020800, 87178291200, 1307674368000,
        20922789888000, 355687428096000, 6402373705728000,
        121645100408832000, 2432902008176640000], dtype='int64'))
    hy = np.ones((event_number, timestep), dtype = np.uint32)
    print(hy.size)
    print(hy)
    y = cuda.to_device(hy)
    particles = cuda.to_device(np.zeros((event_number, timestep, pn), dtype = np.float32))
    weight = cuda.to_device(np.ones((event_number, timestep, pn), dtype = np.float32))
    greater = cuda.to_device(np.ones((event_number, timestep), dtype = np.float32))
    equal = cuda.to_device(np.ones((event_number, timestep), dtype = np.float32))
    phi = cuda.to_device(np.zeros(particles[0][0].size, dtype = np.float32))
    phi_sub = cuda.to_device(np.zeros(particles[0][0].size, dtype = np.float32))
    rng_states = create_xoroshiro128p_states(pn, seed=1)
    start = timer()
    SIR[event_number, pn](rng_states, y, particles, weight, beta, omega, gamma, greater, equal, phi, phi_sub, LOOKUP_TABLE)
    vectoradd_time = timer() - start
    print("sirModule1 took %f seconds" % vectoradd_time)
    cuda.synchronize()

if __name__ == '__main__':
    main()
# cuda-memcheck python t1.py
========= CUDA-MEMCHECK
5000
[[1 1 1 ..., 1 1 1]
[1 1 1 ..., 1 1 1]
[1 1 1 ..., 1 1 1]
...,
[1 1 1 ..., 1 1 1]
[1 1 1 ..., 1 1 1]
[1 1 1 ..., 1 1 1]]
sirModule1 took 0.840958 seconds
========= ERROR SUMMARY: 0 errors
#
Solved! I am working on Ubuntu 16.04. When I installed Numba for the first time, the numba.cuda functions worked fine. However, later I encountered these kinds of errors:
raise CudaAPIError(retcode, msg)
CudaAPIError: Call to cuMemcpyHtoD results in CUDA_ERROR_LAUNCH_FAILED
These errors are encountered when you put your system on 'suspend'. In order to avoid such errors, restart your system or don't suspend.

IndexError returned on curve_fit: error on function call?

I am trying to use curve_fit with these functions:
def F(xy,*p):
    x,y = xy
    c = np.array(p).ravel()
    n = (len(c)-1)/4
    omega = pi/180.0
    z = c[0]
    for t in range(n):
        z += c[4*t+1] * (cos((t+1)*omega*x))
        z += c[4*t+2] * (cos((t+1)*omega*y))
        z += c[4*t+3] * (sin((t+1)*omega*x))
        z += c[4*t+4] * (sin((t+1)*omega*y))
    return z

def G(xy,*p):
    x,y = xy
    c = np.array(p).ravel()
    ngm = (len(c))/7
    z = 0
    for t in range(ngm):
        a = c[7*t]
        cx = c[7*t+1]
        mx = c[7*t+2]
        sx = c[7*t+3]
        cy = c[7*t+4]
        my = c[7*t+5]
        sy = c[7*t+6]
        z += a * np.exp(-((cx*(x-mx)**2)/(2*(sx**2)))-((cy*(y-my)**2)/(2*(sy**2))))
    return z

def FG(xy,*p):
    x,y = xy
    c = np.array(p).ravel()
    nf = int(c[0])
    ng = int(c[1])
    print nf,ng
    pf = [c[i] for i in range(2,4*nf+3)]
    pg = [c[i] for i in range(4*nf+3,4*nf+7*ng+3)]
    z1 = F(xy,pf)
    z2 = G(xy,pg)
    return z1+z2

pfit,cov = opt.curve_fit(FG,xy,z,p,bounds=bounds)
pfit,cov = opt.curve_fit(FG,xy,z,p,bounds=bounds)
I am sure that the shapes of both p and bounds are appropriate. I tried printing nf and ng, and they are printed properly until after some number of iterations (around the 20th function call, not the same in every run), where the values change significantly.
After the 20th (or more) run, it returns the following error:
File "/Users/pensieve/calcs/3D_AA/0_codes/fitpkgs.py", line 144, in FGfit
pfit,cov = opt.curve_fit(FG,xy,z,p,bounds=bounds)
File "/Library/Python/2.7/site-packages/scipy-0.18.1-py2.7-macosx-10.10-intel.egg/scipy/optimize/minpack.py", line 683, in curve_fit
**kwargs)
File "/Library/Python/2.7/site-packages/scipy-0.18.1-py2.7-macosx-10.10-intel.egg/scipy/optimize/_lsq/least_squares.py", line 878, in least_squares
tr_options.copy(), verbose)
File "/Library/Python/2.7/site-packages/scipy-0.18.1-py2.7-macosx-10.10-intel.egg/scipy/optimize/_lsq/trf.py", line 128, in trf
loss_function, tr_solver, tr_options, verbose)
File "/Library/Python/2.7/site-packages/scipy-0.18.1-py2.7-macosx-10.10-intel.egg/scipy/optimize/_lsq/trf.py", line 341, in trf_bounds
f_new = fun(x_new)
File "/Library/Python/2.7/site-packages/scipy-0.18.1-py2.7-macosx-10.10-intel.egg/scipy/optimize/_lsq/least_squares.py", line 764, in fun_wrapped
return np.atleast_1d(fun(x, *args, **kwargs))
File "/Library/Python/2.7/site-packages/scipy-0.18.1-py2.7-macosx-10.10-intel.egg/scipy/optimize/minpack.py", line 455, in func_wrapped
return func(xdata, *params) - ydata
File "/Users/pensieve/calcs/3D_AA/0_codes/fitfunctions.py", line 65, in FG
pgm = [c[i] for i in range(4*nf+3,4*nf+7*ng+3)]
IndexError: index out of bounds
For reference, I use scipy 0.18.1.
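For what it's worth, an observation plus a minimal sketch of my own, not a confirmed answer from this thread: curve_fit treats every entry of p as a free floating-point parameter, so c[0] and c[1], i.e. the counts nf and ng, are adjusted by the optimizer along with everything else; once they move, int(c[0]) no longer matches the layout of p and the list comprehension indexes past the end. Keeping the counts fixed outside the parameter vector avoids that, for example:

from functools import partial

def FG_fixed(nf, ng, xy, *p):
    # same body as FG, but nf and ng come from the closure, not from c[0], c[1]
    c = np.array(p).ravel()
    pf = [c[i] for i in range(0, 4*nf + 1)]
    pg = [c[i] for i in range(4*nf + 1, 4*nf + 7*ng + 1)]
    return F(xy, pf) + G(xy, pg)

# p_coeffs and bounds would likewise drop the two leading count entries:
# pfit, cov = opt.curve_fit(partial(FG_fixed, nf, ng), xy, z, p_coeffs, bounds=bounds)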
