How to use cross_validation with curve_fit? - python

I use scipy.optimize.curve_fit to test models against my data.
Normally I have a function of several variables; here, y_data is a function of the two variables stored in x_data:
import numpy as np
from scipy.optimize import curve_fit
x_data = np.array( [np.arange(10), np.arange(10)*-1+5] )
print( x_data )
[[ 0 1 2 3 4 5 6 7 8 9]
[ 5 4 3 2 1 0 -1 -2 -3 -4]]
y_data = np.arange(10) - 5
print( y_data )
[-5 -4 -3 -2 -1 0 1 2 3 4]
To fit the function, I just define it, and use curve_fit:
def lincomb( X, a, b ):
    x1 = X[0]
    x2 = X[1]
    return a*x1*x2 + b

popt, pcov = curve_fit( lincomb, x_data, y_data )
print( popt )
[-0.17857143 -1.57142857]
The latter are the optimized values of the coefficients a and b in the function.
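As a quick sanity check (my addition, not in the original post), the fitted model can be evaluated back on the inputs:
y_fit = lincomb( x_data, *popt )            # model evaluated at every sample
print( np.abs( y_fit - y_data ).max() )     # worst-case residual of the fit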
Now, I would like to use cross validation from sklearn to do the same fit. For this, I packed my function into a class to be used as an estimator, like this:
from sklearn.model_selection import cross_validate
class LinComb:
    def __init__( self, a=None, b=None ):
        self.a = a
        self.b = b

    def _lincomb_background( self, X, a, b ):
        x1 = X[0]
        x2 = X[1]
        return a*x1*x2 + b

    def predict( self, X ):
        return self._lincomb_background( X, self.a, self.b )

    def fit( self, X, y ):
        from scipy.optimize import curve_fit
        popt, pcov = curve_fit( self._lincomb_background, X, y )
        self.a = popt[0]
        self.b = popt[1]
        return self

    def get_params( self, deep=False ):
        return { 'a': self.a, 'b': self.b }

    def set_params( self, **parameters ):
        for parameter, value in parameters.items():
            setattr( self, parameter, value )
        return self
When I then call the cross validation, I get an error in dimensions:
cross_validate( LinComb(), x_data, y_data, cv=5, scoring='neg_mean_squared_error' )
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-60-e0ff8bb83213> in <module>
----> 1 cross_validate( LinComb(), x_data, y_data, cv=5, scoring='neg_mean_squared_error' )
/usr/local/lib/python3.7/dist-packages/sklearn/model_selection/_validation.py in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
215
216 """
--> 217 X, y, groups = indexable(X, y, groups)
218
219 cv = check_cv(cv, y, classifier=is_classifier(estimator))
/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py in indexable(*iterables)
228 else:
229 result.append(np.array(X))
--> 230 check_consistent_length(*result)
231 return result
232
/usr/local/lib/python3.7/dist-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
203 if len(uniques) > 1:
204 raise ValueError("Found input variables with inconsistent numbers of"
--> 205 " samples: %r" % [int(l) for l in lengths])
206
207
ValueError: Found input variables with inconsistent numbers of samples: [2, 10]
Indeed, the dimensions of x_data and y_data are:
print( x_data.shape, y_data.shape )
(2, 10) (10,)
Still, I do not understand why the dimensions work in the first, simple case, or how to achieve this cleanly.
Am I missing something?

It turned out I had two errors in the dimensions. Since they occurred simultaneously, they were hard to trace back individually. I will post the answer here; maybe it will be useful to someone.
1. From the documentation
Modifying the example in the documentation helped to trace back the dimension errors.
from sklearn import datasets, linear_model
from sklearn.model_selection import cross_validate, cross_val_score
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
cv_results = cross_validate(lasso, X, y, cv=3)
sorted(cv_results.keys())
print( cv_results['test_score'] )
[0.33150734 0.08022311 0.03531764]
Note that cross_validate needs the first dimension of X and y (the number of samples) to be the same:
print( X.shape, y.shape )
(150, 10) (150,)
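Applied to the toy data from the question, this means transposing x_data so that samples come first; this step is implicit in the rest of the answer:
x_data = x_data.T
print( x_data.shape, y_data.shape )
(10, 2) (10,)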
Note that with these new dimensions, the simple way to call curve_fit throws an error:
def lincomb( X, a, b ):
    x1 = X[0]
    x2 = X[1]
    return a*x1*x2 + b

popt, pcov = curve_fit( lincomb, x_data, y_data )
print( popt )
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-dedaa241e377> in <module>
4 return a*x1*x2 + b
5
----> 6 popt, pcov = curve_fit( lincomb, x_data, y_data )
7 print( popt )
/usr/local/lib/python3.6/dist-packages/scipy/optimize/minpack.py in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
754 # Remove full_output from kwargs, otherwise we're passing it in twice.
755 return_full = kwargs.pop('full_output', False)
--> 756 res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
757 popt, pcov, infodict, errmsg, ier = res
758 cost = np.sum(infodict['fvec'] ** 2)
/usr/local/lib/python3.6/dist-packages/scipy/optimize/minpack.py in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
381 if not isinstance(args, tuple):
382 args = (args,)
--> 383 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
384 m = shape[0]
385
/usr/local/lib/python3.6/dist-packages/scipy/optimize/minpack.py in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
24 def _check_func(checker, argname, thefunc, x0, args, numinputs,
25 output_shape=None):
---> 26 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
27 if (output_shape is not None) and (shape(res) != output_shape):
28 if (output_shape[0] != 1):
/usr/local/lib/python3.6/dist-packages/scipy/optimize/minpack.py in func_wrapped(params)
456 if transform is None:
457 def func_wrapped(params):
--> 458 return func(xdata, *params) - ydata
459 elif transform.ndim == 1:
460 def func_wrapped(params):
ValueError: operands could not be broadcast together with shapes (2,) (10,)
This can be solved by transposing back inside the call to curve_fit, so that the model function again receives the features along the first axis:
popt, pcov = curve_fit( lincomb, x_data.T, y_data )
print( popt )
[-0.17857143 -1.57142857]
2. Class
Using the new dimensions in x_data for cross_validation (using the class defined in the question) throws a different error:
from sklearn.model_selection import cross_validate
class LinComb:
    def __init__( self, a=None, b=None ):
        self.a = a
        self.b = b

    def _lincomb_background( self, X, a, b ):
        x1 = X[0]
        x2 = X[1]
        return a*x1*x2 + b

    def predict( self, X ):
        return self._lincomb_background( X, self.a, self.b )

    def fit( self, X, y ):
        from scipy.optimize import curve_fit
        popt, pcov = curve_fit( self._lincomb_background, X, y )
        self.a = popt[0]
        self.b = popt[1]
        return self

    def get_params( self, deep=False ):
        return { 'a': self.a, 'b': self.b }

    def set_params( self, **parameters ):
        for parameter, value in parameters.items():
            setattr( self, parameter, value )
        return self
cross_validate( LinComb(), x_data, y_data, cv=5, scoring='neg_mean_squared_error' )
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-10-e0ff8bb83213> in <module>
----> 1 cross_validate( LinComb(), x_data, y_data, cv=5, scoring='neg_mean_squared_error' )
/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
229 return_times=True, return_estimator=return_estimator,
230 error_score=error_score)
--> 231 for train, test in cv.split(X, y, groups))
232
233 zipped_scores = list(zip(*scores))
/usr/local/lib/python3.6/dist-packages/joblib/parallel.py in __call__(self, iterable)
919 # remaining jobs.
920 self._iterating = False
--> 921 if self.dispatch_one_batch(iterator):
922 self._iterating = self._original_iterator is not None
923
/usr/local/lib/python3.6/dist-packages/joblib/parallel.py in dispatch_one_batch(self, iterator)
757 return False
758 else:
--> 759 self._dispatch(tasks)
760 return True
761
/usr/local/lib/python3.6/dist-packages/joblib/parallel.py in _dispatch(self, batch)
714 with self._lock:
715 job_idx = len(self._jobs)
--> 716 job = self._backend.apply_async(batch, callback=cb)
717 # A job can complete so quickly than its callback is
718 # called before we get here, causing self._jobs to
/usr/local/lib/python3.6/dist-packages/joblib/_parallel_backends.py in apply_async(self, func, callback)
180 def apply_async(self, func, callback=None):
181 """Schedule a func to be run"""
--> 182 result = ImmediateResult(func)
183 if callback:
184 callback(result)
/usr/local/lib/python3.6/dist-packages/joblib/_parallel_backends.py in __init__(self, batch)
547 # Don't delay the application, to avoid keeping the input
548 # arguments in memory
--> 549 self.results = batch()
550
551 def get(self):
/usr/local/lib/python3.6/dist-packages/joblib/parallel.py in __call__(self)
223 with parallel_backend(self._backend, n_jobs=self._n_jobs):
224 return [func(*args, **kwargs)
--> 225 for func, args, kwargs in self.items]
226
227 def __len__(self):
/usr/local/lib/python3.6/dist-packages/joblib/parallel.py in <listcomp>(.0)
223 with parallel_backend(self._backend, n_jobs=self._n_jobs):
224 return [func(*args, **kwargs)
--> 225 for func, args, kwargs in self.items]
226
227 def __len__(self):
/usr/local/lib/python3.6/dist-packages/sklearn/model_selection/_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, error_score)
512 estimator.fit(X_train, **fit_params)
513 else:
--> 514 estimator.fit(X_train, y_train, **fit_params)
515
516 except Exception as e:
<ipython-input-9-ff88060f1729> in fit(self, X, y)
15 def fit( self, X, y ):
16 from scipy.optimize import curve_fit
---> 17 popt, pcov = curve_fit( self._lincomb_background, X, y )
18 self.a = popt[0]
19 self.b = popt[1]
/usr/local/lib/python3.6/dist-packages/scipy/optimize/minpack.py in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
754 # Remove full_output from kwargs, otherwise we're passing it in twice.
755 return_full = kwargs.pop('full_output', False)
--> 756 res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
757 popt, pcov, infodict, errmsg, ier = res
758 cost = np.sum(infodict['fvec'] ** 2)
/usr/local/lib/python3.6/dist-packages/scipy/optimize/minpack.py in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
381 if not isinstance(args, tuple):
382 args = (args,)
--> 383 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
384 m = shape[0]
385
/usr/local/lib/python3.6/dist-packages/scipy/optimize/minpack.py in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
24 def _check_func(checker, argname, thefunc, x0, args, numinputs,
25 output_shape=None):
---> 26 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
27 if (output_shape is not None) and (shape(res) != output_shape):
28 if (output_shape[0] != 1):
/usr/local/lib/python3.6/dist-packages/scipy/optimize/minpack.py in func_wrapped(params)
456 if transform is None:
457 def func_wrapped(params):
--> 458 return func(xdata, *params) - ydata
459 elif transform.ndim == 1:
460 def func_wrapped(params):
ValueError: operands could not be broadcast together with shapes (2,) (8,)
3. Dimension error inside the class
This error comes from curve_fit, not from cross_validate, and must be corrected inside the class, in both methods that call the model _lincomb_background(), that is, in both fit() and predict(). The modified class is:
class LinComb:
    def __init__( self, a=None, b=None ):
        self.a = a
        self.b = b

    def _lincomb_background( self, X, a, b ):
        x1 = X[0]
        x2 = X[1]
        return a*x1*x2 + b

    def predict( self, X ):
        return self._lincomb_background( X.T, self.a, self.b )   # Call with transposed X!

    def fit( self, X, y ):
        from scipy.optimize import curve_fit
        popt, pcov = curve_fit( self._lincomb_background, X.T, y )   # Call with transposed X!
        self.a = popt[0]
        self.b = popt[1]
        return self

    def get_params( self, deep=False ):
        return { 'a': self.a, 'b': self.b }

    def set_params( self, **parameters ):
        for parameter, value in parameters.items():
            setattr( self, parameter, value )
        return self
With these two modified calls, cross_validate works as expected:
cross_validate( LinComb(), x_data, y_data, cv=5, scoring='neg_mean_squared_error' )
{'fit_time': array([0.00105524, 0.00051618, 0.0004158 , 0.00040078, 0.00039887]),
'score_time': array([0.00158715, 0.0001812 , 0.00017715, 0.00017595, 0.00017548]),
'test_score': array([-12.89 , -0.29918379, -3.82378685, -2.72051908,
-7.25 ])}
4. Summary
a) First check that the dimensions for cross_validation() are correct
b) Then adjust dimensions inside the class, in the call to curve_fit()
c) Lastly adjust dimensions inside the class, in predict()
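As a side note (my addition, not part of the original answer): subclassing sklearn.base.BaseEstimator provides get_params() and set_params() automatically, and RegressorMixin adds a default R² score() method, so the estimator can be reduced to fit() and predict(). A minimal sketch on the same toy data:
import numpy as np
from scipy.optimize import curve_fit
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.model_selection import cross_validate

class LinCombEstimator( BaseEstimator, RegressorMixin ):
    def __init__( self, a=None, b=None ):
        self.a = a
        self.b = b

    @staticmethod
    def _lincomb_background( X, a, b ):
        x1 = X[0]   # X is (n_features, n_samples) here
        x2 = X[1]
        return a*x1*x2 + b

    def fit( self, X, y ):
        # sklearn passes X as (n_samples, n_features); transpose for curve_fit
        popt, pcov = curve_fit( self._lincomb_background, X.T, y )
        self.a, self.b = popt
        return self

    def predict( self, X ):
        return self._lincomb_background( X.T, self.a, self.b )

x_data = np.array( [np.arange(10), np.arange(10)*-1+5] ).T   # shape (10, 2)
y_data = np.arange(10) - 5
print( cross_validate( LinCombEstimator(), x_data, y_data, cv=5, scoring='neg_mean_squared_error' )['test_score'] )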

Related

ValueError: Buffer dtype mismatch, expected 'double' but got Python object

I am testing an open-source codebase but keep running into this error. I am passing a CRF model into cross_val_predict, and that's where the error starts. Pystruct is used for the CRF model. The model is built from sparse matrices, which I think is causing the problem, but I am not entirely sure.
This is the code:
def evaluate_dataset(config):
    # Parameters
    folds = config['folds']
    results = {}
    crf = config['clf']
    sample_size = config['sample_size'] if 'sample_size' in config else None
    paths = list(Path(config['dir']).iterdir())
    preprocessor = SectionPreprocess(ground_truth=True)
    paths, X, y = preprocessor.preprocess(paths, sample_size=sample_size)
    # Dataset information
    results['num_binaries'] = len(y)
    results['avg_code_fraction'] = np.mean([np.mean(yy) for yy in y])
    results['params'] = crf.get_params()
    # Cross-validation
    # logreg = LogisticRegression()
    results['cv_folds'] = folds
    start = time.time()
    y_pred = cross_val_predict(crf, X, y, cv=folds, n_jobs=1)
    end = time.time()
    results['cv_time'] = (end - start) / folds
    values = [
        (metrics.accuracy_score(true, pred),) + metrics.precision_recall_fscore_support(true, pred, average='binary')
        for (true, pred) in zip(y, y_pred)
    ]
    accuracy, precision, recall, f1, _ = zip(*values)
    results['cv_metrics'] = {
        'accuracy': (np.mean(accuracy), np.std(accuracy)),
        'precision': (np.mean(precision), np.std(precision)),
        'recall': (np.mean(recall), np.std(recall)),
        'f1': (np.mean(f1), np.std(f1)),
    }
    return results

evaluation_folder = Path('evaluation')
evaluation_folder.mkdir(exist_ok=True)

for (name, config) in configurations.items():
    print(config)
    %time results = evaluate_dataset(config)
    print("## " + name)
    print(results)
    print('')
    file_path = evaluation_folder / (name + '.json')
    with file_path.open('w') as f:
        json.dump(results, f)
And this is the error:
ValueError Traceback (most recent call last)
in
in evaluate_dataset(config)
24
25 start = time.time()
---> 26 y_pred = cross_val_predict(crf, X, y, cv=folds, n_jobs=1)
27 end = time.time()
28 results['cv_time'] = (end - start) / folds
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/model_selection/_validation.py in cross_val_predict(estimator, X, y, groups, cv, n_jobs, verbose, fit_params, pre_dispatch, method)
399 prediction_blocks = parallel(delayed(_fit_and_predict)(
400 clone(estimator), X, y, train, test, verbose, fit_params, method)
--> 401 for train, test in cv_iter)
402
403 # Concatenate the predictions
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in __call__(self, iterable)
756 # was dispatched. In particular this covers the edge
757 # case of Parallel used with an exhausted iterator.
--> 758 while self.dispatch_one_batch(iterator):
759 self._iterating = True
760 else:
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in dispatch_one_batch(self, iterator)
606 return False
607 else:
--> 608 self._dispatch(tasks)
609 return True
610
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in _dispatch(self, batch)
569 dispatch_timestamp = time.time()
570 cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
--> 571 job = self._backend.apply_async(batch, callback=cb)
572 self._jobs.append(job)
573
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py in apply_async(self, func, callback)
107 def apply_async(self, func, callback=None):
108 """Schedule a func to be run"""
--> 109 result = ImmediateResult(func)
110 if callback:
111 callback(result)
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/externals/joblib/_parallel_backends.py in __init__(self, batch)
324 # Don't delay the application, to avoid keeping the input
325 # arguments in memory
--> 326 self.results = batch()
327
328 def get(self):
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in __call__(self)
129
130 def __call__(self):
--> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items]
132
133 def __len__(self):
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/externals/joblib/parallel.py in (.0)
129
130 def __call__(self):
--> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items]
132
133 def __len__(self):
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/sklearn/model_selection/_validation.py in _fit_and_predict(estimator, X, y, train, test, verbose, fit_params, method)
472 estimator.fit(X_train, **fit_params)
473 else:
--> 474 estimator.fit(X_train, y_train, **fit_params)
475 func = getattr(estimator, method)
476 predictions = func(X_test)
~/Google Drive (gdk244#nyu.edu)/MoMa Lab /CRF/code_section_identification/crf_models/CRFModel.py in fit(self, X, y, **fit_params)
74 verbose=self.verbose
75 )
---> 76 self._ssvm.fit(X, y)
77
78 return self
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/pystruct/learners/frankwolfe_ssvm.py in fit(self, X, Y, constraints, initialize)
295 self._frank_wolfe_batch(X, Y)
296 else:
--> 297 self._frank_wolfe_bc(X, Y)
298 except KeyboardInterrupt:
299 pass
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/pystruct/learners/frankwolfe_ssvm.py in _frank_wolfe_bc(self, X, Y)
220 i = perm[j]
221 x, y = X[i], Y[i]
--> 222 y_hat, delta_joint_feature, slack, loss = find_constraint(self.model, x, y, w)
223 # ws and ls
224 ws = delta_joint_feature * self.C
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/pystruct/utils/inference.py in find_constraint(model, x, y, w, y_hat, relaxed, compute_difference)
63
64 if y_hat is None:
---> 65 y_hat = model.loss_augmented_inference(x, y, w, relaxed=relaxed)
66 joint_feature = model.joint_feature
67 if getattr(model, 'rescale_C', False):
~/.pyenv/versions/pythonvenv3.6.1/lib/python3.6/site-packages/pystruct/models/crf.py in loss_augmented_inference(self, x, y, w, relaxed, return_energy)
104 pairwise_potentials = self._get_pairwise_potentials(x, w)
105 edges = self._get_edges(x)
--> 106 loss_augment_unaries(unary_potentials, np.asarray(y), self.class_weight)
107
108 return inference_dispatch(unary_potentials, pairwise_potentials, edges,
utils.pyx in utils.loss_augment_unaries (src/utils.c:5132)()
ValueError: Buffer dtype mismatch, expected 'double' but got Python object
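The thread does not include a resolution. As a purely hypothetical diagnostic (my assumption, not from the thread): the failing Cython helper loss_augment_unaries expects C 'double' buffers, so an object-dtype array reaching it is a plausible trigger; inspecting the dtypes and casting the labels to a concrete numeric type before cross_val_predict is worth trying:
import numpy as np

# Hypothetical check: ragged lists or sparse-matrix conversions can yield
# object-dtype arrays, which Cython buffer types reject.
print(set(np.asarray(yy).dtype for yy in y))
y = [np.asarray(yy, dtype=np.int64) for yy in y]   # labels as concrete integers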

curve_fit impossible if the function calls minimize

Say I have a function f1 of an independent variable x and a parameter a.
The function f1, in turn, needs a parameter m that is obtained by minimizing a second function f2, which itself needs the variable x as a parameter.
I have no problem calling f1(x, a) with any single value, but if I try to fit it with curve_fit, it throws ValueError: setting an array element with a sequence. I guess this happens because curve_fit passes in the whole x array, not a single x value.
Is there no way to make curve_fit work?
Toy code, to check the problem:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize, curve_fit
from random import uniform

def f1(x, a):
    m = minimize(f2, 0., args=(x,)).x[0]
    return (x + a + m)**2

def f2(theta, a):
    return (theta - a)**2

xs = np.linspace(-3., 3., 10)
ys = [round(f1(x, 0.5) - uniform(-1, 1), 2) for x in xs]
plt.scatter(xs, ys)
popt, pcov = curve_fit(f1, xs, ys, [0.3])
plt.plot(xs, [f1(x, popt[0]) for x in xs])
Full traceback:
ValueError Traceback (most recent call last)
<ipython-input-87-5db669cee585> in <module>()
10 plt.scatter(xs,ys)
11 plt.plot(xs,[f1(x,0.5) for x in xs])
---> 12 popt,pcov = curve_fit(f1, xs, ys, [0.3])
13 plt.plot(xs,[f1(x,popt[0]) for x in xs])
/usr/lib64/python2.7/site-packages/scipy/optimize/minpack.pyc in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
749 # Remove full_output from kwargs, otherwise we're passing it in twice.
750 return_full = kwargs.pop('full_output', False)
--> 751 res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
752 popt, pcov, infodict, errmsg, ier = res
753 cost = np.sum(infodict['fvec'] ** 2)
/usr/lib64/python2.7/site-packages/scipy/optimize/minpack.pyc in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
381 if not isinstance(args, tuple):
382 args = (args,)
--> 383 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
384 m = shape[0]
385 if n > m:
/usr/lib64/python2.7/site-packages/scipy/optimize/minpack.pyc in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
25 def _check_func(checker, argname, thefunc, x0, args, numinputs,
26 output_shape=None):
---> 27 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
28 if (output_shape is not None) and (shape(res) != output_shape):
29 if (output_shape[0] != 1):
/usr/lib64/python2.7/site-packages/scipy/optimize/minpack.pyc in func_wrapped(params)
461 if transform is None:
462 def func_wrapped(params):
--> 463 return func(xdata, *params) - ydata
464 elif transform.ndim == 1:
465 def func_wrapped(params):
<ipython-input-87-5db669cee585> in f1(x, a)
1 from scipy.optimize import minimize
2 def f1(x,a):
----> 3 m = minimize(f2,0.,args=(x)).x[0]
4 return (x + a + m)**2
5 def f2(theta,a):
/usr/lib64/python2.7/site-packages/scipy/optimize/_minimize.pyc in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
595 return _minimize_cg(fun, x0, args, jac, callback, **options)
596 elif meth == 'bfgs':
--> 597 return _minimize_bfgs(fun, x0, args, jac, callback, **options)
598 elif meth == 'newton-cg':
599 return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
/usr/lib64/python2.7/site-packages/scipy/optimize/optimize.pyc in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
961 else:
962 grad_calls, myfprime = wrap_function(fprime, args)
--> 963 gfk = myfprime(x0)
964 k = 0
965 N = len(x0)
/usr/lib64/python2.7/site-packages/scipy/optimize/optimize.pyc in function_wrapper(*wrapper_args)
291 def function_wrapper(*wrapper_args):
292 ncalls[0] += 1
--> 293 return function(*(wrapper_args + args))
294
295 return ncalls, function_wrapper
/usr/lib64/python2.7/site-packages/scipy/optimize/optimize.pyc in approx_fprime(xk, f, epsilon, *args)
721
722 """
--> 723 return _approx_fprime_helper(xk, f, epsilon, args=args)
724
725
/usr/lib64/python2.7/site-packages/scipy/optimize/optimize.pyc in _approx_fprime_helper(xk, f, epsilon, args, f0)
661 ei[k] = 1.0
662 d = epsilon * ei
--> 663 grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
664 ei[k] = 0.0
665 return grad
ValueError: setting an array element with a sequence.
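(My addition, not from the original post.) Given the diagnosis above, one workaround is to make f1 itself array-aware, running the inner minimization once per element so curve_fit can pass the whole xs array at once; a sketch:
import numpy as np
from scipy.optimize import minimize, curve_fit

def f2(theta, a):
    return np.sum((theta - a)**2)   # scalar objective for minimize

def f1(x, a):
    # run the scalar-only inner minimization element-wise
    x = np.atleast_1d(np.asarray(x, dtype=float))
    m = np.array([minimize(f2, 0., args=(xi,)).x[0] for xi in x])
    return (x + a + m)**2

xs = np.linspace(-3., 3., 10)
ys = f1(xs, 0.5) + np.random.uniform(-1, 1, size=xs.shape)
popt, pcov = curve_fit(f1, xs, ys, p0=[0.3])
print(popt)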

Fitting data to Faddeeva function using python's optimize.leastsq() and optimize.curve_fit

Hello Stack Overflow community,
I am trying to fit data to a Faddeeva function (scipy.special.wofz) using Python's optimize.leastsq() or optimize.curve_fit(). The fit parameters are the following two: z1 and z2. They are complex, whereas the independent variable (time) and the output of the function (meas_data) are purely real numbers. This is my first attempt to fit the data:
import numpy as np
from scipy import optimize
from scipy import special

meas_data = np.loadtxt('directory')
time = np.loadtxt('directory')

def test(params, time):
    z1 = params[0]
    z2 = params[1]
    a = z1*np.sqrt(time)
    b = z2*np.sqrt(time)
    a = np.complex(0, a)
    b = np.complex(0, b)
    c = special.wofz(a)
    d = special.wofz(b)
    return np.real(c*d)

def test_error(params, time, t_error):
    return test(params, time) - t_error

initial_guess = (300+200j, 300-200j)
params_fit, cov_x, infodict, mesg, ier = optimize.leastsq(test_error, initial_guess, args=(time, meas_data), full_output=True)
My second attempt looks like :
import numpy as np
from scipy import optimize
from scipy import special

meas_data = np.loadtxt('directory')
time = np.loadtxt('directory')

def test(time, z1, z2):
    a = z1*np.sqrt(time)
    b = z2*np.sqrt(time)
    a = np.complex(0, a)
    b = np.complex(0, b)
    c = special.wofz(a)
    d = special.wofz(b)
    return np.real(c*d)

popt, pcov = optimize.curve_fit(test, time, meas_data)
For both cases, I get a similar error message:
for the first attempt:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-9286b2981692> in <module>()
22
23 initial_guess = (300+200j, 300-200j)
---> 24 params_fit, cov_x, infodict, mesg, ier = optimize.leastsq(test_error, initial_guess, args = (time, msd), full_output = True)
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
375 if not isinstance(args, tuple):
376 args = (args,)
--> 377 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
378 m = shape[0]
379 if n > m:
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
24 def _check_func(checker, argname, thefunc, x0, args, numinputs,
25 output_shape=None):
---> 26 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
27 if (output_shape is not None) and (shape(res) != output_shape):
28 if (output_shape[0] != 1):
<ipython-input-13-9286b2981692> in test_error(params, time, t_error)
19
20 def test_error(params, time, t_error):
---> 21 return test(params, time) - t_error
22
23 initial_guess = (z1, z2)
<ipython-input-13-9286b2981692> in test(params, time)
10 b = z2*np.sqrt(time)
11
---> 12 a = np.complex(0, a)
13 b = np.complex(0, b)
14
TypeError: only length-1 arrays can be converted to Python scalars
and for the second attempt:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-8-8f631a7ede54> in <module>()
16 return np.real(c*d)
17
---> 18 popt, pcov = optimize.curve_fit(test, time, msd)
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in curve_fit(f, xdata, ydata, p0, sigma, absolute_sigma, check_finite, bounds, method, jac, **kwargs)
674 # Remove full_output from kwargs, otherwise we're passing it in twice.
675 return_full = kwargs.pop('full_output', False)
--> 676 res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
677 popt, pcov, infodict, errmsg, ier = res
678 cost = np.sum(infodict['fvec'] ** 2)
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in leastsq(func, x0, args, Dfun, full_output, col_deriv, ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
375 if not isinstance(args, tuple):
376 args = (args,)
--> 377 shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
378 m = shape[0]
379 if n > m:
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in _check_func(checker, argname, thefunc, x0, args, numinputs, output_shape)
24 def _check_func(checker, argname, thefunc, x0, args, numinputs,
25 output_shape=None):
---> 26 res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
27 if (output_shape is not None) and (shape(res) != output_shape):
28 if (output_shape[0] != 1):
/Users/tthalheim/anaconda/lib/python3.5/site-packages/scipy/optimize/minpack.py in func_wrapped(params)
453 if weights is None:
454 def func_wrapped(params):
--> 455 return func(xdata, *params) - ydata
456 else:
457 def func_wrapped(params):
<ipython-input-8-8f631a7ede54> in test(time, z1, z2)
7 b = z2*np.sqrt(time)
8
----> 9 a = np.complex(0, a)
10 b = np.complex(0, b)
11
TypeError: only length-1 arrays can be converted to Python scalars
The data I am using for fitting are times in the range of 10e-6 to 10e-2 and measurement data in the range of 10e-19 to 10e-16. Both test functions work for calculating individual values when z1 and z2 are known. I think it has something to do with Python's fitting routines, which maybe cannot handle complex values during their calculation?
I would be very happy, if someone could help me fixing this problem.
The third comment by PRMoureu on my question fixed the problem.
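The comment itself is not quoted in the thread; judging from the traceback, the fix was presumably to replace the scalar-only np.complex(0, a) calls with element-wise complex arithmetic, e.g. multiplying by 1j (my reconstruction, not a quote):
def test(time, z1, z2):
    a = 1j * z1 * np.sqrt(time)   # purely imaginary argument, works on whole arrays
    b = 1j * z2 * np.sqrt(time)
    c = special.wofz(a)
    d = special.wofz(b)
    return np.real(c*d)
Note that curve_fit and leastsq optimize over real-valued parameters, so genuinely complex z1 and z2 would additionally have to be split into their real and imaginary parts.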

Scipy optimize function: matrices not aligned

EDIT: The data set is the MNIST data set from the Homework of Week 4 of Andrew Ng's Machine Learning Course
I've checked the question on scipy optimize but I still couldn't figure out what is wrong with my code. I am trying to optimize theta for the oneVsAll question on the Andrew Ng coursera course.
Here is the relevant code
import math
import numpy as np

def sigmoid(x):
    a = []
    for item in x:
        a.append(1/(1+math.exp(-item)))
    return a

def hypothesis(x, theta):
    return np.array(sigmoid(np.dot(x, theta)))

def costFunction(theta, x, y, lamba_):
    m = X.shape[0]
    part1 = np.dot(y.T, np.log(hypothesis(x, theta)).reshape(m,1))
    part2 = np.dot((np.ones((m,1)) - y).T, np.log(1 - hypothesis(x, theta)).reshape(m,1))
    summ = (part1 + part2)
    return -summ[0]/m

def gradientVect(theta, x, y, lambda_):
    n = X.shape[1]
    m = X.shape[0]
    gradient = []
    theta = theta.reshape(n,1)
    beta = hypothesis(x, theta) - y
    reg = theta[1:] * lambda_/m
    grad = np.dot(X.T, beta) * 1./m
    grad[1:] = grad[1:] * reg
    return grad.flatten()

from scipy import optimize

def optimizeTheta(x, y, nLabels, lambda_):
    for i in np.arange(0, nLabels):
        theta = np.zeros((n,1))
        res = optimize.minimize(costFunction, theta, args=(x, (y == i)*1, lambda_), method=None,
                                jac=gradientVect, options={'maxiter':50})
        print(res)
    return result
but running
optimizeTheta(X, y, 10, 0) # X shape = 401, 500
gives me the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-247-e0e6e4c1eddd> in <module>()
3 n = X.shape[1]
4
----> 5 optimizeTheta(X, y, 10, 0)
<ipython-input-246-0a15e9f4769a> in optimizeTheta(x, y, nLabels, lambda_)
54 theta = np.zeros((n,1))
55 res = optimize.minimize(costFunction, x0 = theta, args=(x, (y == i)*1, lambda_), method=None,
---> 56 jac=gradientVect, options={'maxiter':50})
57 print(res)
58 return result
//anaconda/lib/python3.5/site-packages/scipy/optimize/_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
439 return _minimize_cg(fun, x0, args, jac, callback, **options)
440 elif meth == 'bfgs':
--> 441 return _minimize_bfgs(fun, x0, args, jac, callback, **options)
442 elif meth == 'newton-cg':
443 return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
//anaconda/lib/python3.5/site-packages/scipy/optimize/optimize.py in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
859 gnorm = vecnorm(gfk, ord=norm)
860 while (gnorm > gtol) and (k < maxiter):
--> 861 pk = -numpy.dot(Hk, gfk)
862 try:
863 alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
ValueError: shapes (401,401) and (2005000,) not aligned: 401 (dim 1) != 2005000 (dim 0)
And I can't figure out why the shapes are not aligned.
Thanks!
So I realized what was wrong with my code.
The problem was the sigmoid function returning a Python list rather than a NumPy array, which messed up the matrix multiplications afterwards. The new sigmoid function is
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
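For illustration (my addition): the vectorized version preserves array shapes through np.dot, which the list-returning version did not:
import numpy as np

z = np.dot(np.ones((5, 3)), np.zeros((3, 1)))   # z has shape (5, 1)
print(sigmoid(z).shape)   # (5, 1); the old list-based version collapsed this shape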

IndexError: too many indices working with Pandas Dataframe

OK, so here's my code for a multi-classification task using one-vs-all logistic regression with some regularization. I've been struggling with this for the past two days and I don't know why it doesn't work.
import pandas as pd
import numpy as np
import scipy.optimize as sp

Data = pd.read_csv(Location,
                   sep=';',
                   dtype=np.float64,
                   header=None)

X = Data.ix[:,0:1]
y = Data.ix[:,2:]
y.columns = [0]

def sigmoid(z):
    g = 1.0/(1.0+np.exp(-z))
    return g

def lrCostFunction(theta, X, y, lambd):
    m, n = X.shape
    J = -(y.T.dot(np.log(sigmoid(X.dot(theta))))+(1-y).T.dot(np.log(1-sigmoid(X.dot(theta)))))/m
    J = J + (theta.T.dot(theta) - np.power(theta[0,0],2))*(lambd)/(2*m)
    return J.ix[0,0]

def Gradient(theta, X, y, lambd):
    m, n = X.shape
    grad = X.T.dot(sigmoid(X.dot(theta))-y)/m
    grad.ix[1:(n-1),:] = grad.ix[1:(n-1),:] + lambd*theta.ix[1:(n-1),:]/m
    return grad.values.flatten().tolist()

def oneVsAll(X, y, num_labels, lambd):
    m, n = X.shape
    all_theta = pd.DataFrame(data=[[0 for col in range(n+1)] for row in range(num_labels)])
    ones = pd.DataFrame(data=[1 for i in range(X.shape[0])])
    X = pd.concat([ones, X], axis=1)
    for c in range(0, num_labels-1):
        initial_theta = pd.DataFrame(data=[0 for i in range(n+1)])
        theta = sp.minimize(fun=lrCostFunction,
                            x0=initial_theta,
                            args=(X, y, lambd),
                            method='TNC',
                            jac=Gradient)
        all_theta.ix[c,:] = theta
    return all_theta

oneVsAll(X, y, 4, 0.1)
And it says:
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-27-b18648b06674> in <module>()
1 theta = pd.DataFrame(data = [0 for i in range(X.shape[1])])
----> 2 oneVsAll(X, y, 4, 0.1)
<ipython-input-26-ba0f7093d1f6> in oneVsAll(X, y, num_labels, lambd)
10 args = (X,y,lambd),
11 method = 'TNC',
---> 12 jac = Gradient)
13 all_theta.ix[c,:] = theta
14 return all_theta
/Users/jean-marcmarty/anaconda/lib/python2.7/site-packages/scipy/optimize/_minimize.pyc in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
381 elif meth == 'tnc':
382 return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
--> 383 **options)
384 elif meth == 'cobyla':
385 return _minimize_cobyla(fun, x0, args, constraints, **options)
/Users/jean-marcmarty/anaconda/lib/python2.7/site-packages/scipy/optimize/tnc.pyc in _minimize_tnc(fun, x0, args, jac, bounds, eps, scale, offset, mesg_num, maxCGit, maxiter, eta, stepmx, accuracy, minfev, ftol, xtol, gtol, rescale, disp, callback, **unknown_options)
396 offset, messages, maxCGit, maxfun,
397 eta, stepmx, accuracy, fmin, ftol,
--> 398 xtol, pgtol, rescale, callback)
399
400 funv, jacv = func_and_grad(x)
/Users/jean-marcmarty/anaconda/lib/python2.7/site-packages/scipy/optimize/tnc.pyc in func_and_grad(x)
358 else:
359 def func_and_grad(x):
--> 360 f = fun(x, *args)
361 g = jac(x, *args)
362 return f, g
<ipython-input-24-5f31e87e00da> in lrCostFunction(theta, X, y, lambd)
2 m , n = X.shape
3 J=-(y.T.dot(np.log(sigmoid(X.dot(theta))))+(1-y).T.dot(np.log(1-sigmoid(X.dot(theta)))))/m
----> 4 J = J + (theta.T.dot(theta)- np.power(theta[0,0],2))*(lambd)/(2*m);
5 return J.ix[0,0]
IndexError: too many indices
I don't know anything about the math, but the error is coming from this code:
theta[0,0]
theta is a 1-D array, so you'd need to index it as theta[0], unless there is some reason you were expecting it to be 2-D?
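For illustration (my addition): scipy.optimize.minimize flattens x0 into a 1-D array before calling the objective, which is why theta arrives 1-D inside lrCostFunction even though a DataFrame was passed as x0:
import numpy as np

theta = np.zeros((3, 1)).flatten()   # what minimize actually hands to the objective
print(theta[0])                      # fine: 0.0
# print(theta[0, 0])                 # IndexError: too many indices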
