Cannot do mpmath.sqrtm() in sympy - python

I have the following code to extract the square root of a symmetric second-order tensor.
from sympy import symbols, Matrix, mpmath
import numpy as np
F11, F12, F13, F21, F22, F23, F31, F32, F33 = symbols('F11, F12, F13, F21, F22, F23, F31, F32, F33', real=True)
F = np.array([[F11, F12, F13], [F21, F22, F23], [F31, F32, F33]])
B = F.dot(F.T)
mpmath.sqrtm(Matrix(B))
However, it gave me the error:
TypeError Traceback (most recent call last)
<ipython-input-14-439fed475a57> in <module>()
5 F = np.array([[F11, F12, F13], [F21, F22, F23], [F31, F32, F33]])
6 B = F.dot(F.T)
----> 7 mpmath.sqrtm(Matrix(B))
X:\WinPython3\python-3.4.2.amd64\lib\site-packages\sympy\mpmath\matrices\calculus.py in sqrtm(ctx, A, _may_rotate)
308
309 """
--> 310 A = ctx.matrix(A)
311 # Trivial
312 if A*0 == A:
X:\WinPython3\python-3.4.2.amd64\lib\site-packages\sympy\mpmath\matrices\matrices.py in __init__(self, *args, **kwargs)
326 A[i,j] = convert(A[i,j])
327 elif hasattr(args[0], 'tolist'):
--> 328 A = self.ctx.matrix(args[0].tolist())
329 self.__data = A._matrix__data
330 self.__rows = A._matrix__rows
X:\WinPython3\python-3.4.2.amd64\lib\site-packages\sympy\mpmath\matrices\matrices.py in __init__(self, *args, **kwargs)
299 for i, row in enumerate(A):
300 for j, a in enumerate(row):
--> 301 self[i, j] = convert(a)
302 else:
303 # interpret list as row vector
X:\WinPython3\python-3.4.2.amd64\lib\site-packages\sympy\mpmath\ctx_mp_python.py in convert(ctx, x, strings)
660 if hasattr(x, '_mpmath_'):
661 return ctx.convert(x._mpmath_(prec, rounding))
--> 662 return ctx._convert_fallback(x, strings)
663
664 def isnan(ctx, x):
X:\WinPython3\python-3.4.2.amd64\lib\site-packages\sympy\mpmath\ctx_mp.py in _convert_fallback(ctx, x, strings)
612 else:
613 raise ValueError("can only create mpf from zero-width interval")
--> 614 raise TypeError("cannot create mpf from " + repr(x))
615
616 def mpmathify(ctx, *args, **kwargs):
TypeError: cannot create mpf from F11**2 + F12**2 + F13**2
May I ask why this is happening? Is this a limitation of sympy, or am I doing something wrong?
Thank you!
Shawn

mpmath.sqrtm is expecting a square matrix of numbers; if you want to take the sqrt of each element in B symbolically try:
>>> B.applyfunc(sqrt)

Don't use NumPy to do symbolic calculations. NumPy only works with numerical arrays.
To take the square root of a matrix, use B**(Rational(1, 2)) (sqrt(B) ought to work too, but it looks like it remains unevaluated by default).
In this case, though, SymPy hangs, because it computes the square root by diagonalizing, and the eigenvalues don't simplify (or at least SymPy doesn't know how to simplify them), so they are huge expressions (the roots of a cubic). Take a look at B.eigenvals(). Thus, the square root of this matrix is quite huge. Are you expecting the square root matrix to be a relatively simple expression?
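Putting those suggestions together, here is a minimal sketch that stays entirely in SymPy (no NumPy array of symbols); the matrix-power line is commented out because, as noted above, it may hang or produce an enormous expression for a fully symbolic B:
from sympy import symbols, Matrix, Rational, sqrt

# Build the tensor with SymPy's Matrix instead of a NumPy object array
F11, F12, F13, F21, F22, F23, F31, F32, F33 = symbols(
    'F11 F12 F13 F21 F22 F23 F31 F32 F33', real=True)
F = Matrix([[F11, F12, F13], [F21, F22, F23], [F31, F32, F33]])
B = F * F.T

# Element-wise square root of each entry
B_elem = B.applyfunc(sqrt)

# True matrix square root via diagonalization -- may hang or be huge here
# B_half = B**Rational(1, 2)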

Related

TypeError: only size-1 arrays can be converted to Python scalars when using scipy.integrate.quad

I'm trying to obtain the signal to noise ratio of two variables I have cross-correlated. This involves using scipy.integrate.quad
In short, I've got two functions: an integrand and the integral function.
The integrand takes four inputs: one array and three scalars.
This integrand function works fine: it swallows the array and spits out another.
I then vectorize the integrand using numpy.vectorize.
I then call scipy.integrate.quad in the following way
integral, integral_err = quad(lambda array: integrand(array, scalar, scalar, scalar), 0, array).
Then I vectorize the integral as well. When I try to evaluate it on the actual values I have, it just returns
'TypeError: only size-1 arrays can be converted to Python scalars'.
Here's the code:
def ISNR(ell, var, NFRB):
    ad = CL_tauvals + NL_tauvals
    bd = CL_disvals + NFRB
    cd = CL_dtauvals*CL_dtauvals
    res = ell*((CL_dtauvals**2)/(ad*bd + cd))
    return res #dimensionless

ISNR = np.vectorize(ISNR)
test_val = ISNR(1, 300, 4000)

def SNR(ell, var, NFRB, FoV):
    Integrand = lambda ell: ISNR(ell, var, NFRB)
    snr = quad(Integrand, 0, 100)
    prefactors = np.sqrt(4*np.pi*FoV)
    return prefactors*np.sqrt(snr)

SNR = np.vectorize(SNR)
SNR_test = SNR(l, 100, 100, 100)
Note that l is an array of 100 values I've defined using np.logspace. The error message I get is
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-28-31c52985c5c2> in <module>
22 SNR = np.vectorize(SNR)
23
---> 24 SNR_test = SNR(l, 100, 100, 100)
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/numpy/lib/function_base.py in __call__(self, *args, **kwargs)
2089 vargs.extend([kwargs[_n] for _n in names])
2090
-> 2091 return self._vectorize_call(func=func, args=vargs)
2092
2093 def _get_ufunc_and_otypes(self, func, args):
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/numpy/lib/function_base.py in _vectorize_call(self, func, args)
2159 res = func()
2160 else:
-> 2161 ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
2162
2163 # Convert args to object arrays first
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/numpy/lib/function_base.py in _get_ufunc_and_otypes(self, func, args)
2119
2120 inputs = [arg.flat[0] for arg in args]
-> 2121 outputs = func(*inputs)
2122
2123 # Performance note: profiling indicates that -- for simple
<ipython-input-28-31c52985c5c2> in SNR(ell, var, NFRB, FoV)
16 def SNR(ell, var, NFRB, FoV):
17 Integrand = lambda ell: ISNR(ell, var, NFRB)
---> 18 snr = quad(Integrand, 0, 100)
19 prefactors = np.sqrt(4*np.pi*FoV)
20 return prefactors*np.sqrt(snr)
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/scipy/integrate/quadpack.py in quad(func, a, b, args, full_output, epsabs, epsrel, limit, points, weight, wvar, wopts, maxp1, limlst)
349
350 if weight is None:
--> 351 retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
352 points)
353 else:
/Software/users/modules/7/software/anaconda3/2020.07/lib/python3.8/site-packages/scipy/integrate/quadpack.py in _quad(func, a, b, args, full_output, epsabs, epsrel, limit, points)
461 if points is None:
462 if infbounds == 0:
--> 463 return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
464 else:
465 return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
TypeError: only size-1 arrays can be converted to Python scalars
Can anyone explain what's going on and/or how to solve this? I've successfully done integrals of exactly this type with other code recently; the main difference is just that the three scalars are taken as inputs alongside the array and used by the integrand (which works) within the function. I really don't understand why it's trying to convert the array into a scalar: I just want the integral evaluated at each input value.
Some of the solutions I've found tell me to vectorize and use a lambda for the variable I'm integrating over... that's exactly what I've done. The other solutions I've found are just not that relevant to what I'm trying to do. If anyone has any tips, I'd be more than grateful.
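For reference, a minimal sketch of the calling pattern quad expects: the integrand maps a scalar to a scalar, and an array of limits is handled by looping explicitly rather than by vectorizing quad itself. The integrand and all names here are hypothetical toy stand-ins, not the ISNR from the question:
import numpy as np
from scipy.integrate import quad

def toy_integrand(x, a, b, c):
    # must return a single float for a scalar x
    return a * np.exp(-b * x) + c

a, b, c = 1.0, 2.0, 0.1
upper_limits = np.logspace(0, 2, 100)

# one quad call per upper limit; quad returns (value, error_estimate)
results = np.array([quad(toy_integrand, 0, up, args=(a, b, c))[0]
                    for up in upper_limits])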

3D graph error: "The truth value of an array with more than one element is ambiguous"

I am trying to plot a 3D graph, using a pre-existing function to generate the Z values. However, this yields the error "The truth value of an array with more than one element is ambiguous". This seems strange, as I am able to generate a list of Z values using the same function and x, y values, but the error occurs once I include the 3D graphing code.
My graphing code is:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
def f(tau,tau_b): #re-use society welfare function of tau & tau_b, using corr=0.6
    Z = society_welfare2 (0.6, tau, tau_b)
    return Z

xgrid=np.linspace(1e-5, 1-1e-5,100) #tau grid
ygrid=np.linspace(1e-5, 1-1e-5,100) #tau_b grid
tau,tau_b=np.meshgrid(xgrid,ygrid)

fig=plt.figure(figsize=(8,6))
ax=fig.add_subplot(111,projection='3d')
ax.plot_surface(tau,
                tau_b,
                f(tau,tau_b),
                rstride=2,cstride=2,
                cmap=cm.jet,
                alpha=0.7,
                linewidth=0.25)
ax.set_zlim(-0.5,1.0)
plt.show()
My society_welfare2 function code:
def society_welfare2 (corr, tau, tau_b):
    cov = [[1,corr], [corr,1]] #covariance
    epsilon_start,b_start = np.random.multivariate_normal(mean, cov, sample_N).T
    epsilon = np.exp(epsilon_start) #to ensure epsilon positive
    b = np.exp(b_start) #to ensure b positive
    indv_welfares = []

    def GBC (t_o):
        taxes_paid = []
        for i in range(sample_N): #loop over all agents to find their C1,C2,L
            def consumption_functions(Lguess,epsilon=epsilon,b=b):
                C2 = (((1-tau)*epsilon[i]*w*Lguess) +(1-tau_b)*b[i] + ((t_o)/(1+r)))/((1/((beta**(1/gamma))*((1+r)**(1/gamma)))) + (1/(1+r)))
                C1 = C2 /((beta**(1/gamma))*(1+r)**(1/gamma))
                return -Utility(C1,C2,Lguess)
            result = minimize_scalar(consumption_functions,bounds=(0,1),method='bounded', args=(epsilon, b))
            opt_L = result.x
            opt_C1=(((1-tau)*(epsilon[i])*w)/(opt_L**sigma))**(1/gamma)
            opt_C2=(opt_C1)*((beta**(1/gamma))*(1+r)**(1/gamma))
            income_tax = tau*(epsilon[i])*w*opt_L
            bequest_tax = tau_b*(b[i])
            taxes_paid.append(income_tax)
            taxes_paid.append(bequest_tax)
            welfare_func = opt_C1**(1-gamma)/(1-gamma)-opt_L**(1+sigma)/(1+sigma) + beta*(opt_C2**(1-gamma)/(1-gamma))
            indv_welfares.append(welfare_func)
        total_tax_revenue = sum(taxes_paid)
        return total_tax_revenue - (10000*t_o)

    result1 = minimize_scalar(GBC,bounds=(1e-5, 100000),method='bounded')
    opt_t_o = result1.x
    total_welfare = sum(indv_welfares)
    return total_welfare
The full traceback error code:
ValueError Traceback (most recent call last)
<ipython-input-19-3633f4a9db76> in <module>
18 ax.plot_surface(tau,
19 tau_b,
---> 20 f(tau,tau_b),
21 rstride=2,cstride=2,
22 cmap=cm.jet,
<ipython-input-19-3633f4a9db76> in f(tau, tau_b)
7
8 def f(tau,tau_b): #re-use society welfare function of tau & tau_b, using corr=0.6
----> 9 Z = society_welfare2 (0.6, tau, tau_b)
10 return Z
11
<ipython-input-17-321a709b9684> in society_welfare2(corr, tau, tau_b)
61 return total_tax_revenue - (10000*t_o)
62
---> 63 result1 = minimize_scalar(GBC,bounds=(1e-5, 100000),method='bounded')
64
65 opt_t_o = result1.x
/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_minimize.py in minimize_scalar(fun, bracket, bounds, args, method, tol, options)
798 if isinstance(disp, bool):
799 options['disp'] = 2 * int(disp)
--> 800 return _minimize_scalar_bounded(fun, bounds, args, **options)
801 elif meth == 'golden':
802 return _minimize_scalar_golden(fun, bracket, args, **options)
/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py in _minimize_scalar_bounded(func, bounds, args, xatol, maxiter, disp, **unknown_options)
1956 rat = e = 0.0
1957 x = xf
-> 1958 fx = func(x, *args)
1959 num = 1
1960 fmin_data = (1, xf, fx)
<ipython-input-17-321a709b9684> in GBC(t_o)
41 return -Utility(C1,C2,Lguess)
42
---> 43 result = minimize_scalar(consumption_functions,bounds=(0,1),method='bounded', args=(epsilon, b))
44
45 opt_L = result.x
/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/_minimize.py in minimize_scalar(fun, bracket, bounds, args, method, tol, options)
798 if isinstance(disp, bool):
799 options['disp'] = 2 * int(disp)
--> 800 return _minimize_scalar_bounded(fun, bounds, args, **options)
801 elif meth == 'golden':
802 return _minimize_scalar_golden(fun, bracket, args, **options)
/opt/anaconda3/lib/python3.8/site-packages/scipy/optimize/optimize.py in _minimize_scalar_bounded(func, bounds, args, xatol, maxiter, disp, **unknown_options)
2015 print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
2016
-> 2017 if fu <= fx:
2018 if x >= xf:
2019 a = xf
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
The lowest point in the traceback is
if fu <= fx:
That's comparing two variables in an if. That will work if fu and fx are scalars, or single-element arrays. But if either is a multi-element array, if will raise this error.
Our task at this point is to trace those variables back to your code. I suspect you are providing an array for some parameter where it should be a scalar.
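As a quick illustration of that comparison failing (toy values):
import numpy as np

fx = np.array([0.2, 0.7])   # a multi-element array where a scalar is expected
fu = 0.5
if fu <= fx:                # ValueError: truth value of an array ... is ambiguous
    pass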
Looking at the top: the error occurs when you ask for the plot, and one of the parameters is a function call:
f(tau,tau_b)
and on through a chain of function calls to the minimize_scalar call on the GBC function. I think GBC is the func in:
fx = func(x, *args)
Which raises the question, what exactly does GBC return? It's being used in a _minimize_scalar, so it should return exactly one value.
What is its return expression?
return total_tax_revenue - (10000*t_o)
Do you think you can take the analysis from there?
Now do you see why we insist on seeing the traceback? The error is in your code, but the sequence getting there is long and not obvious from simply reading the code.
edit
Oops, I see another level of minimize, one that uses
consumption_functions
It has several parameters, epsilon and b. I suppose we can deduce what those are. But what is
Utility
The fu <= fx comparison appears to be testing the fx return value against a bound fu. Assuming the bound is a scalar, the value fx must be an array. Is it?

TypeError: Cannot cast array data from dtype('O') to dtype('float64') according to the rule 'safe'

I need to compute an integral of the type g(u)*jn(u), where g(u) is a smooth function without zeros and jn(u) is a Bessel function with infinitely many zeros, but I got the following error:
TypeError: Cannot cast array data from dtype('O') to dtype('float64') according to the rule 'safe'
First I need to change variables from x to u and integrate in the new variable u, but the function u(x) is not analytically invertible, so I need to use interpolation to perform this inversion numerically.
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
x = np.linspace(0.1, 100, 1000)
u = lambda x: x*np.exp(x)
dxdu_x = lambda x: 1/((1+x) * np.exp(x)) ## dxdu as function of x: not invertible
dxdu_u = InterpolatedUnivariateSpline(u(x), dxdu_x(x)) ## dxdu as function of u: change of variable
After this, the integral is:
from mpmath import mp
def f(n):
    integrand = lambda U: dxdu_u(U) * mp.besselj(n,U)
    bjz = lambda nth: mp.besseljzero(n, nth)
    return mp.quadosc(integrand, [0,mp.inf], zeros=bjz)
I use quadosc from mpmath and not quad from scipy because quadosc is better suited to integrals of rapidly oscillating functions, like Bessel functions. On the other hand, this forces me to use two different packages: scipy to compute dxdu_u by interpolation, and mpmath to compute the Bessel functions mp.besselj(n,U) and the integral of the product dxdu_u(U) * mp.besselj(n,U). So I suspect that this mix of two packages may cause a conflict. When I run:
print(f(0))
I got the error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-38-ac2976a6b736> in <module>
12 return mp.quadosc(integrand, [0,mp.inf], zeros=bjz)
13
---> 14 f(0)
<ipython-input-38-ac2976a6b736> in f(n)
10 integrand = lambda U: dxdu_u(U) * mp.besselj(n,U)
11 bjz = lambda nth: mp.besseljzero(n, nth)
---> 12 return mp.quadosc(integrand, [0,mp.inf], zeros=bjz)
13
14 f(0)
TypeError: Cannot cast array data from dtype('O') to dtype('float64') according to the rule 'safe'
Does anyone know how I can solve this problem?
Thanks
The full traceback (the part you snipped) shows that the error is in the __call__ method of the UnivariateSpline object. So indeed the problem is that the mpmath integration routine feeds in its mpf values, and scipy has no way of dealing with them.
The simplest fix is then to manually cast the offending part of the integrand's argument to a float:
integrand = lambda U: dxdu_u(float(U)) * mp.besselj(n,U)
In general this is prone to numerical errors (mpmath uses its high-precision variables on purpose!) so proceed with caution. In this specific case it might be OK, because the interpolation is actually done in double precision. Still, best check the results.
A possible alternative might be to avoid mpmath and use the weight argument to scipy.integrate.quad; see the docs (scroll down to the weight="sin" part).
Another alternative is to stick with mpmath all the way and implement the interpolation yourself in pure Python (this way, mpf objects are probably fine, since they support the usual arithmetic). It's likely that simple linear interpolation is enough. If it's not, it's not too big a deal to code up your own cubic spline interpolator.
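As a rough sketch of that last option (assuming the same x grid as in the question, constant extrapolation outside the tabulated range, and a hypothetical helper name dxdu_u_mp), something like:
from bisect import bisect_right
import numpy as np
from mpmath import mp

# tabulate u(x) and dx/du once, as plain Python floats
x = np.linspace(0.1, 100, 1000)
u_tab = [float(v) for v in x * np.exp(x)]
dxdu_tab = [float(v) for v in 1 / ((1 + x) * np.exp(x))]

def dxdu_u_mp(U):
    # linear interpolation in ordinary Python arithmetic, so mpf values pass through
    if U <= u_tab[0]:
        return dxdu_tab[0]
    if U >= u_tab[-1]:
        return dxdu_tab[-1]
    i = bisect_right(u_tab, U) - 1
    t = (U - u_tab[i]) / (u_tab[i + 1] - u_tab[i])
    return dxdu_tab[i] + t * (dxdu_tab[i + 1] - dxdu_tab[i])

def f(n):
    integrand = lambda U: dxdu_u_mp(U) * mp.besselj(n, U)
    bjz = lambda nth: mp.besseljzero(n, nth)
    return mp.quadosc(integrand, [0, mp.inf], zeros=bjz)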
The full traceback:
In [443]: f(0)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-443-6bfbdbfff9c4> in <module>
----> 1 f(0)
<ipython-input-440-7ebeff3611f6> in f(n)
2 integrand = lambda U: dxdu_u(U) * mp.besselj(n,U)
3 bjz = lambda nth: mp.besseljzero(n, nth)
----> 4 return mp.quadosc(integrand, [0,mp.inf], zeros=bjz)
5
/usr/local/lib/python3.6/dist-packages/mpmath/calculus/quadrature.py in quadosc(ctx, f, interval, omega, period, zeros)
998 # raise ValueError("zeros do not appear to be correctly indexed")
999 n = 1
-> 1000 s = ctx.quadgl(f, [a, zeros(n)])
1001 def term(k):
1002 return ctx.quadgl(f, [zeros(k), zeros(k+1)])
/usr/local/lib/python3.6/dist-packages/mpmath/calculus/quadrature.py in quadgl(ctx, *args, **kwargs)
807 """
808 kwargs['method'] = 'gauss-legendre'
--> 809 return ctx.quad(*args, **kwargs)
810
811 def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
/usr/local/lib/python3.6/dist-packages/mpmath/calculus/quadrature.py in quad(ctx, f, *points, **kwargs)
740 ctx.prec += 20
741 if dim == 1:
--> 742 v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
743 elif dim == 2:
744 v, err = rule.summation(lambda x: \
/usr/local/lib/python3.6/dist-packages/mpmath/calculus/quadrature.py in summation(self, f, points, prec, epsilon, max_degree, verbose)
230 print("Integrating from %s to %s (degree %s of %s)" % \
231 (ctx.nstr(a), ctx.nstr(b), degree, max_degree))
--> 232 results.append(self.sum_next(f, nodes, degree, prec, results, verbose))
233 if degree > 1:
234 err = self.estimate_error(results, prec, epsilon)
/usr/local/lib/python3.6/dist-packages/mpmath/calculus/quadrature.py in sum_next(self, f, nodes, degree, prec, previous, verbose)
252 case the quadrature rule is able to reuse them.
253 """
--> 254 return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
255
256
/usr/local/lib/python3.6/dist-packages/mpmath/ctx_mp_python.py in fdot(ctx, A, B, conjugate)
942 hasattr_ = hasattr
943 types = (ctx.mpf, ctx.mpc)
--> 944 for a, b in A:
945 if type(a) not in types: a = ctx.convert(a)
946 if type(b) not in types: b = ctx.convert(b)
/usr/local/lib/python3.6/dist-packages/mpmath/calculus/quadrature.py in <genexpr>(.0)
252 case the quadrature rule is able to reuse them.
253 """
--> 254 return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
255
256
<ipython-input-440-7ebeff3611f6> in <lambda>(U)
1 def f(n):
----> 2 integrand = lambda U: dxdu_u(U) * mp.besselj(n,U)
3 bjz = lambda nth: mp.besseljzero(n, nth)
4 return mp.quadosc(integrand, [0,mp.inf], zeros=bjz)
5
at this point it starts using the scipy interpolation code
/usr/local/lib/python3.6/dist-packages/scipy/interpolate/fitpack2.py in __call__(self, x, nu, ext)
310 except KeyError:
311 raise ValueError("Unknown extrapolation mode %s." % ext)
--> 312 return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
313
314 def get_knots(self):
/usr/local/lib/python3.6/dist-packages/scipy/interpolate/fitpack.py in splev(x, tck, der, ext)
366 return tck(x, der, extrapolate=extrapolate)
367 else:
--> 368 return _impl.splev(x, tck, der, ext)
369
370
/usr/local/lib/python3.6/dist-packages/scipy/interpolate/_fitpack_impl.py in splev(x, tck, der, ext)
596 shape = x.shape
597 x = atleast_1d(x).ravel()
--> 598 y, ier = _fitpack._spl_(x, der, t, c, k, ext)
599
600 if ier == 10:
TypeError: Cannot cast array data from dtype('O') to dtype('float64') according to the rule 'safe'
_fitpack._spl_ is probably compiled code (for speed). It can't take the mpmath objects directly; it has to pass their values as C-compatible doubles.
To illustrate the problem, make a numpy array of mpmath objects:
In [444]: one,two = mp.mpmathify(1), mp.mpmathify(2)
In [445]: arr = np.array([one,two])
In [446]: arr
Out[446]: array([mpf('1.0'), mpf('2.0')], dtype=object)
In [447]: arr.astype(float) # default 'unsafe' casting
Out[447]: array([1., 2.])
In [448]: arr.astype(float, casting='safe')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-448-4860036bcca8> in <module>
----> 1 arr.astype(float, casting='safe')
TypeError: Cannot cast array from dtype('O') to dtype('float64') according to the rule 'safe'
With integrand = lambda U: dxdu_u(float(U)) * mp.besselj(n,U),
In [453]: f(0) # a minute or so later
Out[453]: mpf('0.61060303588231069')

Preconditioned Conjugate Gradient and LinearOperator in python

[Homework] I am going to solve the linear system Ax=b with the Preconditioned Conjugate Gradient method, and I use the spilu function from scipy.sparse.linalg for the preconditioner. A is a sparse symmetric 162x162 matrix. spilu gives an approximation to the inverse of A: say M approximates A, then spilu(A) gives M^-1, which is the preconditioner. I found that we can pass the preconditioner directly to the Conjugate Gradient function in Python, but my code below does not work.
M_inverse=scipy.sparse.linalg.spilu(A)
M2=scipy.sparse.linalg.LinearOperator((162,162),M_inverse.solve)
x3=scipy.sparse.linalg.cg(A,b,M2)
TypeError Traceback (most recent call last)
<ipython-input-84-86f8f91df8d2> in <module>()
----> 1 x3=scipy.sparse.linalg.cg(A,b,M2)
/Users/ruobinghan/anaconda/lib/python3.4/site-packages/scipy/sparse/linalg/isolve/iterative.py in cg(A, b, x0, tol, maxiter, xtype, M, callback)
/Users/ruobinghan/anaconda/lib/python3.4/site-packages/scipy/sparse/linalg/isolve/iterative.py in non_reentrant(func, *a, **kw)
83 try:
84 d['__entered'] = True
---> 85 return func(*a, **kw)
86 finally:
87 d['__entered'] = False
/Users/ruobinghan/anaconda/lib/python3.4/site-packages/scipy/sparse/linalg/isolve/iterative.py in cg(A, b, x0, tol, maxiter, xtype, M, callback)
219 #non_reentrant
220 def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None):
--> 221 A,M,x,b,postprocess = make_system(A,M,x0,b,xtype)
222
223 n = len(b)
/Users/ruobinghan/anaconda/lib/python3.4/site-packages/scipy/sparse/linalg/isolve/utils.py in make_system(A, M, x0, b, xtype)
108 x = zeros(N, dtype=xtype)
109 else:
--> 110 x = array(x0, dtype=xtype)
111 if not (x.shape == (N,1) or x.shape == (N,)):
112 raise ValueError('A and x have incompatible dimensions')
TypeError: float() argument must be a string or a number, not 'LinearOperator'
Also, the question hints that I will need to use the LinearOperator interface; I do not understand what exactly LinearOperator does and why we need it here.
Any suggestion would be appreciated!
Thanks in advance!
I think the parameters are in the wrong order:
x3=scipy.sparse.linalg.cg(A,b,M2)
In the Error message:
220 def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None):
--> 221 A,M,x,b,postprocess = make_system(A,M,x0,b,xtype)
M2 is in the place of x0, the initial guess of the solution, not the preconditioner.
On my machine, with the correct order, the LinearOperator class works fine.
The correct version:
x3=scipy.sparse.linalg.cg(A,b,M=M2)
Please use keyword arguments as often as possible.

Sparse Matrix Addition yields 'ValueError: setting an array element with a sequence.'

The lines in question are:
# Make efficient matrix that can be built
K = sparse.lil_matrix((N, N))

# Calculate K matrix (<i|pHp|j> in the LGL-nodes basis)
for i in range(Ne):
    idx_s, idx_e = i*(Np-1), i*(Np-1)+Np
    print(shape(K[idx_s:idx_e, idx_s:idx_e]))
    print(shape(dmat.T.dot(sparse.spdiags(w*peq[idx_s:idx_e], 0, Np, Np)).dot(dmat)))
    K[idx_s:idx_e, idx_s:idx_e] += dmat.T.dot(sparse.spdiags(w*peq[idx_s:idx_e], 0, Np, Np)).dot(dmat)
But, currently, Numpy is yielding the error
(8, 8)
(8, 8)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-62-cc7cc21f07e5> in <module>()
22
23 for _ in range(N):
---> 24 ll, q = getLL(Ne, Np, x_d, w_d, dmat_d, x, w, dL, peq*peq, data)
25 peq = (peq*q)
26
<ipython-input-61-a52c13d48b87> in getLL(Ne, Np, x_d, w_d, dmat_d, x, w, dmat, peq, data)
15 print(shape(K[idx_s:idx_e, idx_s:idx_e]))
16 print(shape(dmat.T.dot(sparse.spdiags(w*peq[idx_s:idx_e], 0, Np, Np)).dot(dmat)))
---> 17 K[idx_s:idx_e, idx_s:idx_e] += dmat.T.dot(sparse.spdiags(w*peq[idx_s:idx_e], 0, Np, Np)).dot(dmat)
18
19 # Re-make matrix for efficient vector products
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/scipy/sparse/lil.py in __iadd__(self, other)
157
158 def __iadd__(self,other):
--> 159 self[:,:] = self + other
160 return self
161
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/scipy/sparse/lil.py in __setitem__(self, index, x)
307
308 # Make x and i into the same shape
--> 309 x = np.asarray(x, dtype=self.dtype)
310 x, _ = np.broadcast_arrays(x, i)
311
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/numpy/core/numeric.py in asarray(a, dtype, order)
460
461 """
--> 462 return array(a, dtype, copy=False, order=order)
463
464 def asanyarray(a, dtype=None, order=None):
ValueError: setting an array element with a sequence.
This is a little cryptic, as it seems that the error is happening somewhere inside the NumPy library, not in my code. But I'm not terribly familiar with numpy, so perhaps I'm indirectly causing the error.
Both slices are of the same shape, so that doesn't seem to be the actual error.
The problem is that
dmat.T.dot(sparse.spdiags(w*peq[idx_s:idx_e], 0, Np, Np)).dot(dmat)
is not a simple array. It has the right shape, but the elements are sparse matrices (the 'sequence' in the error message).
Turning the inner sparse matrix into a dense array should solve the problem:
dmat.T.dot(sparse.spdiags(w*peq[idx_s:idx_e], 0, Np, Np).A).dot(dmat)
The np.dot method is not aware of sparse matrices, at least not in your version of numpy (1.8?), so it treats them as sequences. Newer versions are 'sparse' aware.
Another solution is to use the sparse matrix product (dot or *).
sparse.spdiags(...).dot(dmat etc)
I had to play around to get reasonable values for N,Np,Ns, dmat,peq. You really should have given us small samples. It makes testing ideas much easier.
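A minimal sketch of the first fix with toy shapes and data (the real dmat, w, and peq weren't given):
import numpy as np
from scipy import sparse

Np = 4
dmat = np.arange(Np * Np, dtype=float).reshape(Np, Np)   # stand-in dense block
w_peq = np.linspace(1.0, 2.0, Np)                        # stands in for w*peq[idx_s:idx_e]
D = sparse.spdiags(w_peq, 0, Np, Np)

# densify the sparse diagonal before handing it to ndarray.dot,
# so the triple product is an ordinary 2-D float array
block = dmat.T.dot(D.toarray()).dot(dmat)

K = sparse.lil_matrix((Np, Np))
K[0:Np, 0:Np] += block   # no 'setting an array element with a sequence' now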
