How to create a customized Linear Operator to solve Ax = b? - python

I want to create a linear operator in Python to solve Ax = b, where A is a large-scale dense matrix of float64. Since matrix A causes both performance and memory problems, I thought about creating a customized operator as follows:
from numpy import ones
from numpy.linalg import inv
import scipy.sparse.linalg
from sklearn.datasets import make_spd_matrix
n = 100
def solver(A, b):
    return inv(A).dot(b)
M = make_spd_matrix(n, random_state=11)
print(M.shape)
solverFunc = scipy.sparse.linalg.LinearOperator((n, n), matvec=solver)
solverFunc.matvec(M, ones((n, 1)))
However, I get the following error:
Traceback (most recent call last):
File "C:\Users\anoir\Desktop\CG_accelerator\inversion\main.py", line 15, in <module>
solverFunc = LinearOperator((n, n), matvec=solver)
File "C:\ProgramData\Anaconda3\envs\inversion\lib\site-packages\scipy\sparse\linalg\interface.py", line 521, in __init__
self._init_dtype()
File "C:\ProgramData\Anaconda3\envs\inversion\lib\site-packages\scipy\sparse\linalg\interface.py", line 178, in _init_dtype
self.dtype = np.asarray(self.matvec(v)).dtype
File "C:\ProgramData\Anaconda3\envs\inversion\lib\site-packages\scipy\sparse\linalg\interface.py", line 232, in matvec
y = self._matvec(x)
File "C:\ProgramData\Anaconda3\envs\inversion\lib\site-packages\scipy\sparse\linalg\interface.py", line 530, in _matvec
return self.__matvec_impl(x)
TypeError: solver() missing 1 required positional argument: 'b'
What seems to be the problem here? I followed the documentation, but it says nothing about writing a custom LinearOperator.

The matvec callable that LinearOperator expects takes only one argument, the input vector. You can get around this by using a closure, as shown below:
from numpy.linalg import inv
import numpy as np
import scipy.sparse.linalg
from scipy.sparse import random
import timeit
n = 100
def solver_closure(A):
    # This is the outer enclosing function
    def solver(b):
        return inv(A).dot(b)
    return solver  # returns the nested function
M = np.random.rand(n, n)
b = range(n)
print(M.shape)
solverFunc = scipy.sparse.linalg.LinearOperator((n, n), matvec=solver_closure(M))
def test100():
    x = solverFunc.matvec(b)
    print(np.matmul(M, x))
print(timeit.timeit("test100()", setup="from __main__ import test100",number=10))
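One caveat with the closure above (not from the original answer): inv(A) is recomputed on every matvec call. A common alternative is to factor A once and reuse the factorization inside the closure; here is a minimal sketch of that idea using scipy.linalg.lu_factor and lu_solve (names such as make_solver are illustrative):
import numpy as np
import scipy.linalg
import scipy.sparse.linalg

n = 100
A = np.random.rand(n, n) + n * np.eye(n)  # well-conditioned test matrix
lu, piv = scipy.linalg.lu_factor(A)       # factor A once

def make_solver(lu, piv):
    def solver(b):
        # each matvec is now just two triangular solves
        return scipy.linalg.lu_solve((lu, piv), b)
    return solver

solverFunc = scipy.sparse.linalg.LinearOperator((n, n), matvec=make_solver(lu, piv))
b = np.ones(n)
x = solverFunc.matvec(b)
print(np.allclose(A @ x, b))  # True: x solves Ax = b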

Related

How can I find Wronskian determinant with numpy

I am trying to find the Wronskian determinant of these three functions, but the code raises TypeError: No loop matching the specified signature and casting was found for ufunc det. How can I solve it?
import numpy as np
import numpy.linalg
import sympy as sp
x = sp.Symbol('x')
e=sp.exp(-3*x) #inputs
f=sp.cos(2*x)
g=sp.sin(2*x)
buneya=np.array([e,f,g],dtype=object)
a=sp.diff(buneya[0]) #first derivative
b=sp.diff(buneya[1])
c=sp.diff(buneya[2])
k=sp.diff(a)
l=sp.diff(b) #second derivative
m=sp.diff(c)
wronskian=np.array([[e,f,g],[a,b,c],[k,l,m]],dtype=object)
print (np.linalg.det(wronskian) ) #determinant
Traceback (most recent call last):
File "C:\Users\canat\.spyder-py3\temp.py", line 20, in <module>
print (np.linalg.det(wronskian) )
File "<__array_function__ internals>", line 5, in det
File "C:\ProgramData\Anaconda3\lib\site-packages\numpy\linalg\linalg.py", line 2159, in det
r = _umath_linalg.det(a, signature=signature)
TypeError: No loop matching the specified signature and casting was found for ufunc det
numpy.linalg.det only works on arrays with a numeric dtype; it cannot evaluate an object array of SymPy expressions, so compute the determinant symbolically instead. It is that simple:
import sympy as sp
x = sp.Symbol('x')
e=sp.exp(-3*x) #inputs
f=sp.cos(2*x)
g=sp.sin(2*x)
buneya=[e,f,g]
a=sp.diff(buneya[0]) #first derivative
b=sp.diff(buneya[1])
c=sp.diff(buneya[2])
k=sp.diff(a)
l=sp.diff(b) #second derivative
m=sp.diff(c)
wronskian=sp.Matrix([[e,f,g],[a,b,c],[k,l,m]]).det()
print(wronskian)
returns
26*exp(-3*x)*sin(2*x)**2 + 26*exp(-3*x)*cos(2*x)**2
See the SymPy matrices docs for further info.
EDIT #1
And even simpler!
import sympy as sp
from sympy.matrices import dense
x = sp.Symbol('x')
e=sp.exp(-3*x) #inputs
f=sp.cos(2*x)
g=sp.sin(2*x)
wronskian = dense.wronskian([e,f,g],x)
print(wronskian)
returns the same
26*exp(-3*x)*sin(2*x)**2 + 26*exp(-3*x)*cos(2*x)**2
The wronskian method is also documented in the SymPy docs.
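As a small follow-up (not part of the original answer), the result collapses further because sin(2*x)**2 + cos(2*x)**2 == 1, so simplifying gives a more compact expression:
import sympy as sp
from sympy.matrices import dense

x = sp.Symbol('x')
w = dense.wronskian([sp.exp(-3*x), sp.cos(2*x), sp.sin(2*x)], x)
print(sp.simplify(w))  # 26*exp(-3*x)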

scipy curve_fit doesn't like math module

While trying to create an example with scipy.optimize's curve_fit, I found that scipy seems to be incompatible with Python's math module. While function f1 works fine, f2 throws an error message.
from scipy.optimize import curve_fit
from math import sin, pi, log, exp, floor, fabs, pow
import numpy as np

x_axis = np.asarray([pi * i / 6 for i in range(-6, 7)])
y_axis = np.asarray([sin(i) for i in x_axis])

def f1(x, m, n):
    return m * x + n

coeff1, mat = curve_fit(f1, x_axis, y_axis)
print(coeff1)

def f2(x, m, n):
    return m * sin(x) + n

coeff2, mat = curve_fit(f2, x_axis, y_axis)
print(coeff2)
The full traceback is
Traceback (most recent call last):
File "/Documents/Programming/Eclipse/PythonDevFiles/so_test.py", line 49, in <module>
coeff2, mat = curve_fit(f2, x_axis, y_axis)
File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/minpack.py", line 742, in curve_fit
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/minpack.py", line 377, in leastsq
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/minpack.py", line 26, in _check_func
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/minpack.py", line 454, in func_wrapped
return func(xdata, *params) - ydata
File "/Documents/Programming/Eclipse/PythonDevFiles/so_test.py", line 47, in f2
return m * sin(x) + n
TypeError: only length-1 arrays can be converted to Python scalars
The error message appears with lists and numpy arrays as input alike. It affects all of the math functions I tested (see the imports) and must have something to do with how the math module handles its input. This is most obvious with pow(): if I don't import pow from math, curve_fit works properly with the built-in pow().
The obvious question: why does this happen, and how can math functions be used with curve_fit?
P.S.: Please don't point out that one shouldn't fit this sample data with a linear function; it was just chosen to illustrate the problem.
Be careful with numpy arrays: some operations work on whole arrays and some only on scalars!
scipy.optimize assumes the input (initial point) to be a 1d array, and things often go wrong in other cases (a list, for example, becomes an array, and if you assumed you were working on lists, things go haywire; those kinds of problems are common here on StackOverflow and are not easy to debug by eye; interacting with the code helps!).
import numpy as np
import math
x = np.ones(1)
np.sin(x)
> array([0.84147098])
math.sin(x)
> 0.8414709848078965 # this only works as numpy has dedicated support
# as indicated by the error-msg below!
x = np.ones(2)
np.sin(x)
> array([0.84147098, 0.84147098])
math.sin(x)
> TypeError: only size-1 arrays can be converted to Python scalars
To be honest: this is part of a very basic understanding of numpy and should be understood before using scipy's somewhat sensitive functions.
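Concretely, the fix for the question's f2 is to use the numpy versions of the math functions, which operate elementwise on the arrays curve_fit passes in; a minimal sketch:
import numpy as np
from scipy.optimize import curve_fit

x_axis = np.asarray([np.pi * i / 6 for i in range(-6, 7)])
y_axis = np.sin(x_axis)

def f2(x, m, n):
    # np.sin accepts the whole array at once, unlike math.sin
    return m * np.sin(x) + n

coeff2, mat = curve_fit(f2, x_axis, y_axis)
print(coeff2)  # m close to 1, n close to 0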

Float error while attempting to use the bisect optimizer within scipy

I’m having trouble using the bisect optimizer within scipy. Here are the relevant portions of my code:
How I’m importing things
import numpy as np
import scipy.optimize as sp
import matplotlib.pyplot as plt
Break in code, section causing errors below
#All variables are previously defined except for h
def BeamHeight(h):
    x = 1000e3*M[i]*h/(fw*h^3-(fw-wt)(h-2*ft)^3) - Max_stress_steel
    return x

for i in range(0,50):
    h = np.zeros((50))
    h[i] = sp.bisect(BeamHeight, hb, 5, xtol=0.001)
Causing this error:
Traceback (most recent call last):
File "ShearMoment.py", line 63, in <module>
h[i] = sp.bisect(BeamHeight, hb, 5,xtol = 0.001)
File "/usr/lib/python2.7/dist-packages/scipy/optimize/zeros.py", line 248, in bisect
r = _zeros._bisect(f,a,b,xtol,rtol,maxiter,args,full_output,disp)
File "ShearMoment.py", line 58, in BeamHeight
x = 1000e3*M[i]*h/(fw*h^3-(fw-wt)(h-2*ft)^3) - Max_stress_steel
TypeError: 'float' object is not callable
I understand that scipy.optimize expects a function as one of its arguments. Am I doing this incorrectly?
In Python, juxtaposition is not implicit multiplication, and ^ is not exponentiation (it is bitwise XOR). Multiplication must be made explicit with *, and exponentiation must be written as **. This part of BeamHeight:
fw*h^3-(fw-wt)(h-2*ft)^3
must be written as
fw*h**3-(fw-wt)*(h-2*ft)**3
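For illustration only, here is that correction in a self-contained sketch; the parameter values (fw, wt, ft, Max_stress_steel, hb, the moments M) are made-up placeholders, not values from the question:
import numpy as np
import scipy.optimize as sp

# hypothetical beam parameters, chosen only so that a root exists in [hb, 5]
fw, wt, ft = 0.3, 0.02, 0.02
Max_stress_steel = 250e6
hb = 0.1
M = np.linspace(1.0, 100.0, 50)  # placeholder bending moments

def BeamHeight(h, Mi):
    # explicit * for multiplication, ** for exponentiation
    return 1000e3 * Mi * h / (fw * h**3 - (fw - wt) * (h - 2 * ft)**3) - Max_stress_steel

h = np.zeros(50)
for i in range(50):
    h[i] = sp.bisect(BeamHeight, hb, 5, args=(M[i],), xtol=0.001)
print(h[:5])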

Strange TypeError with Theano

Traceback (most recent call last):
File "test.py", line 37, in <module>
print convLayer1.output.shape.eval({x:xTrain})
File "/Volumes/TONY/anaconda/lib/python2.7/site-packages/theano/gof/graph.py", line 415, in eval
rval = self._fn_cache[inputs](*args)
File "/Volumes/TONY/anaconda/lib/python2.7/site-packages/theano/compile/function_module.py", line 513, in __call__
allow_downcast=s.allow_downcast)
File "/Volumes/TONY/anaconda/lib/python2.7/site-packages/theano/tensor/type.py", line 180, in filter
"object dtype", data.dtype)
TypeError
And here is my code:
import scipy.io as sio
import numpy as np
import theano.tensor as T
from theano import shared
from convnet3d import ConvLayer, NormLayer, PoolLayer, RectLayer
from mlp import LogRegr, HiddenLayer, DropoutLayer
from activations import relu, tanh, sigmoid, softplus
dataReadyForCNN = sio.loadmat("DataReadyForCNN.mat")
xTrain = dataReadyForCNN["xTrain"]
# xTrain = np.random.rand(10, 1, 5, 6, 2).astype('float64')
xTrain.shape
dtensor5 = T.TensorType('float64', (False,)*5)
x = dtensor5('x') # the input data
yCond = T.ivector()
# input = (nImages, nChannel(nFeatureMaps), nDim1, nDim2, nDim3)
kernel_shape = (5,6,2)
fMRI_shape = (51, 61, 23)
n_in_maps = 1 # channel
n_out_maps = 5 # num of feature maps, aka the depth of the neurons
num_pic = 2592
layer1_input = x
# layer1_input.eval({x:xTrain}).shape
# layer1_input.shape.eval({x:numpy.zeros((2592, 1, 51, 61, 23))})
convLayer1 = ConvLayer(layer1_input, n_in_maps, n_out_maps, kernel_shape, fMRI_shape,
                       num_pic, tanh)
print convLayer1.output.shape.eval({x:xTrain})
It is really weird: the error was not thrown in Jupyter (although it took a very long time to run and the kernel eventually died, and I don't know why), but when I moved the code to the shell and ran python fileName.py, the error was thrown.
The problem lies in loadmat from scipy. The TypeError you are getting is thrown by this code in Theano:
if not data.flags.aligned:
    ...
    raise TypeError(...)
Now, when you create a new array in numpy from raw data, it will usually be aligned:
>>> a = np.array(2)
>>> a.flags.aligned
True
But if you savemat / loadmat it, the value of the flag gets lost:
>>> savemat('test', {'a':a})
>>> a2 = loadmat('test')['a']
>>> a2.flags.aligned
False
(seems like this particular issue is discussed here)
One quick and dirty way to address it is to create a new numpy array from the array you loaded:
>>> a2 = loadmat('test')['a']
>>> a3 = np.array(a2)
>>> a3.flags.aligned
True
So, for your code:
dataReadyForCNN = sio.loadmat("DataReadyForCNN.mat")
xTrain = np.array(dataReadyForCNN["xTrain"])
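To confirm the fix on the actual data, the aligned flag can be checked before and after the copy (same names as above):
import numpy as np
import scipy.io as sio

dataReadyForCNN = sio.loadmat("DataReadyForCNN.mat")
print(dataReadyForCNN["xTrain"].flags.aligned)  # False, per the behaviour described above

xTrain = np.array(dataReadyForCNN["xTrain"])    # fresh copy owns newly allocated, aligned memory
print(xTrain.flags.aligned)                     # True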

Use scikit-cuda to compute singular value decomposition with cuSOLVER

I am trying to use scikit-cuda's wrappers for the cuSOLVER functions; in particular, I want to execute cusolverDnSgesvd to compute a full-matrix, single-precision SVD of a matrix of real numbers.
Using the code here and here as a reference, I managed to get this far:
import pycuda.autoinit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import numpy as np
from skcuda import cusolver
handle = cusolver.cusolverDnCreate()
m = 50
n = 25
a = np.asarray(np.random.random((m, n)))
a_gpu = gpuarray.to_gpu(a)
ldu = m
ldvt = n
s_gpu = gpuarray.empty(min(m, n), np.float32)
u_gpu = gpuarray.empty((ldu, m), np.float32)
vh_gpu = gpuarray.empty((n, n), np.float32)
work_size = cusolver.cusolverDnSgesvd_bufferSize(handle, m, n)
work = gpuarray.empty((m,n), np.float32)
u_gpu, s_gpu, vh_gpu = cusolver.cusolverDnSgesvd(
    handle=handle,
    jobu='A',
    jobvt='A',
    m=m,
    n=n,
    A=a,
    lda=m,
    S=s_gpu,
    U=u_gpu,
    ldu=ldu,
    VT=vh_gpu,
    ldvt=ldvt,
    Work=work,
    Lwork=work_size,
    rwork=None,
    devInfo=0
)
But the code isn't working, probably because I'm messing up the types.
Traceback (most recent call last):
File "/home/vektor/PycharmProjects/yancut/test_svd.py", line 44, in <module>
devInfo=0
File "/home/vektor/anaconda3/lib/python3.4/site-packages/skcuda/cusolver.py", line 577, in cusolverDnSgesvd
int(A), lda, int(S), int(U),
TypeError: only length-1 arrays can be converted to Python scalars
How should I provide all the arguments so that the SVD is executed properly?
UPDATE1:
Using this question as a reference, I edited my code and am now getting a new error.
import pycuda.autoinit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import numpy as np
import ctypes
from skcuda import cusolver
rows = 20
cols = 10
a = np.asarray(np.random.random((rows, cols)))
a_gpu = gpuarray.to_gpu(a.copy())
lda = rows
u_gpu = gpuarray.empty((rows, rows), np.float32)
v_gpu = gpuarray.empty((cols, cols), np.float32)
s_gpu = gpuarray.empty(cols, np.float32)
devInfo = gpuarray.zeros(1, np.int32)
handle = cusolver.cusolverDnCreate()
worksize = cusolver.cusolverDnSgesvd_bufferSize(handle, rows, cols)
print("SIZE", worksize)
Workspace = gpuarray.empty(worksize, np.float32)
svd_status = cusolver.cusolverDnSgesvd(
    handle=handle,
    jobu='A',
    jobvt='A',
    m=rows,
    n=cols,
    A=a_gpu.ptr,
    lda=rows,
    S=s_gpu.ptr,
    U=u_gpu.ptr,
    ldu=rows,
    VT=v_gpu.ptr,
    ldvt=cols,
    Work=Workspace.ptr,
    Lwork=worksize,
    rwork=Workspace.ptr,
    devInfo=devInfo.ptr
)
status = cusolver.cusolverDnDestroy(handle)
And I'm getting a new error:
Traceback (most recent call last):
File "/home/vektor/PycharmProjects/yancut/test_svd.py", line 53, in <module>
devInfo=devInfo.ptr
File "/home/vektor/anaconda3/lib/python3.4/site-packages/skcuda/cusolver.py", line 579, in cusolverDnSgesvd
Lwork, int(rwork), int(devInfo))
ctypes.ArgumentError: argument 2: <class 'TypeError'>: wrong type
It now seems that I'm doing something wrong with devInfo.
From the documentation it looks like each of the matrices (so A, S, U, VT) needs to be passed as a device pointer. So for PyCUDA gpuarrays, pass A.ptr rather than A, etc., and it should work.
