I’m having trouble using the bisect root finder within scipy.optimize. Here are the relevant portions of my code:
How I’m importing things:
import numpy as np
import scipy.optimize as sp
import matplotlib.pyplot as plt
Break in code; the section causing errors is below:
#All variables are previously defined except for h
def BeamHeight(h):
    x = 1000e3*M[i]*h/(fw*h^3-(fw-wt)(h-2*ft)^3) - Max_stress_steel
    return x

for i in range(0,50):
    h = np.zeros((50))
    h[i] = sp.bisect(BeamHeight, hb, 5,xtol = 0.001)
This causes the following error:
Traceback (most recent call last):
  File "ShearMoment.py", line 63, in <module>
    h[i] = sp.bisect(BeamHeight, hb, 5,xtol = 0.001)
  File "/usr/lib/python2.7/dist-packages/scipy/optimize/zeros.py", line 248, in bisect
    r = _zeros._bisect(f,a,b,xtol,rtol,maxiter,args,full_output,disp)
  File "ShearMoment.py", line 58, in BeamHeight
    x = 1000e3*M[i]*h/(fw*h^3-(fw-wt)(h-2*ft)^3) - Max_stress_steel
TypeError: 'float' object is not callable
I understand that scipy.optimize expects a function as one of its arguments. Am I doing this incorrectly?
In Python, juxtaposition is not implicit multiplication, and ^ is not exponentiation (it is bitwise XOR). Multiplication must be made explicit with *, and exponentiation must be written as **. This part of BeamHeight:
fw*h^3-(fw-wt)(h-2*ft)^3
must be written as
fw*h**3-(fw-wt)*(h-2*ft)**3
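For illustration, here is a minimal self-contained sketch of the corrected code. The constants (fw, wt, ft, M, hb, Max_stress_steel) are hypothetical placeholders, since the question defines them elsewhere; note also that the original allocates h inside the loop, which overwrites earlier results, so the allocation is moved out:

import numpy as np
import scipy.optimize as sp

# Hypothetical placeholder values -- not from the question
fw, wt, ft = 0.2, 0.01, 0.02
Max_stress_steel = 250e6
M = np.linspace(1.0, 50.0, 50)  # assumed bending moments
hb = 0.1                        # assumed lower bracket for the root

def BeamHeight(h):
    # Explicit * for multiplication, ** for exponentiation;
    # i is read from the enclosing scope, as in the question.
    return 1000e3*M[i]*h/(fw*h**3 - (fw-wt)*(h-2*ft)**3) - Max_stress_steel

h = np.zeros(50)  # allocate once, outside the loop
for i in range(50):
    h[i] = sp.bisect(BeamHeight, hb, 5, xtol=0.001)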
I want to create a linear operator in Python to solve Ax = b, where A is a large-scale dense matrix of float64. Since A causes both performance and memory problems, I thought about creating a customized operator as follows:
from numpy import ones
from numpy.linalg import inv
import scipy.sparse.linalg
from sklearn.datasets import make_spd_matrix
n = 100
def solver(A, b):
    return inv(A).dot(b)
M = make_spd_matrix(n, random_state=11)
print(M.shape)
solverFunc = scipy.sparse.linalg.LinearOperator((n, n), matvec=solver)
solverFunc.matvec(M, ones((n, 1)))
However, I get the following error:
Traceback (most recent call last):
  File "C:\Users\anoir\Desktop\CG_accelerator\inversion\main.py", line 15, in <module>
    solverFunc = LinearOperator((n, n), matvec=solver)
  File "C:\ProgramData\Anaconda3\envs\inversion\lib\site-packages\scipy\sparse\linalg\interface.py", line 521, in __init__
    self._init_dtype()
  File "C:\ProgramData\Anaconda3\envs\inversion\lib\site-packages\scipy\sparse\linalg\interface.py", line 178, in _init_dtype
    self.dtype = np.asarray(self.matvec(v)).dtype
  File "C:\ProgramData\Anaconda3\envs\inversion\lib\site-packages\scipy\sparse\linalg\interface.py", line 232, in matvec
    y = self._matvec(x)
  File "C:\ProgramData\Anaconda3\envs\inversion\lib\site-packages\scipy\sparse\linalg\interface.py", line 530, in _matvec
    return self.__matvec_impl(x)
TypeError: solver() missing 1 required positional argument: 'b'
What seems to be the problem here? I followed the documentation, but it says nothing about custom LinearOperator objects.
The matvec callback of a LinearOperator only takes one parameter (the vector), so the matrix has to be bound some other way. You can get around this by using a closure, as shown below:
from numpy.linalg import inv
import numpy as np
import scipy.sparse.linalg
import timeit

n = 100

def solver_closure(A):
    # This is the outer enclosing function
    def solver(b):
        return inv(A).dot(b)
    return solver  # returns the nested function

M = np.random.rand(n, n)
b = range(n)
print(M.shape)

solverFunc = scipy.sparse.linalg.LinearOperator((n, n), matvec=solver_closure(M))

def test100():
    x = solverFunc.matvec(b)
    print(np.matmul(M, x))

print(timeit.timeit("test100()", setup="from __main__ import test100", number=10))
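An equivalent alternative, in case closures feel unfamiliar: bind the matrix argument with functools.partial. A minimal sketch reusing the question's two-argument solver; note that inverting A on every matvec call is expensive, so in practice you would factor A once (e.g. with scipy.linalg.lu_factor) instead:

from functools import partial
import numpy as np
from numpy.linalg import inv
import scipy.sparse.linalg

n = 100

def solver(A, b):
    return inv(A).dot(b)

M = np.random.rand(n, n)

# partial(solver, M) yields a one-argument callable, as matvec requires
solverFunc = scipy.sparse.linalg.LinearOperator((n, n), matvec=partial(solver, M))
x = solverFunc.matvec(np.ones(n))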
I am trying to find these 3 functions' Wronskian determinant, but the code raises "TypeError: No loop matching the specified signature and casting was found for ufunc det". How can I solve it?
import numpy as np
import numpy.linalg
import sympy as sp
x = sp.Symbol('x')
e=sp.exp(-3*x) #inputs
f=sp.cos(2*x)
g=sp.sin(2*x)
buneya=np.array([e,f,g],dtype=object)
a=sp.diff(buneya[0]) #first derivative
b=sp.diff(buneya[1])
c=sp.diff(buneya[2])
k=sp.diff(a)
l=sp.diff(b) #second derivative
m=sp.diff(c)
wronskian=np.array([[e,f,g],[a,b,c],[k,l,m]],dtype=object)
print (np.linalg.det(wronskian) ) #determinant
Traceback (most recent call last):
  File "C:\Users\canat\.spyder-py3\temp.py", line 20, in <module>
    print (np.linalg.det(wronskian) )
  File "<__array_function__ internals>", line 5, in det
  File "C:\ProgramData\Anaconda3\lib\site-packages\numpy\linalg\linalg.py", line 2159, in det
    r = _umath_linalg.det(a, signature=signature)
TypeError: No loop matching the specified signature and casting was found for ufunc det
It is that simple:
import sympy as sp
x = sp.Symbol('x')
e=sp.exp(-3*x) #inputs
f=sp.cos(2*x)
g=sp.sin(2*x)
buneya=[e,f,g]
a=sp.diff(buneya[0]) #first derivative
b=sp.diff(buneya[1])
c=sp.diff(buneya[2])
k=sp.diff(a)
l=sp.diff(b) #second derivative
m=sp.diff(c)
wronskian=sp.Matrix([[e,f,g],[a,b,c],[k,l,m]]).det()
print(wronskian)
returns
26*exp(-3*x)*sin(2*x)**2 + 26*exp(-3*x)*cos(2*x)**2
See the sympy matrices docs for further info.
EDIT #1
And even simpler!
import sympy as sp
from sympy.matrices import dense
x = sp.Symbol('x')
e=sp.exp(-3*x) #inputs
f=sp.cos(2*x)
g=sp.sin(2*x)
wronskian = dense.wronskian([e,f,g],x)
print(wronskian)
returns the same
26*exp(-3*x)*sin(2*x)**2 + 26*exp(-3*x)*cos(2*x)**2
The wronskian function is also documented in the sympy docs.
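As a side note, the result can be simplified further, since sin(2*x)**2 + cos(2*x)**2 == 1. A quick check with sympy's simplify, continuing from the snippet above:

import sympy as sp

x = sp.Symbol('x')
w = 26*sp.exp(-3*x)*sp.sin(2*x)**2 + 26*sp.exp(-3*x)*sp.cos(2*x)**2
print(sp.simplify(w))  # prints 26*exp(-3*x)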
While trying to create an example with scipy.optimize's curve_fit, I found that scipy seems to be incompatible with Python's math module. While function f1 works fine, f2 throws an error message.
from scipy.optimize import curve_fit
from math import sin, pi, log, exp, floor, fabs, pow
import numpy as np

x_axis = np.asarray([pi * i / 6 for i in range(-6, 7)])
y_axis = np.asarray([sin(i) for i in x_axis])

def f1(x, m, n):
    return m * x + n

coeff1, mat = curve_fit(f1, x_axis, y_axis)
print(coeff1)

def f2(x, m, n):
    return m * sin(x) + n

coeff2, mat = curve_fit(f2, x_axis, y_axis)
print(coeff2)
The full traceback is
Traceback (most recent call last):
  File "/Documents/Programming/Eclipse/PythonDevFiles/so_test.py", line 49, in <module>
    coeff2, mat = curve_fit(f2, x_axis, y_axis)
  File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/minpack.py", line 742, in curve_fit
    res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/minpack.py", line 377, in leastsq
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
  File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/minpack.py", line 26, in _check_func
    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
  File "/usr/local/lib/python3.5/dist-packages/scipy/optimize/minpack.py", line 454, in func_wrapped
    return func(xdata, *params) - ydata
  File "/Documents/Programming/Eclipse/PythonDevFiles/so_test.py", line 47, in f2
    return m * sin(x) + n
TypeError: only length-1 arrays can be converted to Python scalars
The error message appears with lists and numpy arrays as input alike. It affects all the math functions I tested (see the imports) and must have something to do with how the math module handles input data. This is most obvious with the pow() function: if I don't import pow from math, curve_fit works properly with the built-in pow().
The obvious question: why does this happen, and how can math functions be used with curve_fit?
P.S.: Please don't point out that one shouldn't fit this sample data with a linear function; it was just chosen to illustrate the problem.
Be careful with numpy arrays: some operations work on arrays and some only on scalars!
Scipy's optimizers assume the input (initial point) to be a 1d numpy array, and things often go wrong in other cases (a list, for example, becomes an array, and if you assumed you were working on lists, things go haywire). Those kinds of problems are common here on StackOverflow, and they are not easy to debug by eye; interacting with the code helps!
import numpy as np
import math
x = np.ones(1)
np.sin(x)
> array([0.84147098])
math.sin(x)
> 0.8414709848078965 # works only because a size-1 array can be
                     # converted to a Python scalar (see the error below!)
x = np.ones(2)
np.sin(x)
> array([0.84147098, 0.84147098])
math.sin(x)
> TypeError: only size-1 arrays can be converted to Python scalars
To be honest: this is part of a very basic understanding of numpy, and it should be in place before using scipy's somewhat sensitive functions.
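To answer the "how" part directly: use numpy's vectorized equivalents (np.sin, np.exp, np.power, ...) inside the model function, since curve_fit evaluates it on whole arrays. A minimal sketch of the question's f2 rewritten this way:

from scipy.optimize import curve_fit
import numpy as np

x_axis = np.asarray([np.pi * i / 6 for i in range(-6, 7)])
y_axis = np.sin(x_axis)

def f2(x, m, n):
    return m * np.sin(x) + n  # np.sin accepts arrays; math.sin does not

coeff2, mat = curve_fit(f2, x_axis, y_axis)
print(coeff2)  # should be close to [1, 0]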
I am new to Python - I came from MATLAB. I am trying to run this code:
import numpy as np
from scipy import sparse
n=3
dim=2^n
sx = np.array([[0,1],[1,0]])
sy = np.array([[0,-1j],[1j,0]])
sz = np.array([[1,0],[0,-1]])
ssx= sparse.csr_matrix(sx)
ssy= sparse.csr_matrix(sy)
ssz= sparse.csr_matrix(sz)
expon1=np.zeros((n,n))
for i in range(n-1):
    expon1[i,i]=1
    expon1[i+1,i]=1
expon1[0,n-1]=1
expon1[n-1,n-1]=1
expon2=np.identity(n)
Sigs1=sparse.csr_matrix(0,(dim, dim))
for j in range(n-1):
    Sig1=sparse.csr_matrix(1)
    for i in range(n-1):
        Sig1=sparse.kron(Sig1,ssx.power(expon1[i,j]))
    Sigs1= Sigs1+Sig1
After running python3 sparse.py (sparse.py is the name of the file), the terminal prints the following:
Traceback (most recent call last):
  File "sparse.py", line 31, in <module>
    Sigs1= Sigs1+Sig1
  File "/usr/lib/python3/dist-packages/scipy/sparse/compressed.py", line 341, in __add__
    raise ValueError("inconsistent shapes")
ValueError: inconsistent shapes
I didn't go through your whole code, but looking at it I can say your problem is probably here:
n=3
dim=2^n
In Python, the ^ operator is bitwise XOR, while ** is for powers.
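A quick demonstration of the difference; with n = 3, dim ends up as 1 instead of the intended 8, which is why the sparse matrices later disagree in shape:

n = 3
print(2 ^ n)   # 1  (bitwise XOR: 0b10 ^ 0b11 == 0b01)
print(2 ** n)  # 8  (exponentiation)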
I am trying to use scikit-cuda's wrappers for the cuSOLVER functions, in particular I want to execute cusolverDnSgesvd to compute full-matrix single precision SVD on a matrix of real numbers.
Using the code here and here as a reference, I managed to get this far:
import pycuda.autoinit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import numpy as np
from skcuda import cusolver
handle = cusolver.cusolverDnCreate()
m = 50
n = 25
a = np.asarray(np.random.random((m, n)))
a_gpu = gpuarray.to_gpu(a)
ldu = m
ldvt = n
s_gpu = gpuarray.empty(min(m, n), np.float32)
u_gpu = gpuarray.empty((ldu, m), np.float32)
vh_gpu = gpuarray.empty((n, n), np.float32)
work_size = cusolver.cusolverDnSgesvd_bufferSize(handle, m, n)
work = gpuarray.empty((m,n), np.float32)
u_gpu, s_gpu, vh_gpu = cusolver.cusolverDnSgesvd(
    handle=handle,
    jobu='A',
    jobvt='A',
    m=m,
    n=n,
    A=a,
    lda=m,
    S=s_gpu,
    U=u_gpu,
    ldu=ldu,
    VT=vh_gpu,
    ldvt=ldvt,
    Work=work,
    Lwork=work_size,
    rwork=None,
    devInfo=0
)
But the code isn't working, probably because I'm messing up the types.
Traceback (most recent call last):
  File "/home/vektor/PycharmProjects/yancut/test_svd.py", line 44, in <module>
    devInfo=0
  File "/home/vektor/anaconda3/lib/python3.4/site-packages/skcuda/cusolver.py", line 577, in cusolverDnSgesvd
    int(A), lda, int(S), int(U),
TypeError: only length-1 arrays can be converted to Python scalars
How should I provide all the arguments so that the SVD is executed in a proper way?
UPDATE1:
After using this question as reference, I edited my code and I'm getting a new error.
import pycuda.autoinit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import numpy as np
import ctypes
from skcuda import cusolver
rows = 20
cols = 10
a = np.asarray(np.random.random((rows, cols)))
a_gpu = gpuarray.to_gpu(a.copy())
lda = rows
u_gpu = gpuarray.empty((rows, rows), np.float32)
v_gpu = gpuarray.empty((cols, cols), np.float32)
s_gpu = gpuarray.empty(cols, np.float32)
devInfo = gpuarray.zeros(1, np.int32)
handle = cusolver.cusolverDnCreate()
worksize = cusolver.cusolverDnSgesvd_bufferSize(handle, rows, cols)
print("SIZE", worksize)
Workspace = gpuarray.empty(worksize, np.float32)
svd_status = cusolver.cusolverDnSgesvd(
    handle=handle,
    jobu='A',
    jobvt='A',
    m=rows,
    n=cols,
    A=a_gpu.ptr,
    lda=rows,
    S=s_gpu.ptr,
    U=u_gpu.ptr,
    ldu=rows,
    VT=v_gpu.ptr,
    ldvt=cols,
    Work=Workspace.ptr,
    Lwork=worksize,
    rwork=Workspace.ptr,
    devInfo=devInfo.ptr
)
status = cusolver.cusolverDnDestroy(handle)
And I'm getting a new error:
Traceback (most recent call last):
  File "/home/vektor/PycharmProjects/yancut/test_svd.py", line 53, in <module>
    devInfo=devInfo.ptr
  File "/home/vektor/anaconda3/lib/python3.4/site-packages/skcuda/cusolver.py", line 579, in cusolverDnSgesvd
    Lwork, int(rwork), int(devInfo))
ctypes.ArgumentError: argument 2: <class 'TypeError'>: wrong type
It now seems that I'm doing something wrong with devInfo.
From the documentation it looks like each of the matrices (so A, S, U, VT) needs to be passed as a device pointer. So for PyCUDA gpuarrays, pass A.ptr rather than A, etc., and it should work.
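For reference, a minimal sketch of what .ptr exposes on a PyCUDA gpuarray (this assumes a CUDA-capable device with PyCUDA installed): it is a plain integer holding the device address, which is what the low-level C wrappers expect in place of the array object itself.

import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import numpy as np

a_gpu = gpuarray.to_gpu(np.random.random((4, 4)).astype(np.float32))
print(a_gpu.ptr)        # an int: the raw device address
print(type(a_gpu.ptr))  # <class 'int'>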