To normalize along an axis, would this work?
import numpy as np

def normalize(arr, axis):
    # build an axis permutation that swaps `axis` with axis 0
    x = [i for i in range(len(arr.shape))]
    a = x[0]
    x[0] = x[axis]
    x[axis] = a
    arr = arr.transpose(x)
    # divide by the L2 norm along axis 0, then undo the transpose
    # (swapping two entries is its own inverse, so the same permutation works)
    return (arr/np.sum(arr**2, axis=0)**0.5).transpose(tuple(x))
If you want to normalize so the entries sum to 1, you can just do x = x / sum(x). This works on numpy arrays:
import numpy
x = [1, 2, 3]
x = numpy.array(x)
x = x / sum(x)
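If the goal is instead L2 normalization along a chosen axis, as in the question's code, a minimal sketch (assuming a float array) does the same job without any transposes by keeping the reduced axis around for broadcasting:
import numpy as np

def normalize(arr, axis):
    # keepdims=True leaves a length-1 axis so the division broadcasts
    return arr / np.linalg.norm(arr, axis=axis, keepdims=True)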
I have created this code:
import numpy as np

def scalar_function(x, y):
    if x <= y:
        z = x*y
    else:
        z = x/y
    return z
And
def vector_function(x, y):
    vfunc = np.vectorize(scalar_function)
    return vfunc
But when I call vector_function([1, 2, 3, 4], 2), the output is <numpy.vectorize at 0x226a1d9aeb8> instead of the resulting array of numbers.
Can anyone point out my mistake? Thanks.
You have return vfunc, which returns the vectorized function object itself; that is the <numpy.vectorize at 0x226a1d9aeb8> you got. You want to call it and return the result: return vfunc(x, y)
>>> def vector_function(x, y):
...     vfunc = np.vectorize(scalar_function)
...     return vfunc(x, y)
>>> vector_function([1, 2, 3, 4], 2)
array([2, 4, 1, 2])
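Note the third element: scalar_function(3, 2) returns 1.5, but np.vectorize infers the output dtype from the first call, which returned an int, so the result is truncated. If that matters, passing otypes explicitly fixes it; a minimal sketch:
>>> vfunc = np.vectorize(scalar_function, otypes=[float])
>>> vfunc([1, 2, 3, 4], 2)
array([2. , 4. , 1.5, 2. ])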
There is no need for np.vectorize:
x = np.asarray(x)
y = np.asarray(y)
np.where(x <= y, x*y, x/y)
does the same and is actually vectorized.
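For example, with the question's inputs (the 1.5 survives here because x/y promotes the result to float):
x = np.asarray([1, 2, 3, 4])
print(np.where(x <= 2, x*2, x/2))
# [2.  4.  1.5 2. ]
One caveat worth knowing: np.where evaluates both branches, so x/y is computed even where the condition is true.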
I wrote the following code for basic optimization based on Newton's method, once writing the derivatives explicitly and once calculating them using sympy. Why are the results different?
Writing derivatives explicitly:
import numpy as np

def g(x):
    return 1.95 - np.exp(-2/x) - 2*np.exp(-np.power(x,4))

# gd: derivative of g
def gd(x):
    return -2*np.power(x,-2)*np.exp(-2/x) + 8*np.power(x,3)*np.exp(-np.power(x,4))

# gdd: second derivative of g
def gdd(x):
    return -4*np.power(x,-3)*np.exp(-2/x)-4*np.power(x,-4)*np.exp(-2/x)+24*np.power(x,2)*np.exp(-np.power(x,4))-32*np.power(x,6)*np.exp(-np.power(x,4))
# Newton's method: find a root of gd (a stationary point of g)
def newton_update(x0, g, gd):
    return x0 - g(x0)/gd(x0)

# Main loop
x0 = 1.00
condition = True
loops = 1
max_iter = 20
while condition and loops < max_iter:
    x1 = newton_update(x0, gd, gdd)
    loops += 1
    condition = np.abs(x0-x1) >= 0.001
    x0 = x1
    print('x =', x0)
if loops == max_iter:
    print('Solution failed to converge. Try another starting value!')
Output:
x = 1.66382322329
x = 1.38056881356
x = 1.43948432592
x = 1.46207570893
x = 1.46847791968
x = 1.46995571549
x = 1.47027303095
Using sympy and lambdify:
import sympy as sp

x = sp.symbols('x', real=True)
f_expr = 1.95 - sp.exp(-2/x) - 2*sp.exp(-x**4)
dfdx_expr = sp.diff(f_expr, x)
ddfdx_expr = sp.diff(dfdx_expr, x)

# lambdify
f = sp.lambdify([x], f_expr, "numpy")
dfdx = sp.lambdify([x], dfdx_expr, "numpy")
ddfdx = sp.lambdify([x], ddfdx_expr, "numpy")
# Newton's method
x0 = 1.0
condition = True
loops = 1
max_iter = 20
while condition and loops < max_iter:
    x1 = newton_update(x0, dfdx, ddfdx)
    loops += 1
    condition = np.abs(x0-x1) >= 0.001
    x0 = x1
    print('x =', x0)
if loops == max_iter:
    print('Solution failed to converge. Try another starting value!')
Output:
x = 1.90803013971
x = 3.96640484492
x = 6.6181614689
x = 10.5162392894
x = 16.3269006983
x = 25.0229734288
x = 38.0552735534
x = 57.5964036862
x = 86.9034400129
x = 130.860980508
x = 196.795321033
x = 295.695535237
x = 444.044999522
x = 666.568627836
x = 1000.35369299
x = 1501.03103981
x = 2252.04689304
x = 3378.57056168
x = 5068.35599056
Solution failed to converge. Try another starting value!
I made it work by step halving in the newton_update function whenever the sign of the derivative changes during the update. But I couldn't figure out why the results were so different with the same starting point. Also, is it possible to get the same results from both?
There is a mistake in the second derivative formula in the function gdd: the first term has the wrong sign. Changing
def gdd(x):
    return -4*np.power(x,-3)*np.exp(-2/x)-4*np.power(x,-4)*np.exp(-2/x)+24*np.power(x,2)*np.exp(-np.power(x,4))-32*np.power(x,6)*np.exp(-np.power(x,4))
to
def gdd(x):
    return 4*np.power(x,-3)*np.exp(-2/x)-4*np.power(x,-4)*np.exp(-2/x)+24*np.power(x,2)*np.exp(-np.power(x,4))-32*np.power(x,6)*np.exp(-np.power(x,4))
should resolve the issue and produce the same result in both cases, which will be
x = 1.90803013971
x = 3.96640484492
x = 6.6181614689
x = 10.5162392894
x = 16.3269006983
x = 25.0229734288
x = 38.0552735534
x = 57.5964036862
x = 86.9034400129
x = 130.860980508
x = 196.795321033
x = 295.695535237
x = 444.044999522
x = 666.568627836
x = 1000.35369299
x = 1501.03103981
x = 2252.04689304
x = 3378.57056168
x = 5068.35599056
Solution failed to converge. Try another starting value!
This points to a problem with choosing the step size, as per the comment.
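Since the question mentions step halving: a minimal sketch of a damped update that could replace newton_update (the names damped_newton_update and max_halvings are illustrative; here the step is halved until the residual |g| actually decreases, a criterion similar in spirit to the sign-change check the question describes):
def damped_newton_update(x0, g, gd, max_halvings=20):
    step = g(x0) / gd(x0)   # full Newton step
    x1 = x0 - step
    # backtrack: halve the step until |g| decreases
    for _ in range(max_halvings):
        if abs(g(x1)) < abs(g(x0)):
            break
        step /= 2
        x1 = x0 - step
    return x1
This keeps the iterates from running off toward large x, where gd and gdd both flatten out toward zero.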
Here is what I want to do with numpy arrays:
import numpy as np
x = np.random.random((10,321,321))
y = np.random.rand(10,21,321,321)   # np.random.rand takes separate dims, not a tuple
z = np.random.randint(0,21,size=(10,321,321))
for i in range(10):
    for j in range(321):
        for k in range(321):
            if x[i][j][k] <= 0.5:
                for l in range(21):
                    if l == z[i][j][k]:
                        y[i][l][j][k] = 1
                    else:
                        y[i][l][j][k] = 0
What would be the better way of doing this?
One way would be to use np.identity to create a complete set of one-hot vectors and select the appropriate ones using advanced indexing:
import numpy as np

x = np.random.random((10,321,321))
y = np.random.rand(10,21,321,321)
z = np.random.randint(0,21,size=(10,321,321))

yc = y.copy()
# transpose returns a view, so writing through yct updates yc in place
yct = yc.transpose(0, 2, 3, 1)
yct[x <= 0.5] = np.identity(21, dtype=yc.dtype)[z[x <= 0.5]]

# verify against the loop version
for i in range(10):
    for j in range(321):
        for k in range(321):
            if x[i][j][k] <= 0.5:
                for l in range(21):
                    if l == z[i][j][k]:
                        y[i][l][j][k] = 1
                    else:
                        y[i][l][j][k] = 0
print(np.all(yc == y))
# True
Here's one possibility:
tmp_y = np.zeros_like(y)
for l in range(21):
    tmp_y[:, l, :, :] = (z == l).astype(dtype=np.int32)
# x needs an explicit length-1 channel axis to broadcast against y's shape
y = np.where(x[:, None, :, :] <= 0.5, tmp_y, y)
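The loop over l can be removed too, by comparing z against a channel index with broadcasting; a sketch under the same shapes:
# (10,1,321,321) compared against (1,21,1,1) broadcasts to (10,21,321,321)
tmp_y = (z[:, None, :, :] == np.arange(21)[None, :, None, None]).astype(y.dtype)
y = np.where(x[:, None, :, :] <= 0.5, tmp_y, y)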
I am trying to use a Lagrange multiplier to optimize a function, and I am looping over it to get a list of numbers; however, I get the error
ValueError: setting an array element with a sequence.
Here is my code; where do I go wrong? If n is not an array, I get the result correctly, though.
import numpy as np
from scipy.optimize import fsolve

n = np.arange(10000,100000,10000)

def func(X):
    x = X[0]
    y = X[1]
    L = X[2]
    return (x + y + L * (x**2 + y**2 - n))

def dfunc(X):
    dLambda = np.zeros(len(X))
    h = 1e-3
    for i in range(len(X)):
        dX = np.zeros(len(X))
        dX[i] = h
        dLambda[i] = (func(X+dX)-func(X-dX))/(2*h)
    return dLambda

X1 = fsolve(dfunc, [1, 1, 0])
print(X1)
Help would be appreciated, thank you very much.
First, check how fsolve() calls the function you pass it. Second, print(func([1, 1, 0])) is not a single number but the array [2 2 2 2 2 2 2 2 2], because n is an array; assigning that sequence to the scalar slot dLambda[i] is what raises the ValueError. If you want to iterate over n, try:
import numpy as np
from scipy.optimize import fsolve

n = np.arange(10000,100000,10000)

def func(X, n):
    x = X[0]
    y = X[1]
    L = X[2]
    return (x + y + L * (x**2 + y**2 - n))

def dfunc(X, n):
    dLambda = np.zeros(len(X))
    h = 1e-3
    for i in range(len(X)):
        dX = np.zeros(len(X))
        dX[i] = h
        dLambda[i] = (func(X+dX,n)-func(X-dX,n))/(2*h)
    return dLambda

for iter_n in n:
    print("for n = {0} dfunc = {1}".format(iter_n, dfunc([0.8,0.4,0.3],iter_n)))
I need to speed up a for loop that does something like the code below:
import numpy as np

x = np.random.normal(size=(206,11,11))
y = np.random.normal(size=(206,11,11))
complx = x + 1j*y

a, b, c = complx.shape
for n in xrange(a):
    # do something
    z = np.zeros(b)
    for i in xrange(b):
        z[i] = (complx[n,:,:].real[i][i]*complx[n,:,:].real[i][i] +
                complx[n,:,:].imag[i][i]*complx[n,:,:].imag[i][i])**(-0.25)
I'm vaguely aware that these things can sometimes be done with numpy.einsum, but I am not really sure how to use it here. Does anyone have any suggestions?
In case you want to speed up the inner for loop, you can do something like this:
import numpy as np

x = np.random.normal(size=(206,11,11))
y = np.random.normal(size=(206,11,11))
complx = x + 1j*y

# take only the diagonal of each of the 206 11x11 matrices -> shape (206, 11)
complx_diag = np.diagonal(complx, 0, 1, 2)

# |d|**-0.5 equals (d.real**2 + d.imag**2)**-0.25, the quantity in the question
zn = np.abs(complx_diag)**(-0.5)

a = complx.shape[0]
for n in xrange(a):
    z = zn[n]
    # do your stuff
If your remaining per-iteration work is not too complicated, it can very likely be vectorized as well. The more you compute outside the for loop, the faster your code will be.
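As for einsum: repeating an index on the input pulls out a diagonal, so the extraction above can also be written this way; a sketch equivalent to the np.diagonal call:
# 'nii->ni' keeps the matrix index n and the repeated diagonal index i
d = np.einsum('nii->ni', complx)
zn = np.abs(d)**(-0.5)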
If I am not mistaken, this is more or less what you want. The print statements are only there to convince oneself that the calculation is correct.
def optimize_01():
    x = np.random.normal(size=(6, 11, 11))
    y = np.random.normal(size=(6, 11, 11))
    complx = x + 1j * y
    a, b, _ = complx.shape
    for n in range(a):
        # do something
        A = complx[n, :, :]
        d = np.diagonal(A)
        z = np.power(np.abs(d * d), -0.25)
        print(d[0])
        print(z[0])
        print((d[0].real * d[0].real + d[0].imag * d[0].imag) ** -0.25)
EDIT: If I compare this implementation with your implementation, I get the following.
import timeit

def optimize_02():
    x = np.random.normal(size=(206, 11, 11))
    y = np.random.normal(size=(206, 11, 11))
    complx = x + 1j * y
    a, b, _ = complx.shape
    for n in range(a):
        # do something
        A = complx[n, :, :]
        d = np.diagonal(A)
        z = np.power(np.abs(d * d), -0.25)

def optimize_03():
    x = np.random.normal(size=(206, 11, 11))
    y = np.random.normal(size=(206, 11, 11))
    complx = x + 1j * y
    a, b, _ = complx.shape
    for n in range(a):
        # do something
        z = np.zeros(b)
        for i in range(b):
            z[i] = (complx[n, :, :].real[i][i] * complx[n, :, :].real[i][i] +
                    complx[n, :, :].imag[i][i] * complx[n, :, :].imag[i][i]) ** (-0.25)

if __name__ == '__main__':
    print(timeit.timeit(optimize_02, number=10))
    print(timeit.timeit(optimize_03, number=10))
Result:
0.03474012700007734
0.09025639800074714
With 6 arrays of 1100 elements, instead of 206 arrays of 11 elements, the result is:
5.762741210999593
5.771216576999905
It looks like my solution is not that much faster after all.