Advanced numpy indexing - python

Here is what I want to do with numpy arrays:

import numpy as np

x = np.random.random((10, 321, 321))
y = np.random.rand(10, 21, 321, 321)  # np.random.rand takes the dimensions as separate arguments
z = np.random.randint(0, 21, size=(10, 321, 321))

for i in range(10):
    for j in range(321):
        for k in range(321):
            if x[i][j][k] <= 0.5:
                for l in range(21):
                    if l == z[i][j][k]:
                        y[i][l][j][k] = 1
                    else:
                        y[i][l][j][k] = 0

What would be a better way of doing this?

One way would be to use np.identity to create a complete set of one-hot vectors and select the appropriate ones using advanced indexing. Transposing first moves the one-hot axis to the end, so the boolean mask x <= 0.5 (shape (10, 321, 321)) selects length-21 rows that can be assigned whole one-hot vectors; since the transpose is a view, the writes land in yc:

import numpy as np

x = np.random.random((10, 321, 321))
y = np.random.rand(10, 21, 321, 321)
z = np.random.randint(0, 21, size=(10, 321, 321))

yc = y.copy()
yct = yc.transpose(0, 2, 3, 1)
yct[x <= 0.5] = np.identity(21, dtype=yc.dtype)[z[x <= 0.5]]
# check against the original loop
for i in range(10):
    for j in range(321):
        for k in range(321):
            if x[i][j][k] <= 0.5:
                for l in range(21):
                    if l == z[i][j][k]:
                        y[i][l][j][k] = 1
                    else:
                        y[i][l][j][k] = 0

print(np.all(yc == y))
# True

Here's one possibility:

tmp_y = np.zeros_like(y)
for l in range(21):
    tmp_y[:, l, :, :] = (z == l).astype(np.int32)
# the mask needs an explicit length-1 class axis: x <= 0.5 has shape
# (10, 321, 321), which does not broadcast against y's (10, 21, 321, 321)
y = np.where((x <= 0.5)[:, None], tmp_y, y)
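For what it's worth, the temporary array and the Python loop can be avoided entirely. A minimal sketch, assuming the same x, y, z as above, that builds the one-hot tensor by comparing z against a class-index axis:

mask = (x <= 0.5)[:, None]                           # shape (10, 1, 321, 321)
onehot = z[:, None] == np.arange(21)[:, None, None]  # shape (10, 21, 321, 321)
y = np.where(mask, onehot.astype(y.dtype), y)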

Related

How can I iterate over a function

I'm simulating a forest fire, and one of my tasks is to plot the density of trees versus the burning and empty plots. I have the disparate parts, but I need help putting them together, as I can't work out how to combine my code. Currently, I have my initial conditions:

import numpy as np

p, f = 0.5, 0.3
nx, ny = 100, 100
X = np.zeros((ny, nx))
adjacent = ((-1, 0), (0, -1), (0, 1), (1, 0))
E, T, F = 0, 1, 2
xvalues = [0]
yvalues = [0]
My function that generates the next frame (the distribution of fire) is

def iterate(X):
    Xnew = np.zeros((ny, nx))
    for ix in range(1, nx-1):
        for iy in range(1, ny-1):
            if X[iy, ix] == E and np.random.random() <= p:
                Xnew[iy, ix] = T
            if X[iy, ix] == T:
                Xnew[iy, ix] = T
                for dx, dy in adjacent:
                    if X[iy+dy, ix+dx] == F:
                        Xnew[iy, ix] = F
                        break
                else:
                    # no burning neighbour: catch fire with probability f
                    if np.random.random() <= f:
                        Xnew[iy, ix] = F
    return Xnew
The bit I'm struggling with is how to write the following correctly, so that I can go up to Xn where n is about 1000:

X1 = iterate(X)
X2 = iterate(X1)
X3 = iterate(X2)
# ... and so on

and for each iteration calculate

num_empty = (Xn == 0).sum()
num_tree = (Xn == 1).sum()
num_fire = (Xn == 2).sum()
density = num_tree/(num_fire+num_empty)
xvalues.append(i)
yvalues.append(density)
print(density)

Any help would be appreciated!
I think you need to iterate over a range of n ints rather than over the "function". Note that the question defines the function as iterate, so that is what should be called:

i = 0
_X = iterate(X)
num_empty = (_X == 0).sum()
num_tree = (_X == 1).sum()
num_fire = (_X == 2).sum()
density = num_tree / (num_fire + num_empty)
print(i, density)
xvalues.append(i)
yvalues.append(density)

n = 1000
for i in range(1, n):
    _X = iterate(_X)
    num_empty = (_X == 0).sum()
    num_tree = (_X == 1).sum()
    num_fire = (_X == 2).sum()
    density = num_tree / (num_fire + num_empty)
    print(i, density)
    xvalues.append(i)
    yvalues.append(density)
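A minimal sketch of the same loop without the duplicated statistics block, assuming the E, T, F constants from the question:

_X = X
for i in range(1000):
    _X = iterate(_X)
    num_empty = (_X == E).sum()
    num_tree = (_X == T).sum()
    num_fire = (_X == F).sum()
    density = num_tree / (num_fire + num_empty)
    print(i, density)
    xvalues.append(i)
    yvalues.append(density)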

cvxpy+ecos: problem INFEASIBLE, how to scale correctly

I have the following code:

import numpy as np
import cvxpy as cp
import math
import sys

def solve05(p, a):
    m, n, ids, inv, k = 0, len(p), {}, {}, 0
    for i in range(n):
        for j in range(n):
            ids[(i, j)] = k
            inv[k] = (i, j)
            k = k + 1
    # Problem data
    A = np.zeros((2*n, n*n+n))
    D = np.zeros((2*n, n*n+n))
    b = np.zeros(2*n)
    B = np.zeros(2*n)
    c = np.zeros(2*n)
    for j in range(n):
        for i in range(n):
            idx = ids[(i, j)]
            A[j, idx] = 1
        b[j] = 1
    for i in range(n):
        for j in range(n):
            idx = ids[(i, j)]
            A[i+n, idx] = p[j]
        A[i+n, n*n+i] = -1
        b[i+n] = p[i]
    # Construct the problem
    x = cp.Variable(n*n+n)
    print("M = ", A)
    print("b = ", b)
    CF = 1e3
    print("Now scaling M by ", CF)
    A = A*CF
    print(A)
    b = b*CF
    constraints = [0 <= x, A*x == b]
    pex = x[n*n] + x[n*n+1] + x[n*n+2] + 1
    constraints.append(x[n*n] <= a[0]*CF)
    constraints.append(x[n*n+1] <= a[1]*CF)
    constraints.append(x[n*n+2] <= a[2]*CF)
    constraints.append(x[n*n] >= 0.01)
    constraints.append(x[n*n+1] >= 0.01)
    constraints.append(x[n*n+2] >= 0.01)
    ex = pex.__pow__(-1)
    print("Dummy variables: ", x[n*n], x[n*n+1], x[n*n+2])
    print("Objective function: ", ex)
    print("[should be convex] Curvature: ", ex.curvature)
    objective = cp.Minimize(ex)
    prob = cp.Problem(objective, constraints)
    result = prob.solve(verbose=True)
    print('problem state: ', prob.status)
    alpha = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            alpha[i, j] = x.value[ids[(i, j)]]
    dummy = [x.value[j] for j in range(n*n, n*n+n)]
    return (x, alpha)

if __name__ == '__main__':
    p = [0.0005, 0.0001, 0.0007]
    a = [900, 500, 700]
    n = len(a)
    (sl, alpha) = solve05(p, a)
    for row in alpha:
        for x in row:
            print("%.4f" % x, end=" ")
        print("")
It fails with an "infeasible" verdict, and I am eager to know why.
Is there any way to find out more? I am not a convex programming expert, so any comments on why this is a bad model are appreciated. I have also tried scaling the problem, because I thought numerical instability might be causing the trouble, but to no avail.

The answer ECOS and CVXPY were giving is correct: the problem really is infeasible. You can see this by summing up all the equality constraints. The first n of them say that each column of alpha sums to 1, so the total sum_{i,j} p[j]*alpha[i,j] equals sum_j p[j]. Summing the last n constraints then gives sum_j p[j] - sum_i d_i = sum_i p[i], i.e. the dummy variables d_i must sum to 0, while the constraints x[n*n+i] >= 0.01 force each of them to be at least 0.01. In other words, the LHS is some quantity F, whereas the RHS is F + e, for some e > 0.
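To see the contradiction numerically, here is a quick check of my own (a sketch, not part of the original answer): rebuild the same equality system with plain numpy and hand it to scipy's linprog together with the d_i >= 0.01 bounds; the solver then reports infeasibility directly, without any of the cvxpy machinery.

import numpy as np
from scipy.optimize import linprog

p = [0.0005, 0.0001, 0.0007]
n = len(p)
N = n*n + n                        # n*n alpha entries plus n dummies
A = np.zeros((2*n, N))
b = np.zeros(2*n)
for j in range(n):                 # columns: sum_i alpha[i,j] == 1
    for i in range(n):
        A[j, i*n + j] = 1          # ids[(i,j)] enumerates row-major as i*n + j
    b[j] = 1
for i in range(n):                 # rows: sum_j p[j]*alpha[i,j] - d_i == p[i]
    for j in range(n):
        A[i+n, i*n + j] = p[j]
    A[i+n, n*n + i] = -1
    b[i+n] = p[i]

bounds = [(0, None)] * (n*n) + [(0.01, None)] * n   # d_i >= 0.01
res = linprog(c=np.zeros(N), A_eq=A, b_eq=b, bounds=bounds, method="highs")
print(res.status, res.message)     # status 2 means infeasible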

How to generate a multidimensional cube in Python

This program creates a cube of Gridsize**3 points, with a user-chosen starting point and spacing between points (even though they are not function parameters, they would not be difficult to add).

import numpy as np

def CreateMap(Gridsize):
    X = Y = Z = Gridsize
    M = np.zeros(shape=(X*Y*Z, 3))
    d_x = 5 / Gridsize  # increment of the cube x dimension
    d_y = 5 / Gridsize
    d_z = 5 / Gridsize
    x0 = -1.0
    y0 = 1.0
    z0 = 0
    # the stop has to be offset by the start, otherwise the axes
    # don't all end up with Gridsize points
    x = np.arange(x0, x0 + X*d_x, d_x, dtype=float)
    y = np.arange(y0, y0 + Y*d_y, d_y, dtype=float)
    z = np.arange(z0, z0 + Z*d_z, d_z, dtype=float)
    g = 0
    for i in range(X):
        for j in range(Y):
            for k in range(Z):
                M[g, 0] = x[i]
                M[g, 1] = y[j]
                M[g, 2] = z[k]
                g = g + 1
    print(M)
    return 0

I was wondering what the best method would be to create a hypercube of Gridsize**n points, where n is also user defined?
Check out np.meshgrid. Instead of your for loops, you can just do
M = np.stack(np.meshgrid(x, y, z))
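If you need the flat (Gridsize**3, 3) list of points that the loops build, a small extension of this idea works (note that indexing="ij" reproduces the loop ordering, with the last axis varying fastest):

M = np.stack(np.meshgrid(x, y, z, indexing="ij"), axis=-1).reshape(-1, 3)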
If you guys have optimization advice...
import numpy as np

def CreateMap(Gridsize, x0, xf):
    k = np.shape(x0)[0]
    M = np.zeros(shape=(Gridsize**k, k))
    d_x = np.zeros(k)
    for i in range(k):
        d = 0
        d_x[i] = (xf[i] - x0[i]) / (Gridsize - 1)  # increment along dimension i
        x = np.arange(x0[i], xf[i] + d_x[i], d_x[i], dtype=float)
        # column i repeats each value Gridsize**i times and
        # cycles through the Gridsize values Gridsize**(k-i-1) times
        for v in range(Gridsize ** (k - i - 1)):
            for j in range(Gridsize):
                temp = x[j]
                for z in range(Gridsize ** i):
                    M[d, i] = temp
                    d = d + 1
    print(M)
    return 0

x0 = np.array([-1, 0, 1])
xf = np.array([10, 2, 5])
CreateMap(4, x0, xf)
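As for optimization advice: an n-dimensional version can be built directly on np.meshgrid. A minimal sketch of my own (np.linspace also sidesteps the floating-point end-point issues of np.arange); it produces the same set of points, though the rows come out in a different order than in the loop version:

import numpy as np

def create_map(gridsize, x0, xf):
    k = len(x0)
    # one coordinate vector per dimension: gridsize points from x0[i] to xf[i]
    axes = [np.linspace(x0[i], xf[i], gridsize) for i in range(k)]
    grids = np.meshgrid(*axes, indexing="ij")
    # stack to shape (gridsize, ..., gridsize, k), then flatten to (gridsize**k, k)
    return np.stack(grids, axis=-1).reshape(-1, k)

M = create_map(4, np.array([-1, 0, 1]), np.array([10, 2, 5]))
print(M.shape)  # (64, 3)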

Numpy arange error with Lagrange Multiplier in Python

I am trying to use a Lagrange multiplier to optimize a function, and I am looping through the function to get a list of numbers; however, I get the error

ValueError: setting an array element with a sequence.

Here is my code; where did I go wrong? If n is not an array, I get the correct result.
import numpy as np
from scipy.optimize import fsolve

n = np.arange(10000, 100000, 10000)

def func(X):
    x = X[0]
    y = X[1]
    L = X[2]
    return (x + y + L * (x**2 + y**2 - n))

def dfunc(X):
    dLambda = np.zeros(len(X))
    h = 1e-3
    for i in range(len(X)):
        dX = np.zeros(len(X))
        dX[i] = h
        dLambda[i] = (func(X+dX) - func(X-dX)) / (2*h)
    return dLambda

X1 = fsolve(dfunc, [1, 1, 0])
print(X1)
Help would be appreciated, thank you very much.

First, check what fsolve() expects: the function should return as many values as there are unknowns, not a whole array per call. Second, print(func([1, 1, 0])) — the result is not a number but [2 2 2 2 2 2 2 2 2], because n is an array. If you want to iterate over n, try:
import numpy as np
from scipy.optimize import fsolve

n = np.arange(10000, 100000, 10000)

def func(X, n):
    x = X[0]
    y = X[1]
    L = X[2]
    return (x + y + L * (x**2 + y**2 - n))

def dfunc(X, n):
    dLambda = np.zeros(len(X))
    h = 1e-3
    for i in range(len(X)):
        dX = np.zeros(len(X))
        dX[i] = h
        dLambda[i] = (func(X+dX, n) - func(X-dX, n)) / (2*h)
    return dLambda

for iter_n in n:
    print("for n = {0} dfunc = {1}".format(iter_n, dfunc([0.8, 0.4, 0.3], iter_n)))

Speed up looping over three axis' in NumPy

I need to speed up a for loop that does something like the code below:
import numpy as np

x = np.random.normal(size=(206, 11, 11))
y = np.random.normal(size=(206, 11, 11))
complx = x + 1j*y

a, b, c = complx.shape
for n in range(a):
    # do something
    z = np.zeros(b)
    for i in range(b):
        z[i] = (complx[n, :, :].real[i][i]*complx[n, :, :].real[i][i] +
                complx[n, :, :].imag[i][i]*complx[n, :, :].imag[i][i]) ** -0.25

I'm vaguely aware these things can sometimes be done with numpy.einsum.
However, I am not really sure how to use it. Does anyone have any other suggestions?
In case you want to speed up the inner for loop, you can do something like this:

import numpy as np

x = np.random.normal(size=(206, 11, 11))
y = np.random.normal(size=(206, 11, 11))
complx = x + 1j*y

# take only the diagonal part of each of the 11x11 matrices
complx_diag = np.diagonal(complx, 0, 1, 2)
# do the calc: np.abs(d)**-0.5 equals (d.real**2 + d.imag**2)**-0.25
zn = np.abs(complx_diag)**(-0.5)

a = complx.shape[0]
for n in range(a):
    z = zn[n]
    # do your stuff

In case your stuff is not too complicated, it can very likely be vectorized as well. The more you calculate outside the for loop, the faster your code is.
If I am not mistaken, this is more or less what you want. The print statements are only there to convince oneself that the calculation is correct.
def optimize_01():
    x = np.random.normal(size=(6, 11, 11))
    y = np.random.normal(size=(6, 11, 11))
    complx = x + 1j * y
    a, b, _ = complx.shape
    for n in range(a):
        # do something
        A = complx[n, :, :]
        d = np.diagonal(A)
        z = np.power(np.abs(d * d), -0.25)
        print(d[0])
        print(z[0])
        print((d[0].real * d[0].real + d[0].imag * d[0].imag) ** -0.25)
EDIT: If I compare this implementation with your implementation, I get the following.
import timeit

def optimize_02():
    x = np.random.normal(size=(206, 11, 11))
    y = np.random.normal(size=(206, 11, 11))
    complx = x + 1j * y
    a, b, _ = complx.shape
    for n in range(a):
        # do something
        A = complx[n, :, :]
        d = np.diagonal(A)
        z = np.power(np.abs(d * d), -0.25)

def optimize_03():
    x = np.random.normal(size=(206, 11, 11))
    y = np.random.normal(size=(206, 11, 11))
    complx = x + 1j * y
    a, b, _ = complx.shape
    for n in range(a):
        # do something
        z = np.zeros(b)
        for i in range(b):
            z[i] = (complx[n, :, :].real[i][i] * complx[n, :, :].real[i][i] +
                    complx[n, :, :].imag[i][i] * complx[n, :, :].imag[i][i]) ** -0.25

if __name__ == '__main__':
    print(timeit.timeit(optimize_02, number=10))
    print(timeit.timeit(optimize_03, number=10))
Result:
0.03474012700007734
0.09025639800074714
With 6 arrays of 1100 elements, instead of 206 arrays of 11 elements, the result is:
5.762741210999593
5.771216576999905
It looks like my solution is not that much faster after all.
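For completeness, the outer loop can be dropped as well; a minimal sketch (my own addition, not from the answers above) that computes all the z rows at once by taking the diagonals over the last two axes:

import numpy as np

x = np.random.normal(size=(206, 11, 11))
y = np.random.normal(size=(206, 11, 11))
complx = x + 1j * y

d = np.diagonal(complx, axis1=1, axis2=2)  # shape (206, 11)
z = (d.real**2 + d.imag**2) ** -0.25       # same as np.abs(d) ** -0.5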
