I'm new to Python, and I have this code for calculating the potential inside a 1x1 box using a Fourier series, but part of it runs far too slowly (marked in the code below).
I suspect I could speed it up with the numpy library, but I'm not that familiar with it. Could someone help me with this?
import numpy as np
import matplotlib.pyplot as plt
import pylab
import sys
from scipy.integrate import quad
from matplotlib import rc
rc('text', usetex=False)
rc('font', family='serif')
#One of the boundary conditions for the potential.
def func1(x, n):
    V_c = 1
    V_0 = V_c * np.sin(n*np.pi*x)
    return V_0*np.sin(n*np.pi*x)

#To calculate the potential inside the box:
def v(x, y):
    n = 1
    sum = 0
    nmax = 20
    while n < nmax:
        C_n, err = quad(func1, 0, 1, args=(n,))
        sum = sum + 2*(C_n/np.sinh(np.pi*n)*np.sin(n*np.pi*x)*np.sinh(n*np.pi*y))
        n = n + 1
    return sum
def main(argv):
    x_axis = np.linspace(0, 1, 100)
    y_axis = np.linspace(0, 1, 100)
    V_0 = np.zeros(100)
    V_1 = np.zeros(100)
    n = 4
    #Plot of V_0 = V_c * sin(n*pi*x)  (V_0_1 is defined elsewhere, not shown)
    for i in range(100):
        V_0[i] = V_0_1(i/100, n)
    plt.plot(x_axis, V_0)
    plt.xlabel('x/L')
    plt.ylabel('V_0')
    plt.title('V_0(x) = sin(n*pi*x/L), n = 4')
    plt.show()
    #Plot of V_0 = V_c*(1 - (x - 1/2)^4)  (V_0_2 is defined elsewhere, not shown)
    for i in range(100):
        V_1[i] = V_0_2(i/100)
    plt.figure()
    plt.plot(x_axis, V_1)
    plt.xlabel('x/L')
    plt.ylabel('V_0')
    plt.title('V_0(x) = 1 - (x/L - 1/2)^4')
    #plt.legend()
    plt.show()
    #Plot V(x/L, y/L) on the boundary:
    V_0_Y = np.zeros(100)
    V_1_Y = np.zeros(100)
    V_X_0 = np.zeros(100)
    V_X_1 = np.zeros(100)
    for i in range(100):
        V_0_Y[i] = v(0, i/100)
        V_1_Y[i] = v(1, i/100)
        V_X_0[i] = v(i/100, 0)
        V_X_1[i] = v(i/100, 1)
    # V(x/L = 0, y/L):
    plt.figure()
    plt.plot(x_axis, V_0_Y)
    plt.title('V(x/L = 0, y/L)')
    plt.show()
    # V(x/L = 1, y/L):
    plt.figure()
    plt.plot(x_axis, V_1_Y)
    plt.title('V(x/L = 1, y/L)')
    plt.show()
    # V(x/L, y/L = 0):
    plt.figure()
    plt.plot(x_axis, V_X_0)
    plt.title('V(x/L, y/L = 0)')
    plt.show()
    # V(x/L, y/L = 1):
    plt.figure()
    plt.plot(x_axis, V_X_1)
    plt.title('V(x/L, y/L = 1)')
    plt.show()
    #Plot V(x,y)
    #######
    # This is where the code is way too slow: it takes about 10 minutes when nmax in v(x,y) is 20.
    #######
    V = np.zeros(10000).reshape((100, 100))
    for i in range(100):
        for j in range(100):
            V[i, j] = v(j/100, i/100)
    plt.figure()
    plt.contour(x_axis, y_axis, V, 50)
    plt.savefig('V_1')
    plt.show()

if __name__ == "__main__":
    main(sys.argv[1:])
You can find how to use the FFT/DFT in this document:
Discretized continuous Fourier transform with numpy
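To make the idea concrete, here is a rough sketch of my own (not taken from that document): the coefficients C_n = integral_0^1 V_0(x)*sin(n*pi*x) dx are sine-series coefficients, so all of them can be obtained at once with a discrete sine transform instead of calling quad() once per n. The normalization below assumes scipy.fft.dst with type=1, so double-check it for your case:

import numpy as np
from scipy.fft import dst

N = 1024                              # number of interior sample points
x = np.arange(1, N + 1) / (N + 1)     # interior grid on (0, 1)
V0 = np.sin(4 * np.pi * x)            # example boundary profile V_0(x)
# C[n-1] approximates integral_0^1 V_0(x)*sin(n*pi*x) dx for n = 1..N
C = dst(V0, type=1) / (2 * (N + 1))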
Also, regarding your V matrix, there are many ways to improve the execution speed. One is to make sure you use Python 3, or xrange() instead of range() if you are still on Python 2. I usually put these lines in my Python code so that it runs the same whether I use Python 3.x or 2.x:
# Don't want to generate huge lists in memory... use standard range for Python 3.x
range = xrange if isinstance(range(2), list) else range
Then, instead of recomputing j/100 and i/100 every time, you can precompute these values and put them in an array (a division is much more costly than a multiplication). Something like:
ratios = np.arange(100) / 100
V = np.zeros(10000).reshape((100, 100))
j = 0
while j < 100:
    i = 0
    while i < 100:
        V[i, j] = v(ratios[j], ratios[i])
        i += 1
    j += 1
Anyway, this is rather cosmetic and will not save you much on its own; you still need to call the function v() for every element...
Then, you can use weave:
http://docs.scipy.org/doc/scipy-0.14.0/reference/tutorial/weave.html
Or write all your pure computation/loop code in C, compile it and generate a module which you can call from Python.
You should look into numpy's broadcasting tricks and vectorization (there are several references; one of the first good links that pops up is from Matlab, but it is just as applicable to numpy - can anyone recommend a good numpy link in the comments that I might point other users to in the future?).
What I saw in your code (once you remove all the unnecessary bits like plots and unused functions) is that you are essentially doing this:
from __future__ import division
from scipy.integrate import quad
import numpy as np
import matplotlib.pyplot as plt

def func1(x, n):
    return 1*np.sin(n*np.pi*x)**2

def v(x, y):
    n = 1
    sum = 0
    nmax = 20
    while n < nmax:
        C_n, err = quad(func1, 0, 1, args=(n,))
        sum = sum + 2*(C_n/np.sinh(np.pi*n)*np.sin(n*np.pi*x)*np.sinh(n*np.pi*y))
        n = n + 1
    return sum

def main():
    x_axis = np.linspace(0, 1, 100)
    y_axis = np.linspace(0, 1, 100)
    #######
    # This is where the code is way too slow: it takes about 10 minutes when nmax in v(x,y) is 20.
    #######
    V = np.zeros(10000).reshape((100, 100))
    for i in range(100):
        for j in range(100):
            V[i, j] = v(j/100, i/100)
    plt.figure()
    plt.contour(x_axis, y_axis, V, 50)
    plt.show()

if __name__ == "__main__":
    main()
If you look carefully (you could use a profiler too), you'll see that you're integrating your function func1 (which I'll rename to integrand) about 20 times for each element in the 100x100 array V. However, the integrand doesn't change! So you can already pull that integration out of your loop. If you do that and use broadcasting tricks, you could end up with something like this:
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt

def integrand(x, n):
    return 1*np.sin(n*np.pi*x)**2

sine_order = np.arange(1, 20).reshape(-1, 1, 1)  # stack the orders along a third dimension
integration_results = np.empty_like(sine_order, dtype=float)
for enu, order in enumerate(sine_order.ravel()):
    integration_results[enu] = quad(integrand, 0, 1, args=(order,))[0]

y, x = np.ogrid[0:1:.01, 0:1:.01]
term = integration_results / np.sinh(np.pi * sine_order) * np.sin(sine_order * np.pi * x) * np.sinh(sine_order * np.pi * y)

# This is the key: term is a 3D array here, and this summation
# squashes the entire 3D structure into a flat, 2D representation.
V = 2*np.sum(term, axis=0)

x_axis = np.linspace(0, 1, 100)
y_axis = np.linspace(0, 1, 100)
plt.figure()
plt.contour(x_axis, y_axis, V, 50)
plt.show()
which runs in less than a second on my system.
Broadcasting becomes much more understandable if you take pen and paper and draw out the arrays that you are "broadcasting", as if you were constructing a building from basic Tetris blocks.
The two versions are functionally the same, but one is completely vectorized while the other uses Python for-loops. Since you're new to Python and numpy, I definitely recommend reading through the broadcasting basics; a tiny illustration follows. Good luck!
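For intuition only (this snippet is mine, not part of the answer above): a (3, 1) column combined with a (1, 4) row broadcasts to a (3, 4) grid, which is exactly what happens with sine_order, x and y in the code above.

import numpy as np

col = np.arange(3).reshape(-1, 1)   # shape (3, 1)
row = np.arange(4).reshape(1, -1)   # shape (1, 4)
grid = 10 * col + row               # broadcasts to shape (3, 4)
print(grid)
# [[ 0  1  2  3]
#  [10 11 12 13]
#  [20 21 22 23]]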
I was trying the following and found it really infeasible; I'm not aware of smart ways to do it. Can somebody help? The input lists are quite big.
The task is to create an image from values I generate.
center_stars contains a list of [x, y] pairs which are the centers of various point-like objects.
The value 1800 means the generated image is 1800x1800 pixels.
The sigma variable has the value 2 by default.
final = [[0]*1800]*1800
for i in range(len(center_stars)):
    xi = center_stars[i][0]
    yi = center_stars[i][1]
    print(i)
    for j in range(1800):
        for k in range(1800):
            final[j][k] += gauss_amplitude[i]*(math.e**((-1*((xi-j)**2+(yi-k)**2))/2*sigma*sigma))
Is there a smarter way to save time using some numpy operations and execute this piece of code in less time?
Something like this:
import math
import numpy as np

N = 1800
final = np.zeros((N, N))    # zeros, not empty, since we accumulate with +=
final1 = np.zeros((N, N))
j = np.arange(N)
k = np.arange(N)
jj, kk = np.meshgrid(j, k)
sigma = 2.
s = 0.5 / (sigma * sigma)

for i in range(len(center_stars)):
    xi = center_stars[i][0]
    yi = center_stars[i][1]
    final += gauss_amplitude[i] * np.exp(-((xi - jj.T)**2 + (yi - kk.T)**2) * s)
    # The loop below is the original (slow) version, kept for comparison:
    for j in range(N):
        for k in range(N):
            final1[j][k] += gauss_amplitude[i] * (math.e**(-((xi-j)**2+(yi-k)**2)/(2*sigma*sigma)))
Besides, I assume you missed the parentheses around 2*sigma*sigma: without them, /2*sigma*sigma divides by 2 and then multiplies by sigma twice.
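A quick check of that precedence difference (d and sigma are just example values):

d = 8.0
sigma = 2
print(d / 2 * sigma * sigma)      # 16.0 -> (d/2)*sigma*sigma, what the posted code computes
print(d / (2 * sigma * sigma))    # 1.0  -> d/(2*sigma**2), what the Gaussian needs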
You can try to compact your code like this (note that this is only more compact, not faster):
Gauss = lambda i, j, k, xi, yi: gauss_amplitude[i]*math.e**(-((xi-j)**2+(yi-k)**2)/(2*sigma*sigma))
final = [[sum(Gauss(i, j, k, x[0], x[1]) for i, x in enumerate(center_stars))
          for k in range(1800)]
         for j in range(1800)]
If your sigmas are all the same, you can achieve this without any loop by using
scipy.signal.convolve2d.
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d
from scipy.stats import multivariate_normal
sigma = 3
width = 200 # smaller than yours so you can see the single pixels
n_stars = 50
# draw some random stars
star_row = np.random.randint(0, width, n_stars)
star_col = np.random.randint(0, width, n_stars)
star_amplitude = np.random.normal(50, 10, n_stars)
# assign amplitudes to center pixel of stars
amplitudes = np.zeros((width, width))
amplitudes[star_row, star_col] = star_amplitude
# create 2d gaussian kernel
row = col = np.arange(-4 * sigma, 4 * sigma + 1)
grid = np.stack(np.meshgrid(row, col)).T
kernel = multivariate_normal(
[0, 0],
[[sigma**2, 0], [0, sigma**2]]
).pdf(grid)
kernel /= kernel.sum()
# convolve with 2d gaussian
final = convolve2d(amplitudes, kernel, mode='same')
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3))
img = ax1.imshow(amplitudes)
fig.colorbar(img, ax=ax1)
ax1.set_title('Before Convolution')
img = ax2.imshow(kernel)
fig.colorbar(img, ax=ax2)
ax2.set_title('Convolution Kernel')
img = ax3.imshow(final)
fig.colorbar(img, ax=ax3)
ax3.set_title('After Convolution')
fig.tight_layout()
fig.savefig('conv2d.png', dpi=300)
Result: (figure conv2d.png shows three panels: the amplitude image before convolution, the Gaussian kernel, and the image after convolution)
If the sigmas differ, you can get away with a single loop over the distinct sigma values; a sketch follows.
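A possible sketch of that idea (my addition, with a hypothetical star_sigma array giving each star's sigma; otherwise the same setup as above):

import numpy as np
from scipy.signal import convolve2d
from scipy.stats import multivariate_normal

width, n_stars = 200, 50
rng = np.random.default_rng(0)
star_row = rng.integers(0, width, n_stars)
star_col = rng.integers(0, width, n_stars)
star_amplitude = rng.normal(50, 10, n_stars)
star_sigma = rng.choice([1, 2, 3], n_stars)      # hypothetical per-star sigmas

def gaussian_kernel(sigma):
    # same construction as above: kernel truncated at 4 sigma, normalized to sum 1
    r = np.arange(-4 * sigma, 4 * sigma + 1)
    grid = np.stack(np.meshgrid(r, r)).T
    k = multivariate_normal([0, 0], [[sigma**2, 0], [0, sigma**2]]).pdf(grid)
    return k / k.sum()

final = np.zeros((width, width))
for sigma in np.unique(star_sigma):              # one convolution per distinct sigma
    sel = star_sigma == sigma
    amplitudes = np.zeros((width, width))
    amplitudes[star_row[sel], star_col[sel]] = star_amplitude[sel]
    final += convolve2d(amplitudes, gaussian_kernel(sigma), mode='same')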
I'm transitioning my code from SciPy's odeint/ode interface to scipy's solve_ivp. With the old interface I would use a while loop as follows:
while solver.successful():
    solver.integrate(t_final, step=True)
    # do other operations
This approach allowed me to store values that depended on the solution after each timestep.
I'm now switching to solve_ivp but I'm not sure how to accomplish the same thing with the solve_ivp solver. Has anyone done this with solve_ivp?
Thanks!
I think I know what you're trying to ask. I had a program that used solve_ivp to integrate over each time step individually, then used the results to calculate the values needed for the next iteration (e.g. heat transfer coefficients, transport coefficients, etc.). I used two nested for loops. The inner loop performs the operations you need at each step; save each value in a list or array before the inner loop ends. The outer loop should only be used to feed time values and possibly reload necessary constants.
For example:
for i in range(start_value, end_value, time_step):
    start_time = i
    end_time = i + time_step
    # load initial values and use the most recent values
    for j in range(0, 1, 1):
        answer = solve_ivp(function, (start_time, end_time), [initial_values])
        # Save new values at the end of a list storing all calculated values
Say you have a system such as
d(Y1)/dt = a1*Y2 + Y1
d(Y2)/dt = a2*Y1 + Y2
and you want to solve it from t = 0 to t = 10 with a 0.1 time step, where a1 and a2 are values calculated or determined elsewhere. This code would work:
from scipy.integrate import solve_ivp
import sympy as sp
import numpy as np
import math
import matplotlib.pyplot as plt

def a1(n):
    return 1E-10*math.exp(n)

def a2(n):
    return 2E-10*math.exp(n)

def rhs(t, y, *args):
    a1, a2 = args
    return [a1*y[1] + y[0], a2*y[0] + y[1]]

Y1 = [0.02]
Y2 = [0.01]
A1 = []
A2 = []
endtime = 10
time_step = 0.1
times = np.linspace(0, endtime, int(endtime/time_step)+1)
tsymb = sp.symbols('t')
ysymb = sp.symbols('y')

for i in range(0, endtime, 1):
    for j in range(0, int(1/time_step), 1):
        tstart = i + j*time_step
        tend = i + j*time_step + time_step
        A1.append(a1(tstart/100))
        A2.append(a2(tstart/100))
        Y0 = [Y1[-1], Y2[-1]]
        args = [A1[-1], A2[-1]]
        answer = solve_ivp(lambda tsymb, ysymb: rhs(tsymb, ysymb, *args), (tstart, tend), Y0)
        Y1.append(answer.y[0][-1])
        Y2.append(answer.y[1][-1])

fig = plt.figure()
plt1 = plt.plot(times, Y1, label="Y1")
plt2 = plt.plot(times, Y2, label="Y2")
plt.xlabel('Time')
plt.ylabel('Y Values')
plt.legend()
plt.grid()
plt.show()
I am using the Python code below to generate a backlash signal for a simple sine wave input. The generated output is not what I need: it should be similar to the output of the Backlash block used in Simulink.
#Importing libraries
import matplotlib.pyplot as plt
import numpy as np
#Setting upper limit and lower limit
LL = -0.5
UL = 0.5
#Generating the sine wave
x=np.linspace(0,10,1000)
y=(np.sin(x))
#phase shift of y1 by -pi/2
y1=(np.sin(x-1.571))
# plot original sine
plt.plot(x,y)
#setting the thresholds
y1[(y1>UL)] = UL
y1[(y1<LL)] = LL
#Initializing at the input
y1[(y==0)] = 0
y1[(y1>UL)] -= UL
y1[(y1<LL)] -= LL
#Plotting both the waves
plt.plot(x,y)
plt.plot(x,y1)
plt.grid()
plt.show()
I don't think there is a simple vectorized implementation for the backlash process. The k-th output depends on the previous values in a nontrivial way. A concise way to write the process (assuming x is the input array and y is the output array) is
y[k] = min(max(y[k-1], x[k] - h), x[k] + h)
where h is half the deadband.
The following script includes a backlash function that uses a Python for-loop. (The function uses if statements instead of the min and max functions.) It is simple, but it won't be very fast. If high performance is important, you might consider reimplementing the function in Cython or numba.
import numpy as np

def backlash(x, deadband=1.0, initial=0.0):
    """
    Backlash process.

    This function emulates the Backlash block of Simulink
    (https://www.mathworks.com/help/simulink/slref/backlash.html).

    x must be a one-dimensional numpy array (or array-like).
    deadband must be a nonnegative scalar.
    initial must be a scalar.
    """
    halfband = 0.5*deadband
    y = np.empty_like(x, dtype=np.float64)
    current_y = initial
    for k in range(len(x)):
        current_x = x[k]
        xminus = current_x - halfband
        if xminus > current_y:
            current_y = xminus
        else:
            xplus = current_x + halfband
            if xplus < current_y:
                current_y = xplus
        y[k] = current_y
    return y

if __name__ == "__main__":
    import matplotlib.pyplot as plt

    t = np.linspace(0, 10, 500)
    x = np.sin(t)
    deadband = 1
    y = backlash(x, deadband=deadband)

    plt.plot(t, x, label='x(t)')
    plt.plot(t, y, '--', label='backlash(x(t))')
    plt.xlabel('t')
    plt.legend(framealpha=1, shadow=True)
    plt.grid(alpha=0.5)
    plt.show()
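To follow up on the Cython/numba suggestion above, here is a minimal sketch of a numba variant (assuming numba is installed and x is a float64 array); it keeps the same loop but compiles it:

import numpy as np
from numba import njit

@njit
def backlash_numba(x, deadband=1.0, initial=0.0):
    # Same recurrence as backlash() above, compiled with numba.
    halfband = 0.5 * deadband
    y = np.empty_like(x)
    current_y = initial
    for k in range(len(x)):
        xminus = x[k] - halfband
        xplus = x[k] + halfband
        if xminus > current_y:
            current_y = xminus
        elif xplus < current_y:
            current_y = xplus
        y[k] = current_y
    return y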
Update: I implemented the backlash function as a NumPy gufunc in my ufunclab repository.
I am working through Example 8.1, Euler's Method, from Mark Newman's book Computational Physics. I rewrote the example as a function using NumPy arrays, but when I plot the result I get two curves on the same figure and I'm not sure how to correct it. Also, is there a better way to convert my two 1D arrays into one 2D array to use for plotting in Matplotlib? Thanks.
Newman's example:
from math import sin
from numpy import arange
from pylab import plot,xlabel,ylabel,show

def f(x,t):
    return -x**3 + sin(t)

a = 0.0        # Start of the interval
b = 10.0       # End of the interval
N = 1000       # Number of steps
h = (b-a)/N    # Size of a single step
x = 0.0        # Initial condition

tpoints = arange(a,b,h)
xpoints = []
for t in tpoints:
    xpoints.append(x)
    x += h*f(x,t)

plot(tpoints,xpoints)
xlabel("t")
ylabel("x(t)")
show()
My modifications:
from pylab import plot,show,xlabel,ylabel
from numpy import linspace,exp,sin,zeros,vstack,column_stack

def f(x,t):
    return (-x**(3) + sin(t))

def Euler(f,x0,a,b):
    N = 1000
    h = (b-a)/N
    t = linspace(a,b,N)
    x = zeros(N,float)
    y = x0
    for i in range(N):
        x[i] = y
        y += h*f(x[i],t[i])
    return column_stack((t,x))  # vstack((t,x)).T

plot(Euler(f,0.0,0.0,10.0))
xlabel("t")
ylabel("x(t)")
show()
The reason you get two lines is that both t and x are plotted against their index, instead of x being plotted against t.
I don't see why you'd want to stack the two arrays. Just keep them separate, which will also solve the problem of the two curves.
The following works fine:
import numpy as np
import matplotlib.pyplot as plt

f = lambda x,t: -x**3 + np.sin(t)

def Euler(f,x0,a,b):
    N = 1000
    h = (b-a)/N
    t = np.linspace(a,b,N)
    x = np.zeros(N,float)
    y = x0
    for i in range(N):
        x[i] = y
        y += h*f(x[i],t[i])
    return t,x

t,x = Euler(f,0.0,0.0,10.0)
plt.plot(t,x)
plt.xlabel("t")
plt.ylabel("x(t)")
plt.show()
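On the second part of the question: if a single 2D array is still wanted, np.column_stack works, but plt.plot(A) plots each column of A against its index, so the columns must be split again when plotting. A small self-contained sketch (using a stand-in for the Euler output):

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 10, 1000)
x = np.sin(t)                      # stand-in for the Euler output
data = np.column_stack((t, x))     # shape (N, 2): column 0 is t, column 1 is x

plt.plot(data)                     # plots both columns against their index -> two curves
plt.figure()
plt.plot(data[:, 0], data[:, 1])   # x against t -> the intended single curve
plt.show()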
I am trying to convert this code from Matlab to Python:
x(1) = 0.1;
j = 0;
for z = 2.8:0.0011:3.9
    j = j+1 %Gives progress of calculation
    zz(j) = z;
    for n = 1:200
        x(n+1) = z*x(n)*(1 - x(n));
        xn(n,j) = x(n);
    end
end
h = plot(zz,xn(100:200,:),'r.');
set(h,'Markersize',3);
and so far I have got this:
import numpy as np
import matplotlib.pyplot as plt

x = []
x.append(0.1)
xn = []
j = 0
z_range = np.arange(2.8, 3.9, 0.0011)
n_range = range(0,200,1)

plt.figure()
for zz in z_range:
    j = j+1
    print j  # Gives progress of calculation
    for n in n_range:
        w = zz * x[n] * (1.0-x[n])
        x.append(zz * x[n] * (1.0-x[n]))
        xn.append(w)

x = np.array(x)
xn = np.array(xn)
xn_matrix = xn.reshape((z_range.size, len(n_range)))
xn_mat = xn_matrix.T

plt.figure()
#for i in z_range:
#    plt.plot(z_range, xn_mat[0:i], 'r.')
plt.show()
I'm not sure if this is the best way to convert the for loops from Matlab into Python, and I seem to have problems with plotting the result. The x(n+1) = z*x(n)*(1 - x(n)); and xn(n,j) = x(n); lines in Matlab are bugging me, so could someone please explain if there is a more efficient way of writing this in Python?
import numpy as np
import matplotlib.pyplot as plt

x = 0.1
# preallocate xn
xn = np.zeros([1001, 200])
# linspace is better for a non-integer step
zz = np.linspace(2.8, 3.9, 1001)

# use enumerate instead of counting iterations
for j,z in enumerate(zz):
    print(j)
    for n in range(200):
        # use tuple unpacking so old values of x are unneeded
        xn[j,n], x = x, z*x*(1 - x)
plt.plot(zz, xn[:, 100:], 'r.')
plt.show()
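If more speed is needed, the loop over z can be vectorized as well, leaving only the 200-iteration loop in Python; a sketch (this restarts every z at x = 0.1, as the Matlab code effectively does):

import numpy as np
import matplotlib.pyplot as plt

zz = np.linspace(2.8, 3.9, 1001)
x = np.full_like(zz, 0.1)        # one trajectory per z value
xn = np.zeros((zz.size, 200))

for n in range(200):             # only the iteration loop remains in Python
    xn[:, n] = x
    x = zz * x * (1 - x)

plt.plot(zz, xn[:, 100:], 'r.', markersize=3)
plt.show()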