Minimum spanning tree: remove all extra edges - Python

An "extra edge" is an edge between two points where each endpoint is also connected to at least one other edge.
I want to disconnect the MST by deleting these extra edges.
What is the best approach to minimize the weight of the new, disconnected MST,
or in what order should I delete these edges (deleting one can affect whether others can still be deleted)?
My approach would be to delete the extra edges with the largest weight first.
https://prnt.sc/1xq1msp
In this case, removing C-D (weight 7) means no more edges can be deleted.
But you could instead remove B-C and then D-E, which is a better solution.
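To make the example concrete, here is a small brute-force check of which deletions are feasible. This is only an illustration: it assumes the tree in the screenshot is the path A-B-C-D-E-F, and every weight except C-D = 7 is made up. An edge set can be deleted only if every vertex still touches at least one remaining edge:
from itertools import combinations

# Hypothetical weights; only C-D = 7 is given in the screenshot.
edges = {("A", "B"): 4, ("B", "C"): 5, ("C", "D"): 7, ("D", "E"): 5, ("E", "F"): 4}
vertices = {v for e in edges for v in e}

best_removed, best_saving = None, -1
for k in range(len(edges) + 1):
    for removed in combinations(edges, k):
        kept = [e for e in edges if e not in removed]
        # Every vertex must still touch at least one kept edge.
        if {v for e in kept for v in e} == vertices:
            saving = sum(edges[e] for e in removed)
            if saving > best_saving:
                best_removed, best_saving = removed, saving

print(best_removed, best_saving)
With these made-up weights it reports that deleting B-C and D-E (total 10) beats deleting C-D alone (7), matching the example above.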

Here’s an exact solution with NumPy/SciPy/OR-Tools that uses a k-d tree
to enumerate a sparse subset of edges that could possibly be included in
an optimal solution, then formulates and solves a mixed integer program.
Not sure it will scale to your needs, though; you could set a gap
limit if you’re willing to settle for an approximation.
import collections
from random import random  # used by random_point() below

import numpy
import scipy.spatial
from ortools.linear_solver import pywraplp
def min_edge_cover(points):
    # Enumerate the candidate edges.
    candidate_edges = set()
    tree = scipy.spatial.KDTree(points)
    min_distances = numpy.ndarray(len(points))
    for i, p in enumerate(points):
        if i % 1000 == 0:
            print(i)
        distances, indexes = tree.query(p, k=2)
        # Ignore p itself.
        d, j = (
            (distances[1], indexes[1])
            if indexes[0] == i
            else (distances[0], indexes[0])
        )
        candidate_edges.add((min(i, j), max(i, j)))
        min_distances[i] = d
    for i, p in enumerate(points):
        if i % 1000 == 0:
            print(i)
        # An edge is profitable only if it's shorter than the sum of the
        # distance from each of its endpoints to that endpoint's nearest
        # neighbor.
        indexes = tree.query_ball_point(p, 2 * min_distances[i])
        for j in indexes:
            if i == j:
                continue
            discount = (
                min_distances[i] + min_distances[j]
            ) - scipy.spatial.distance.euclidean(points[i], points[j])
            if discount >= 0:
                candidate_edges.add((min(i, j), max(i, j)))
    candidate_edges = sorted(candidate_edges)
    # Formulate and solve a mixed integer program to find the minimum distance
    # edge cover. There's a way to do this with general weighted matching, but
    # OR-Tools doesn't expose that library yet.
    solver = pywraplp.Solver.CreateSolver("SCIP")
    objective = 0
    edge_variables = []
    coverage = collections.defaultdict(lambda: 0)
    for i, j in candidate_edges:
        x = solver.BoolVar("x{}_{}".format(i, j))
        objective += scipy.spatial.distance.euclidean(points[i], points[j]) * x
        coverage[i] += x
        coverage[j] += x
        edge_variables.append(x)
    solver.Minimize(objective)
    for c in coverage.values():
        solver.Add(c >= 1)
    solver.EnableOutput()
    assert solver.Solve() == pywraplp.Solver.OPTIMAL
    return {e for (e, x) in zip(candidate_edges, edge_variables) if x.solution_value()}


def random_point():
    return complex(random(), random())


def test(points, graphics=False):
    cover = min_edge_cover(points)
    if not graphics:
        return
    with open("out.ps", "w") as f:
        print("%!PS", file=f)
        print(0, "setlinewidth", file=f)
        inch = 72
        scale = 7 * inch
        print((8.5 * inch - scale) / 2, (11 * inch - scale) / 2, "translate", file=f)
        for x, y in points:
            print(scale * x, scale * y, 1, 0, 360, "arc", "fill", file=f)
        for i, j in cover:
            xi, yi = points[i]
            xj, yj = points[j]
            print(
                scale * xi,
                scale * yi,
                "moveto",
                scale * xj,
                scale * yj,
                "lineto",
                file=f,
            )
        print("stroke", file=f)
        print("showpage", file=f)


test(numpy.random.rand(100, 2), graphics=True)
test(numpy.random.rand(10000, 2))

Cannot pass one test case using Bellman Ford Algorithm

Problem Description
Task. Given a directed graph with possibly negative edge weights and with 𝑛 vertices and 𝑚 edges, check
whether it contains a cycle of negative weight.
Input Format. A graph is given in the standard format.
Constraints. 1 ≤ 𝑛 ≤ 10^3, 0 ≤ 𝑚 ≤ 10^4, edge weights are integers of absolute value at most 10^3.
Output Format. Output 1 if the graph contains a cycle of negative weight and 0 otherwise.
Here is my solution:
import sys


def negative_cycle(adj, cost):
    # write your code here
    distance = [float('inf')] * len(adj)
    distance[0] = 0
    edges = []
    for i in range(len(adj)):
        for j in adj[i]:
            edges.append([i, j])
    for _ in range(len(adj) - 1):
        for i in edges:
            a = i[0]
            b = adj[a].index(i[1])
            if distance[i[1]] > distance[a] + cost[a][b] and distance[a] != float('inf'):
                distance[i[1]] = distance[a] + cost[a][b]
    for i in edges:
        a, b = i[0], adj[i[0]].index(i[1])
        if distance[i[1]] > distance[a] + cost[a][b] and distance[a] != float('inf'):
            return 1
    return 0


if __name__ == '__main__':
    input = sys.stdin.read()
    data = list(map(int, input.split()))
    n, m = data[0:2]
    data = data[2:]
    edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))
    data = data[3 * m:]
    adj = [[] for _ in range(n)]
    cost = [[] for _ in range(n)]
    for ((a, b), w) in edges:
        adj[a - 1].append(b - 1)
        cost[a - 1].append(w)
    print(negative_cycle(adj, cost))
My code works for most test cases but fails on one test case.
Failed case #12/19: Wrong answer
(Time used: 0.23/10.00, memory used: 14229504/2147483648.)
The input format is:
Number of vertices, number of edges
Vertex 1, vertex 2, weight of edge
... (one such line for each edge)
May I know what's the error in this code?
cost[a - 1].append(w)
Here you're not storing the cost of the edges properly. It should be cost[a - 1][b - 1] = w, i.e. the cost of the edge going from A -> B.
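One way to apply that (a sketch of the idea only; it assumes at most one edge per ordered pair of vertices) is to key the weights by destination vertex instead of recovering the index with adj[a].index(...):
# Sketch: store each weight keyed by the destination vertex.
cost = [dict() for _ in range(n)]
for ((a, b), w) in edges:
    adj[a - 1].append(b - 1)
    cost[a - 1][b - 1] = w

# ...and inside negative_cycle, relax with a direct lookup, e.g.:
#     if distance[a] != float('inf') and distance[b] > distance[a] + cost[a][b]:
#         distance[b] = distance[a] + cost[a][b]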

Split the upper edge line and lower edge line for a given cloud of points?

I have a list of 2D points (x, y), and I need to find a smooth curve for the upper and lower edges (the red and blue curves, respectively).
See the picture below:
Here I've found a good example, where the outer edge of x,y points is detected.
Using that work as a starting point, I now have:
# https://stackoverflow.com/a/50714300/7200745
from scipy.spatial import Delaunay
import numpy as np
import matplotlib.pyplot as plt
def alpha_shape(points, alpha, only_outer=True):
    """
    Compute the alpha shape (concave hull) of a set of points.
    :param points: np.array of shape (n,2) points.
    :param alpha: alpha value.
    :param only_outer: boolean value to specify if we keep only the outer border
    or also inner edges.
    :return: set of (i,j) pairs representing edges of the alpha-shape. (i,j) are
    the indices in the points array.
    """
    assert points.shape[0] > 3, "Need at least four points"

    def add_edge(edges, i, j):
        """
        Add an edge between the i-th and j-th points,
        if not in the list already
        """
        if (i, j) in edges or (j, i) in edges:
            # already added
            assert (j, i) in edges, "Can't go twice over same directed edge right?"
            if only_outer:
                # if both neighboring triangles are in shape, it's not a boundary edge
                edges.remove((j, i))
            return
        edges.add((i, j))

    tri = Delaunay(points)
    edges = set()
    # Loop over triangles:
    # ia, ib, ic = indices of corner points of the triangle
    for ia, ib, ic in tri.vertices:
        pa = points[ia]
        pb = points[ib]
        pc = points[ic]
        # Computing radius of triangle circumcircle
        # www.mathalino.com/reviewer/derivation-of-formulas/derivation-of-formula-for-radius-of-circumcircle
        a = np.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)
        b = np.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)
        c = np.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)
        s = (a + b + c) / 2.0
        area = np.sqrt(s * (s - a) * (s - b) * (s - c))
        circum_r = a * b * c / (4.0 * area)
        if circum_r < alpha:
            add_edge(edges, ia, ib)
            add_edge(edges, ib, ic)
            add_edge(edges, ic, ia)
    return edges


def find_edges_with(i, edge_set):
    i_first = [j for (x, j) in edge_set if x == i]
    i_second = [j for (j, x) in edge_set if x == i]
    return i_first, i_second


def stitch_boundaries(edges):
    edge_set = edges.copy()
    boundary_lst = []
    while len(edge_set) > 0:
        boundary = []
        edge0 = edge_set.pop()
        boundary.append(edge0)
        last_edge = edge0
        while len(edge_set) > 0:
            i, j = last_edge
            j_first, j_second = find_edges_with(j, edge_set)
            if j_first:
                edge_set.remove((j, j_first[0]))
                edge_with_j = (j, j_first[0])
                boundary.append(edge_with_j)
                last_edge = edge_with_j
            elif j_second:
                edge_set.remove((j_second[0], j))
                edge_with_j = (j, j_second[0])  # flip edge rep
                boundary.append(edge_with_j)
                last_edge = edge_with_j
            if edge0[0] == last_edge[1]:
                break
        boundary_lst.append(boundary)
    return boundary_lst[0]


# generating of random points
N = 1000
r = 1 - 2 * np.random.random((N, 2))
r_norm = np.linalg.norm(r, axis=1)
points = r[r_norm <= 1]
plt.figure(figsize=(10, 10))
plt.scatter(points[:, 0], points[:, 1], color='k', s=1)

# Computing the alpha shape
edges = alpha_shape(points, alpha=1, only_outer=True)
# order edges
edges = stitch_boundaries(edges)
plt.axis('equal')

edge_points = np.zeros((len(edges), 2))
k = 0
for i, j in edges:
    edge_points[k, :] = points[[i, j], 0][0], points[[i, j], 1][0]
    k += 1
plt.plot(edge_points[:, 0], edge_points[:, 1])

# theoretical/expected edges
# xx = np.linspace(-1, 1, 100)
# yy_upper = np.sqrt(1 - xx**2)
# yy_lower = -np.sqrt(1 - xx**2)
# plt.plot(xx, yy_upper, 'r:')
# plt.plot(xx, yy_lower, 'b:')
Right now the cloud of points is shown in black. The blue line is obtained from the algorithm above.
UPDATE:
The starting and final points can be chosen as the leftmost point (or set by hand, that is not a problem).
I am expecting the following result:
You can use the fact (see the scipy.spatial.Delaunay documentation) that
"for 2-D, the triangle points are oriented counterclockwise".
Therefore, the outer edge points constructed in the alpha shape will always be oriented counterclockwise, i.e. the inner side of the shape will be to their left (if this were not stated in the documentation we could have checked it ourselves and flipped if necessary, but here there is no need).
This means that the points along the polygon between the leftmost point and the rightmost point are the lower hull, and the points between the rightmost and the leftmost are the upper hull. The code below implements this idea.
min_x_ind = np.argmin(edge_points[:, 0])
max_x_ind = np.argmax(edge_points[:, 0])
if min_x_ind < max_x_ind:
    lower_hull = edge_points[min_x_ind:max_x_ind + 1, :]
    upper_hull = np.concatenate([edge_points[max_x_ind:, :], edge_points[:min_x_ind + 1, :]])
else:
    upper_hull = edge_points[max_x_ind:min_x_ind + 1, :]
    lower_hull = np.concatenate([edge_points[min_x_ind:, :], edge_points[:max_x_ind + 1, :]])
and the result can be visualized for example with:
plt.plot(upper_hull[:,0],upper_hull[:,1], "r", lw=3)
plt.plot(lower_hull[:,0],lower_hull[:,1], "b", lw=3)
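If you also want the edges as smooth curves rather than raw polylines (as the dotted theoretical curves in the question suggest), one option is to fit a smoothing spline to each hull. This is only a sketch; the smoothing factor s is an arbitrary choice that you would need to tune:
from scipy.interpolate import UnivariateSpline

def smooth_hull(hull, n_samples=200, s=0.01):
    # Sort by x and drop duplicate x values so the 1-D spline fit is well defined.
    order = np.argsort(hull[:, 0])
    xs, ys = hull[order, 0], hull[order, 1]
    xs, unique_idx = np.unique(xs, return_index=True)
    ys = ys[unique_idx]
    spline = UnivariateSpline(xs, ys, s=s)
    x_fine = np.linspace(xs[0], xs[-1], n_samples)
    return x_fine, spline(x_fine)

x_up, y_up = smooth_hull(upper_hull)
x_lo, y_lo = smooth_hull(lower_hull)
plt.plot(x_up, y_up, 'r:')
plt.plot(x_lo, y_lo, 'b:')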

Multigrid Poisson Solver

I am trying to make my own CFD solver and one of the most computationally expensive parts is solving for the pressure term. One way to solve Poisson differential equations faster is by using a multigrid method. The basic recursive algorithm for this is:
function phi = V_Cycle(phi,f,h)
    % Recursive V-Cycle Multigrid for solving the Poisson equation (\nabla^2 phi = f) on a uniform grid of spacing h

    % Pre-Smoothing
    phi = smoothing(phi,f,h);

    % Compute Residual Errors
    r = residual(phi,f,h);

    % Restriction
    rhs = restriction(r);
    eps = zeros(size(rhs));

    % stop recursion at smallest grid size, otherwise continue recursion
    if smallest_grid_size_is_achieved
        eps = smoothing(eps,rhs,2*h);
    else
        eps = V_Cycle(eps,rhs,2*h);
    end

    % Prolongation and Correction
    phi = phi + prolongation(eps);

    % Post-Smoothing
    phi = smoothing(phi,f,h);
end
I've attempted to implement this algorithm myself (code at the end of this question); however, it is very slow and doesn't give good results, so evidently it is doing something wrong. I've been trying to find out why for too long, and I think it's worthwhile seeing if anyone can help me.
If I use a grid size of 2^5 by 2^5 points, it can solve the problem and give reasonable results. However, as soon as I go above this it takes exponentially longer to solve and basically gets stuck at some level of inaccuracy, no matter how many V-cycles are performed. At 2^7 by 2^7 points, the code takes far too long to be useful.
I think my main issue is that my implementation of the Jacobi iteration uses dense linear algebra to calculate the update at each step. This should, in general, be fast; however, the update matrix A is (n*m) by (n*m), so computing the dot product for a 2^7 by 2^7 grid is expensive. As most of the entries are just zeros, should I calculate the result using a different method?
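For reference, here is roughly what I mean: an untested sketch of building the same Jacobi update matrix with scipy.sparse instead of a dense array (the names mirror calc_RJ in my code below):
import numpy as np
import scipy.sparse

def calc_RJ_sparse(rows, cols):
    # Same 0.25-weighted neighbour stencil as calc_RJ below, but stored sparsely,
    # so each product costs O(number of nonzeros) instead of O((n*m)^2).
    n = rows * cols
    M = scipy.sparse.lil_matrix((n, n))
    for r in range(rows):
        for c in range(cols):
            i = r * cols + c
            if c > 0:
                M[i - 1, i] = M[i, i - 1] = 0.25
            if r > 0:
                M[i - cols, i] = M[i, i - cols] = 0.25
    return M.tocsr()

# The smoother step would then be, e.g.:
#     vt = Rj_sparse.dot(vt) + 0.25 * f_inner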
If anyone has any experience with multigrid methods, I would appreciate any advice!
Thanks
my code:
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 16:24:16 2020
#author: mclea
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import griddata
from matplotlib import cm


def restrict(A):
    """
    Creates a new grid of points which is half the size of the original
    grid in each dimension.
    """
    n = A.shape[0]
    m = A.shape[1]
    new_n = int((n-2)/2 + 2)
    new_m = int((m-2)/2 + 2)
    new_array = np.zeros((new_n, new_m))
    for i in range(1, new_n-1):
        for j in range(1, new_m-1):
            ii = int((i-1)*2) + 1
            jj = int((j-1)*2) + 1
            # print(i, j, ii, jj)
            new_array[i, j] = np.average(A[ii:ii+2, jj:jj+2])
    new_array = set_BC(new_array)
    return new_array


def interpolate_array(A):
    """
    Creates a grid of points which is double the size of the original
    grid in each dimension. Uses linear interpolation between grid points.
    """
    n = A.shape[0]
    m = A.shape[1]
    new_n = int((n-2)*2 + 2)
    new_m = int((m-2)*2 + 2)
    new_array = np.zeros((new_n, new_m))
    i = (np.indices(A.shape)[0]/(A.shape[0]-1)).flatten()
    j = (np.indices(A.shape)[1]/(A.shape[1]-1)).flatten()
    A = A.flatten()
    new_i = np.linspace(0, 1, new_n)
    new_j = np.linspace(0, 1, new_m)
    new_ii, new_jj = np.meshgrid(new_i, new_j)
    new_array = griddata((i, j), A, (new_jj, new_ii), method="linear")
    return new_array


def adjacency_matrix(rows, cols):
    """
    Creates the adjacency matrix for an n by m shaped grid
    """
    n = rows*cols
    M = np.zeros((n, n))
    for r in range(rows):
        for c in range(cols):
            i = r*cols + c
            # Two inner diagonals
            if c > 0: M[i-1, i] = M[i, i-1] = 1
            # Two outer diagonals
            if r > 0: M[i-cols, i] = M[i, i-cols] = 1
    return M


def create_differences_matrix(rows, cols):
    """
    Creates the central differences matrix A for an n by m shaped grid
    """
    n = rows*cols
    M = np.zeros((n, n))
    for r in range(rows):
        for c in range(cols):
            i = r*cols + c
            # Two inner diagonals
            if c > 0: M[i-1, i] = M[i, i-1] = -1
            # Two outer diagonals
            if r > 0: M[i-cols, i] = M[i, i-cols] = -1
    np.fill_diagonal(M, 4)
    return M


def set_BC(A):
    """
    Sets the boundary conditions of the field
    """
    A[:, 0] = A[:, 1]
    A[:, -1] = A[:, -2]
    A[0, :] = A[1, :]
    A[-1, :] = A[-2, :]
    return A


def create_A(n, m):
    """
    Creates all the components required for the Jacobi update function
    for an n by m shaped grid
    """
    LaddU = adjacency_matrix(n, m)
    A = create_differences_matrix(n, m)
    invD = np.zeros((n*m, n*m))
    np.fill_diagonal(invD, 1/4)
    return A, LaddU, invD


def calc_RJ(rows, cols):
    """
    Calculates the Jacobi update matrix Rj for an n by m shaped grid
    """
    n = int(rows*cols)
    M = np.zeros((n, n))
    for r in range(rows):
        for c in range(cols):
            i = r*cols + c
            # Two inner diagonals
            if c > 0: M[i-1, i] = M[i, i-1] = 0.25
            # Two outer diagonals
            if r > 0: M[i-cols, i] = M[i, i-cols] = 0.25
    return M


def jacobi_update(v, f, nsteps=1, max_err=1e-3):
    """
    Uses a Jacobi update matrix to solve nabla(v) = f
    """
    f_inner = f[1:-1, 1:-1].flatten()
    n = v.shape[0]
    m = v.shape[1]
    A, LaddU, invD = create_A(n-2, m-2)
    Rj = calc_RJ(n-2, m-2)
    update = True
    step = 0
    while update:
        v_old = v.copy()
        step += 1
        vt = v_old[1:-1, 1:-1].flatten()
        vt = np.dot(Rj, vt) + np.dot(invD, f_inner)
        v[1:-1, 1:-1] = vt.reshape((n-2), (m-2))
        err = v - v_old
        if step == nsteps or np.abs(err).max() < max_err:
            update = False
    return v, (step, np.abs(err).max())


def MGV(f, v):
    """
    Solves for nabla(v) = f using a multigrid method
    """
    # global A, r
    n = v.shape[0]
    m = v.shape[1]
    # If on the smallest grid size, compute the exact solution
    if n <= 6 or m <= 6:
        v, info = jacobi_update(v, f, nsteps=1000)
        return v
    else:
        # smoothing
        v, info = jacobi_update(v, f, nsteps=10, max_err=1e-1)
        A = create_A(n, m)[0]
        # calculate residual
        r = np.dot(A, v.flatten()) - f.flatten()
        r = r.reshape(n, m)
        # downsample residual error
        r = restrict(r)
        zero_array = np.zeros(r.shape)
        # interpolate the correction computed on a coarser grid
        d = interpolate_array(MGV(r, zero_array))
        # Add prolongated coarser grid solution onto the finer grid
        v = v - d
        v, info = jacobi_update(v, f, nsteps=10, max_err=1e-6)
        return v


sigma = 0

# Setting up the grid
k = 6
n = 2**k + 2
m = 2**k + 2
hx = 1/n
hy = 1/m
L = 1
H = 1
x = np.linspace(0, L, n)
y = np.linspace(0, H, m)
XX, YY = np.meshgrid(x, y)

# Setting up the initial conditions
f = np.ones((n, m))
v = np.zeros((n, m))

# How many V cycles to perform
err = 1
n_cycles = 10

loop = True
cycle = 0
# Perform V cycles until converged or reached the maximum
# number of cycles
while loop:
    cycle += 1
    v_new = MGV(f, v)
    if np.abs(v - v_new).max() < err:
        loop = False
    if cycle == n_cycles:
        loop = False
    v = v_new

print("Number of cycles " + str(cycle))
plt.contourf(v)
I realize that I'm not answering your question directly, but I do note that you have quite a few loops that will contribute some overhead cost. When optimizing code, I have found the following thread useful, particularly the line profiler answer. This way you can focus on the "high time cost" lines and then start to ask more specific questions about opportunities to optimize.
How do I get time of a Python program's execution?
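As a minimal sketch of that idea (standard library only; it assumes the variables f, n and m from the code above are in scope), you could time a single V-cycle and profile a full call to see which functions dominate:
import cProfile
import time

start = time.perf_counter()
v = MGV(f, np.zeros((n, m)))  # one V-cycle from the code above
print("one V-cycle took", time.perf_counter() - start, "seconds")

# Or profile a whole call to see which functions dominate the run time.
cProfile.run("MGV(f, np.zeros((n, m)))", sort="cumulative")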

ValueError: x and y must have the same first dimension

I am trying to implement a finite difference approximation to solve the Heat Equation, u_t = k * u_{xx}, in Python using NumPy.
Here is a copy of the code I am running:
## This program is to implement a Finite Difference method approximation
## to solve the Heat Equation, u_t = k * u_xx,
## in 1D w/out sources & on a finite interval 0 < x < L. The PDE
## is subject to B.C: u(0,t) = u(L,t) = 0,
## and the I.C: u(x,0) = f(x).
import numpy as np
import matplotlib.pyplot as plt

# parameters
L = 1    # length of the rod
T = 10   # terminal time
N = 10
M = 100
s = 0.25

# uniform mesh
x_init = 0
x_end = L
dx = float(x_end - x_init) / N
x = np.arange(x_init, x_end, dx)
x[0] = x_init

# time discretization
t_init = 0
t_end = T
dt = float(t_end - t_init) / M
t = np.arange(t_init, t_end, dt)
t[0] = t_init

# Boundary Conditions
for m in xrange(0, M):
    t[m] = m * dt

# Initial Conditions
for j in xrange(0, N):
    x[j] = j * dx

# definition of solution u(x,t) to u_t = k * u_xx
u = np.zeros((N, M+1))  # array to store values of the solution

# Finite Difference Scheme:
u[:, 0] = x**2  # initial condition
for m in xrange(0, M):
    for j in xrange(1, N-1):
        if j == 1:
            u[j-1, m] = 0  # Boundary condition
        elif j == N-1:
            u[j+1, m] = 0
        else:
            u[j, m+1] = u[j, m] + s * (u[j+1, m] - 2 * u[j, m] + u[j-1, m])

print u,  # t, x
plt.plot(u, t)
# plt.show()
I think my code is working properly and it is producing output. I want to plot the solution u versus t (my time vector). If I can plot the graph, then I can check whether my numerical approximation agrees with the expected behaviour of the Heat Equation. However, I am getting the error "x and y must have the same first dimension". How can I correct this issue?
An additional question: am I better off attempting to make an animation with matplotlib.animation instead of using matplotlib.pyplot?
Thanks so much for any and all help! It is very greatly appreciated!
Okay, so I had a "brain dump" and tried plotting u vs. t, sort of forgetting that u, being the solution to the Heat Equation (u_t = k * u_{xx}), is defined as u(x,t), so it already contains the time dependence. I made the following correction to my code:
print u #t, x
plt.plot(u)
plt.show()
And now my program is finally displaying an image. And here it is:
It is absolutely beautiful, isn't it?
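For completeness, since u has shape (N, M+1) with one column per time step, a few individual time slices can also be plotted against the spatial grid x. This is just a sketch; the chosen time indices are arbitrary:
for m in [0, 25, 50, 100]:  # arbitrary time indices
    plt.plot(x, u[:, m], label="t = %.1f" % (m * dt))
plt.xlabel("x")
plt.ylabel("u(x, t)")
plt.legend()
plt.show()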

Ising Model in Python

I'm currently working on writing code for the Ising Model using Python3. I'm still pretty new to coding. I have working code, but the output result is not as expected and I can't seem to find the error. Here is my code:
import numpy as np
import random


def init_spin_array(rows, cols):
    return np.random.choice((-1, 1), size=(rows, cols))


def find_neighbors(spin_array, lattice, x, y):
    left = (x, y - 1)
    right = (x, y + 1 if y + 1 < (lattice - 1) else 0)
    top = (x - 1, y)
    bottom = (x + 1 if x + 1 < (lattice - 1) else 0, y)
    return [spin_array[left[0], left[1]],
            spin_array[right[0], right[1]],
            spin_array[top[0], top[1]],
            spin_array[bottom[0], bottom[1]]]


def energy(spin_array, lattice, x, y):
    return -1 * spin_array[x, y] * sum(find_neighbors(spin_array, lattice, x, y))


def main():
    lattice = eval(input("Enter lattice size: "))
    temperature = eval(input("Enter the temperature: "))
    sweeps = eval(input("Enter the number of Monte Carlo Sweeps: "))
    spin_array = init_spin_array(lattice, lattice)
    print("Original System: \n", spin_array)
    # the Monte Carlo follows below
    for sweep in range(sweeps):
        for i in range(lattice):
            for j in range(lattice):
                e = energy(spin_array, lattice, i, j)
                if e <= 0:
                    spin_array[i, j] *= -1
                elif np.exp(-1 * e/temperature) > random.randint(0, 1):
                    spin_array[i, j] *= -1
                else:
                    continue
    print("Modified System: \n", spin_array)


main()
I think the error is in the Monte Carlo Loop, but I am not sure. The system should be highly ordered at low temperatures and become disordered past the critical temperature of 2.27. In other words, the randomness of the system should increase as T approaches 2.27. For example, at T=.1, we should see large patches of spins that are aligned, i.e. patches of -1s and 1s. Past 2.27 the system should be disordered and we should not see these patches.
Your question would make much more sense if you were to include the system size, the number of sweeps, and the average magnetisation. How many of the intermediate configurations are ordered and how many disordered? MC is a sampling technique: individual configurations mean nothing, and there might (and will) be disordered states at low temperature and ordered states at high T. It is the ensemble properties (the average magnetisation) that are meaningful.
Anyway, there are three errors in your code: a small one, a medium one, and a really severe one.
The small one is that you are ignoring an entire row and an entire column while searching for neighbours in find_neighbors:
right = (x, y + 1 if y + 1 < (lattice - 1) else 0)
should be:
right = (x, y + 1 if y + 1 < lattice else 0)
or even better:
right = (x, (y + 1) % lattice)
Same applies to bottom.
The medium one is that your computation of the energy difference is off by a factor of two:
def energy(spin_array, lattice, x, y):
    return -1 * spin_array[x, y] * sum(find_neighbors(spin_array, lattice, x, y))
           ^^
The factor is actually 2*J, where J is the coupling constant; therefore, having -1 there means:
- your critical temperature is halved, and more importantly...
- you have antiferromagnetic spin interaction (J < 0), so no ordered states for you even at very low temperatures.
The worst mistake, however, is the use of random.randint() for the rejection sampling:
elif np.exp(-1 * e/temperature) > random.randint(0, 1):
    spin_array[i, j] *= -1
You should be using random.random() instead, otherwise the transition probability will always be 50%.
Here is a modification of your program that automatically sweeps over the temperature region from 0.1 to 5.0:
import numpy as np
import random


def init_spin_array(rows, cols):
    return np.ones((rows, cols))


def find_neighbors(spin_array, lattice, x, y):
    left = (x, y - 1)
    right = (x, (y + 1) % lattice)
    top = (x - 1, y)
    bottom = ((x + 1) % lattice, y)
    return [spin_array[left[0], left[1]],
            spin_array[right[0], right[1]],
            spin_array[top[0], top[1]],
            spin_array[bottom[0], bottom[1]]]


def energy(spin_array, lattice, x, y):
    return 2 * spin_array[x, y] * sum(find_neighbors(spin_array, lattice, x, y))


def main():
    RELAX_SWEEPS = 50
    lattice = eval(input("Enter lattice size: "))
    sweeps = eval(input("Enter the number of Monte Carlo Sweeps: "))
    for temperature in np.arange(0.1, 5.0, 0.1):
        spin_array = init_spin_array(lattice, lattice)
        # the Monte Carlo follows below
        mag = np.zeros(sweeps + RELAX_SWEEPS)
        for sweep in range(sweeps + RELAX_SWEEPS):
            for i in range(lattice):
                for j in range(lattice):
                    e = energy(spin_array, lattice, i, j)
                    if e <= 0:
                        spin_array[i, j] *= -1
                    elif np.exp((-1.0 * e)/temperature) > random.random():
                        spin_array[i, j] *= -1
            mag[sweep] = abs(sum(sum(spin_array))) / (lattice ** 2)
        print(temperature, sum(mag[RELAX_SWEEPS:]) / sweeps)


main()
And the result for 20x20 and 100x100 lattices and 100 sweeps:
The starting configuration is a completely ordered one, to prevent the development of domain walls that are very stable at low temperatures. Also, RELAX_SWEEPS additional sweeps are performed initially in order to thermalise the system (not nearly enough when close to the critical temperature, but the Metropolis-Hastings algorithm cannot properly handle the critical slowdown there anyway).
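If you want to reproduce the magnetisation-versus-temperature curve from the printed output, here is a sketch (it assumes main() is changed to append each temperature and average magnetisation to lists instead of only printing them):
import matplotlib.pyplot as plt

temperatures, magnetisations = [], []
# ...inside the temperature loop, instead of (or in addition to) the print:
#     temperatures.append(temperature)
#     magnetisations.append(sum(mag[RELAX_SWEEPS:]) / sweeps)

plt.plot(temperatures, magnetisations, "o-")
plt.xlabel("temperature")
plt.ylabel("average magnetisation per spin")
plt.axvline(2.27, linestyle=":", label="critical temperature")
plt.legend()
plt.show()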
