N-Queens Annealing Program Not Working - python

I'm trying to recreate the n-queens problem and solve it with simulated annealing, but the board object from my Board class throws an error when I try to set the temperature using len(board)**2. Any help would be very appreciated! I have included the source code and the output. Thanks!
import time
import random
import math
class Board(object):
    """An N-queens solution attempt."""

    def __init__(self, queens):
        """Instances differ by their queen placements."""
        self.queens = queens.copy()

    def display(self):
        """Print the board."""
        for r in range(len(self.queens)):
            for c in range(len(self.queens)):
                if self.queens[c] == r:
                    print 'Q',
                else:
                    print '-',
            print
        print

    def moves(self):
        """Return a list of possible moves given the current placements."""
        bestMoves = []
        optimalHeuristic = heuristic(board)
        for a, b in moves.iteritems():
            if b < optimalHeuristic:
                optimalHeuristic = b
        for a, b in moves.iteritems():
            if b == optimalHeuristic:
                bestMoves.append(a)
        return bestMoves

    def neighbor(self, move):
        """Return a Board instance like this one but with one move made."""
        if len(bestMoves) > 0:
            pick = random.randint(0, len(bestMoves) - 1)
            column = bestMoves[pick][0]
            row = bestMoves[pick][1]
            board[column] = row
        return board

    def heuristic(self):
        """Compute the cost of this solution."""
        h = 0
        ##checking columns
        for i in range(1, n):
            ##checking rows
            for j in range(i+1, n):
                if board[i] == board[j]:
                    h += 1
                x = j - i
                ##checking the diagonals
                if board[i] == board[j] - x or board[i] == board[j] + x:
                    h += 1
        return h

class Agent(object):
    """Knows how to solve an n-queens problem with simulated annealing."""

    def anneal(self, board):
        """Return a list of moves to adjust queen placements."""
        temperature = len(board)**2
        annealRate = 0.95
        newHeuristic = heuristic(board)
        while newHeuristic > 0:
            board = makeMove(board, newHeuristic, temperature)
            newHeuristic = heuristic(board)
            newTemperature = max(temperature * annealRate, 0.01)
            temperature = newTemperature
            ##steps cap is here to avoid the algorithm getting stuck
            if steps >= 10000:
                break
        boardCopy = list(board)
        foundMove = False
        while not foundMove:
            boardCopy = list(board)
            newRow = random.randint(0, len(board)-1)
            newColumn = random.randint(0, len(board)-1)
            boardCopy[newColumn] = newRow
            newHeuristic = heuristic(boardCopy)
            if newHeuristic < optimalHeuristic:
                foundMove = True
            else:
                delta_e = optimalHeuristic - newHeuristic
                ##acceptance prob equation min(1, e**(delta e/temp))
                acceptProbability = min(1, math.exp(delta_e/temperature))
                foundMove = random.random() <= acceptProbability
        return boardCopy

def main():
    """Create a problem, solve it with simulated annealing, and console-animate."""
    print("Enter the number of queens")
    n = input()
    queens = dict()
    for column in range(n):
        row = random.choice(range(n))
        queens[column] = row
    board = Board(queens)
    board.display()
    agent = Agent()
    path = agent.anneal(board)
    while path:
        move = path.pop(0)
        board = board.neighbor(move)
        time.sleep(0.1)
        board.display()

if __name__ == '__main__':
    main()
Output:
Enter the number of queens
8
- - - - - - Q -
- - - - - Q - -
- - - - - - - Q
- - - - - - - -
- Q - - - - - -
- - - Q Q - - -
- - - - - - - -
Q - Q - - - - -
Traceback (most recent call last):
File "F:\Intelligent Systems\annealingnqueens.py", line 119, in <module>
main()
File "F:\Intelligent Systems\annealingnqueens.py", line 110, in main
path = agent.anneal(board)
File "F:\Intelligent Systems\annealingnqueens.py", line 66, in anneal
temperature = len(board)**2
TypeError: object of type 'Board' has no len()

You need to define a __len__ method for your Board class; otherwise the call len(board) is not defined. This and many other special double-underscore methods are described in the Python 3 documentation (or, if you prefer, the Python 2 documentation). Presuming that the number of queens is the quantity you want:
class Board(object):
    """An N-queens solution attempt."""

    def __init__(self, queens):
        """Instances differ by their queen placements."""
        self.queens = queens.copy()

    def __len__(self):
        return len(self.queens)
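As a quick, hypothetical sanity check of that fix (the queens dict below is made up for illustration, following the column-to-row mapping built in main()):

queens = {0: 3, 1: 1, 2: 4, 3: 0}   # column -> row
board = Board(queens)
print(len(board))       # 4
print(len(board) ** 2)  # 16, the initial temperature computed in anneal()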

Related

Heap Dijkstra Implementation is slower than Naive Dijkstra Implementation

I have tried to implement the naive and heap-based Dijkstra as shown below, but somehow my naive Dijkstra implementation is surprisingly faster. I debugged my code but couldn't figure out where the problem in my implementation is.
Why is my heap-based implementation slower than the naive implementation?
Raw data is stored here:
https://www.algorithmsilluminated.org/datasets/problem9.8.txt
Data Import and Manipulation:
import time
with open("DijkstraTest2.txt", 'r') as input:
    lines = input.readlines()

lengths = {}
vertices = []
for line in lines:
    contents = line.split("\t")
    vertices.append(contents[0])
    for content in contents:
        content = content.replace('\n', '')
        if ',' in content:
            edge = contents[0] + '-' + content.split(',')[0]
            lengths[edge] = int(content.split(',')[1])
Naive Dijkstra:
def NaiveDijkstra(vertices, start_point, lengths):
    X = [start_point]
    shortest_paths = {}
    for vertex in vertices:
        if vertex == start_point:
            shortest_paths[vertex] = 0
        else:
            shortest_paths[vertex] = 999999999999
    subset = [key for key in lengths.keys() if start_point == key.split('-')[0]
              and key.split('-')[0] in X and key.split('-')[1] not in X]
    while len(subset) > 0:
        temp_min_dict = {}
        for edge in subset:
            temp_min = shortest_paths[edge.split('-')[0]] + lengths[edge]
            temp_min_dict[edge] = temp_min
        new_edge = min(temp_min_dict, key=temp_min_dict.get)
        X.append(new_edge.split('-')[1])
        shortest_paths[new_edge.split('-')[1]] = shortest_paths[new_edge.split('-')[0]] + lengths[new_edge]
        subset = []
        for key in lengths.keys():
            if key.split('-')[0] in X and key.split('-')[1] not in X:
                subset.append(key)
    return shortest_paths
start_time = time.time()
print(NaiveDijkstra(vertices = vertices, start_point = '1', lengths = lengths)['197'])
print(time.time() - start_time, "seconds")
My Heap based Dijkstra code:
class Heap:
    def __init__(self):
        self.size = 0
        self.lst = []

    def swap(self, a):
        if self.size == 1:
            return self.lst
        else:
            if a == 1:
                i = 1
            else:
                i = a // 2
            while i > 0:
                if i * 2 - 1 >= self.size:
                    break
                elif self.lst[i - 1][1] > self.lst[i * 2 - 1][1]:
                    temp = self.lst[i - 1]
                    self.lst[i - 1] = self.lst[i * 2 - 1]
                    self.lst[i * 2 - 1] = temp
                elif i * 2 >= self.size:
                    break
                elif self.lst[i - 1][1] > self.lst[i * 2][1]:
                    temp = self.lst[i - 1]
                    self.lst[i - 1] = self.lst[i * 2]
                    self.lst[i * 2] = temp
                i -= 1
        # print(f"output: {self.lst}")

    def insert(self, element):
        # print(f"input: {self.lst}")
        self.lst.append(element)
        self.size += 1
        self.swap(self.size)

    def extractmin(self):
        val = self.lst.pop(0)[0]
        self.size -= 1
        self.swap(self.size - 1)
        return val

    def delete(self, deleted):
        ix = self.lst.index(deleted)
        temp = self.lst[-1]
        self.lst[ix] = temp
        self.lst[-1] = deleted
        self.lst.pop(-1)
        self.size -= 1
        #self.swap(self.size)

def FastDijkstra(vertices, start_point, lengths):
    X = []
    h = Heap()
    width = {}
    shortest_paths = {}
    for vertex in vertices:
        if vertex == start_point:
            width[vertex] = 0
            h.insert((vertex, width[vertex]))
        else:
            width[vertex] = 999999999999
            h.insert((vertex, width[vertex]))
    while h.size > 0:
        w = h.extractmin()
        X.append(w)
        shortest_paths[w] = width[w]
        Y = set(vertices).difference(X)
        for x in X:
            for y in Y:
                key = f"{x}-{y}"
                if lengths.get(key) is not None:
                    h.delete((y, width[y]))
                    if width[y] > shortest_paths[x] + lengths[key]:
                        width[y] = shortest_paths[x] + lengths[key]
                    h.insert((y, width[y]))
    return shortest_paths
start_time = time.time()
print(FastDijkstra(vertices=vertices, start_point='1', lengths=lengths)['197'])
print(time.time() - start_time, "seconds")
The way you implemented the heap version is not efficient. Notably the following make it inefficient:
All nodes are put on the heap instead of only the direct neighbors of the visited nodes. This makes the heap large and slower than needed.
Y = set(vertices).difference(X) is a slow operation, and makes Y unnecessarily large.
The nested loop tries every pair in the Cartesian product of X and Y to see whether it is an edge. This point, together with the previous one, should be replaced with a collection of edges starting from X, discarding edges that lead to already visited nodes.
For every found edge you delete the target node from the heap and re-insert it, even if the width didn't change! Deletion is a costly operation (see the next point). This would only be an option if the Heap implementation supported a decrease-key operation; otherwise the heap should just get an extra entry for the same vertex, knowing that the one with the lowest cost will come out of the heap first.
The heap's delete method has a bad time complexity due to the .index() call.
The heap's extractmin method has a bad time complexity due to the .pop(0) call, which is O(n).
The heap's extractmin does not give correct results (again due to that pop(0)). Here is a little script that shows a mistake:
h = Heap()
for value in 4, 3, 5, 2, 1:
    h.insert((value, value))

print(h.extractmin())  # 1 = OK
print(h.extractmin())  # 2 = OK
print(h.extractmin())  # 4 = NOK. 3 expected.
The data structure lengths does not allow you to quickly find the edges leaving a particular vertex. This is a point that also slows down the naive implementation. I would suggest turning it into a dict keyed by source vertex.
If this is done right, it should run faster. Certainly when you make use of the native heapq module you'll get good running times.
Here is a (much) faster implementation. It doesn't bother about unreachable vertices, and doesn't bother about possibly having multiple entries on the heap for the same node (with different distances). But it does start with only the starting node on the heap, and uses heapq:
from heapq import heappush, heappop
from collections import defaultdict
def FastDijkstra(vertices, start_point, lengths):
    # Create a dictionary for the edges, keyed by source node
    edges = defaultdict(list)
    for key, length in lengths.items():
        x, y = key.split("-")
        edges[x].append((length, y))

    heap = [(0, start_point)]
    shortest_paths = {}
    while heap:
        cost, x = heappop(heap)
        if x in shortest_paths:
            continue  # this vertex had already been on the heap before
        shortest_paths[x] = cost
        for length, y in edges[x]:
            if y not in shortest_paths:
                heappush(heap, (cost + length, y))
    return shortest_paths
In my tests this ran hundreds of times faster.
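For completeness, a usage sketch timed the same way as the code in the question (this assumes vertices and lengths were built by the parsing code at the top of the question):

import time  # already imported in the question's parsing code

start_time = time.time()
# '197' is the destination vertex queried in the question
print(FastDijkstra(vertices=vertices, start_point='1', lengths=lengths)['197'])
print(time.time() - start_time, "seconds")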
Thanks to the above answer (wonderful analysis), I adjusted my implementation, which is now way faster than the previous version. It is shown below.
class Heap:
    def __init__(self):
        self.size = 0
        self.lst = []

    def swap(self, a):
        if self.size == 1:
            return self.lst
        else:
            if a == 1:
                i = 1
            else:
                i = a // 2
            while i > 0:
                if i * 2 - 1 >= self.size:
                    break
                elif self.lst[i - 1][1] > self.lst[i * 2 - 1][1]:
                    temp = self.lst[i - 1]
                    self.lst[i - 1] = self.lst[i * 2 - 1]
                    self.lst[i * 2 - 1] = temp
                elif i * 2 >= self.size:
                    break
                elif self.lst[i - 1][1] > self.lst[i * 2][1]:
                    temp = self.lst[i - 1]
                    self.lst[i - 1] = self.lst[i * 2]
                    self.lst[i * 2] = temp
                elif self.lst[2*i - 1][1] > self.lst[i * 2][1]:
                    temp = self.lst[2*i - 1]
                    self.lst[2*i - 1] = self.lst[i * 2]
                    self.lst[i * 2] = temp
                i -= 1
        #print(f"output: {self.lst}")

    def insert(self, element):
        #print(f"input: {self.lst}")
        self.lst.append(element)
        self.size += 1
        self.swap(self.size)

    def extractmin(self):
        val = self.lst[0][0]
        del self.lst[0]
        self.size -= 1
        self.swap(self.size-1)
        return val

    def delete(self, deleted):
        ix = self.lst.index(deleted)
        temp = self.lst[-1]
        self.lst[ix] = temp
        self.lst[-1] = deleted
        del self.lst[-1]
        self.size -= 1
        #self.swap(self.size)

def FastDijkstra(vertices, start_point, lengths):
    X = []
    h = Heap()
    width = {}
    shortest_paths = {}
    for vertex in vertices:
        if vertex == start_point:
            width[vertex] = 0
            h.insert((vertex, width[vertex]))
        else:
            width[vertex] = 999999999999
            h.insert((vertex, width[vertex]))
    while h.size > 0:
        w = h.extractmin()
        X.append(w)
        shortest_paths[w] = width[w]
        Y = set(vertices).difference(X)
        for y in Y:
            key = f"{w}-{y}"
            if lengths.get(key) is not None:
                h.delete((y, width[y]))
                if width[y] > shortest_paths[w] + lengths[key]:
                    width[y] = shortest_paths[w] + lengths[key]
                h.insert((y, width[y]))
    return shortest_paths
start_time = time.time()
print(FastDijkstra(vertices=vertices, start_point='1', lengths=lengths)['197'])
print(time.time() - start_time, "seconds")

How can I implement IDA* algorithm in Python for 15-Puzzle problem?

I'm trying to solve the 15-Puzzle problem using the IDA* algorithm and the Manhattan heuristic.
I already implemented the algorithm from the pseudocode in this Wikipedia page (link).
Here's my code so far:
def IDA(initial_state, goal_state):
    initial_node = Node(initial_state)
    goal_node = Node(goal_state)
    threshold = manhattan_heuristic(initial_state, goal_state)
    path = [initial_node]
    while 1:
        tmp = search(path, goal_state, 0, threshold)
        if tmp == True:
            return path, threshold
        elif tmp == float('inf'):
            return False
        else:
            threshold = tmp

def search(path, goal_state, g, threshold):
    node = path[-1]
    f = g + manhattan_heuristic(node.state, goal_state)
    if f > threshold:
        return f
    if np.array_equal(node.state, goal_state):
        return True
    minimum = float('inf')
    for n in node.nextnodes():
        if n not in path:
            path.append(n)
            tmp = search(path, goal_state, g + 1, threshold)
            if tmp == True:
                return True
            if tmp < minimum:
                minimum = tmp
            path.pop()
    return minimum

def manhattan_heuristic(state1, state2):
    size = range(1, len(state1) ** 2)
    distances = [count_distance(num, state1, state2) for num in size]
    return sum(distances)

def count_distance(number, state1, state2):
    position1 = np.where(state1 == number)
    position2 = np.where(state2 == number)
    return manhattan_distance(position1, position2)

def manhattan_distance(a, b):
    return abs(b[0] - a[0]) + abs(b[1] - a[1])

class Node():
    def __init__(self, state):
        self.state = state

    def nextnodes(self):
        zero = np.where(self.state == 0)
        y,x = zero
        y = int(y)
        x = int(x)
        up = (y - 1, x)
        down = (y + 1, x)
        right = (y, x + 1)
        left = (y, x - 1)
        arr = []
        for direction in (up, down, right, left):
            if len(self.state) - 1 >= direction[0] >= 0 and len(self.state) - 1 >= direction[1] >= 0:
                tmp = np.copy(self.state)
                tmp[direction[0], direction[1]], tmp[zero] = tmp[zero], tmp[direction[0], direction[1]]
                arr.append(Node(tmp))
        return arr
I'm testing this code with a 3x3 puzzle, and I'm running into an infinite loop! Because of the recursion I have some trouble testing my code...
I think the error might be here: tmp = search(path, goal_state, g + 1, threshold) (in the search function). I'm adding only one to the g cost value. It should be correct though, because I can only move a tile one place at a time.
Here's how to call the IDA() function:
initial_state = np.array([[8, 7, 3], [4, 1, 2], [0, 5, 6]])
goal_state = np.array([[1, 2, 3], [8, 0, 4], [7, 6, 5]])
IDA(initial_state, goal_state)
Can someone help me with this?
There are a couple of issues in your IDA* implementation. First, what is the purpose of the variable path? I found two purposes of path in your code:
Use it as a flag/map to keep track of the board states that have already been visited.
Use it as a stack to manage recursion states.
But it is not possible to do both with a single data structure. So, these are the first modifications your code requires:
Fix-1: Pass current node as a parameter to the search method.
Fix-2: flag should be a data structure that can perform a not in query efficiently.
Now, fix-1 is easy, as we can just pass the currently visited node as a parameter to the search method. For fix-2, we need to change the type of flag from list to set, because:
a list's average-case complexity for x in s is O(n);
a set's average-case complexity for x in s is O(1), and its worst case is O(n).
You can check Performance for testing memberships: list vs sets for more details.
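A rough, illustrative way to see that difference in practice (timings are machine-dependent):

import timeit

items_list = list(range(100000))
items_set = set(items_list)

# membership test for an element near the end of the collection
print(timeit.timeit('99999 in items_list', globals=globals(), number=1000))  # scans the list: O(n) per test
print(timeit.timeit('99999 in items_set', globals=globals(), number=1000))   # hash lookup: O(1) on average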
Now, to keep Node objects in a set, you need to implement __eq__ and __hash__ in your Node class. I have attached the modified code below.
import timeit
import numpy as np
def IDA(initial_state, goal_state):
    initial_node = Node(initial_state)
    goal_node = Node(goal_state)
    threshold = manhattan_heuristic(initial_state, goal_state)
    #print("heuristic threshold: {}".format(threshold))
    loop_counter = 0
    while 1:
        path = set([initial_node])
        tmp = search(initial_node, goal_state, 0, threshold, path)
        #print("tmp: {}".format(tmp))
        if tmp == True:
            return True, threshold
        elif tmp == float('inf'):
            return False, float('inf')
        else:
            threshold = tmp

def search(node, goal_state, g, threshold, path):
    #print("node-state: {}".format(node.state))
    f = g + manhattan_heuristic(node.state, goal_state)
    if f > threshold:
        return f
    if np.array_equal(node.state, goal_state):
        return True
    minimum = float('inf')
    for n in node.nextnodes():
        if n not in path:
            path.add(n)
            tmp = search(n, goal_state, g + 1, threshold, path)
            if tmp == True:
                return True
            if tmp < minimum:
                minimum = tmp
    return minimum

def manhattan_heuristic(state1, state2):
    size = range(1, len(state1) ** 2)
    distances = [count_distance(num, state1, state2) for num in size]
    return sum(distances)

def count_distance(number, state1, state2):
    position1 = np.where(state1 == number)
    position2 = np.where(state2 == number)
    return manhattan_distance(position1, position2)

def manhattan_distance(a, b):
    return abs(b[0] - a[0]) + abs(b[1] - a[1])

class Node():
    def __init__(self, state):
        self.state = state

    def __repr__(self):
        return np.array_str(self.state.flatten())

    def __hash__(self):
        return hash(self.__repr__())

    def __eq__(self, other):
        return self.__hash__() == other.__hash__()

    def nextnodes(self):
        zero = np.where(self.state == 0)
        y,x = zero
        y = int(y)
        x = int(x)
        up = (y - 1, x)
        down = (y + 1, x)
        right = (y, x + 1)
        left = (y, x - 1)
        arr = []
        for direction in (up, down, right, left):
            if len(self.state) - 1 >= direction[0] >= 0 and len(self.state) - 1 >= direction[1] >= 0:
                tmp = np.copy(self.state)
                tmp[direction[0], direction[1]], tmp[zero] = tmp[zero], tmp[direction[0], direction[1]]
                arr.append(Node(tmp))
        return arr

initial_state = np.array([[8, 7, 3],[4, 1, 2],[0, 5, 6]])
goal_state = np.array([[1, 2, 3],[8, 0, 4],[7, 6, 5]])

start = timeit.default_timer()
is_found, th = IDA(initial_state, goal_state)
stop = timeit.default_timer()

print('Time: {} seconds'.format(stop - start))
if is_found is True:
    print("Solution found with heuristic-upperbound: {}".format(th))
else:
    print("Solution not found!")
Note: Please double-check your Node.nextnodes() and manhattan_heuristic() methods, as I did not pay much attention to those areas. You can check this GitHub repository for other algorithmic implementations (i.e., A*, IDS, DLS) that solve this problem.
References:
Python Wiki: Time Complexity
TechnoBeans: Performance for testing memberships: list vs tuples vs sets
GitHub: Puzzle Solver (by using problem solving techniques)

Efficient enemy avoidance for pathfinding with fewer enemies

I am currently working on a 2D top down rogue-like game using Python. The map is a dungeon containing many open rectangular rooms (image), each with around 2-4 enemies inside. I am currently looking to implement a path-finding system where the enemies will move around each other and attempt to swarm the player.
So far, I have implemented an A* algorithm that does allow the enemies to navigate and swarm the player in this way. However, my approach is causing very low frame rates: generally around 15 FPS but it will go as low as under 1 FPS when an enemy has no path to the player. I feel it is very inefficient, since path-finding is being done for every enemy on each frame. Currently, other enemies are seen as obstacles for the A* algorithm, and the only optimization is that an enemy will move directly towards the player if there are no obstacles in its way. Here's the code:
import heapq
#...
FLOOR = 1
#...
class Game:
    def __init__(self):
        #...
        self.pathfindingGranularity = 5

    # Slope and line intersection functions are based on: https://www.codeproject.com/Tips/864704/Python-Line-Intersection-for-Pygame
    def lineInRect(self, start, end, r):
        if start in r and end in r: return True
        if self.segmentIntersect(start, end, r.origin, Point(r.x + r.width, r.y)) is not None: return True
        if self.segmentIntersect(start, end, Point(r.x, r.y + r.height), Point(r.x + r.width, r.y + r.height)) is not None: return True
        if self.segmentIntersect(start, end, r.origin, Point(r.x, r.y + r.height)) is not None: return True
        if self.segmentIntersect(start, end, Point(r.x + r.width, r.y), Point(r.x + r.width, r.y + r.height)) is not None: return True
        return False

    def slope(self, p1, p2):
        if p2.x - p1.x == 0: return 1e10
        return (p2.y - p1.y) / (p2.x - p1.x)

    def yIntercept(self, slope, p1):
        return p1.y - slope * p1.x

    def lineIntersect(self, start1, end1, start2, end2):
        min_allowed = 1e-5
        big_value = 1e10
        m1 = self.slope(start1, end1)
        b1 = self.yIntercept(m1, start1)
        m2 = self.slope(start2, end2)
        b2 = self.yIntercept(m2, start2)
        if abs(m1 - m2) < min_allowed: x = big_value if (b2 - b1 >= 0) else -big_value
        else: x = (b2 - b1) / (m1 - m2)
        y = m1 * x + b1
        return Point(x, y)

    def segmentIntersect(self, start1, end1, start2, end2):
        intersection = self.lineIntersect(start1, end1, start2, end2)
        def approx(f):
            return round(f * 10000) / 10000
        if not approx(start1.x) <= approx(intersection.x) <= approx(end1.x):
            if not approx(end1.x) <= approx(intersection.x) <= approx(start1.x):
                return None
        if not approx(start2.x) <= approx(intersection.x) <= approx(end2.x):
            if not approx(end2.x) <= approx(intersection.x) <= approx(start2.x):
                return None
        if not approx(start1.y) <= approx(intersection.y) <= approx(end1.y):
            if not approx(end1.y) <= approx(intersection.y) <= approx(start1.y):
                return None
        if not approx(start2.y) <= approx(intersection.y) <= approx(end2.y):
            if not approx(end2.y) <= approx(intersection.y) <= approx(start2.y):
                return None
        return intersection

class Enemy (Entity):
    def update(self, game):
        #...
        if not self.getRect().intersects(game.player.getRect()) and self.canMove():
            self.generatePath(game)
        if self.path:
            # Move towards player
        elif self.canMove():
            # Hurt the player
        #...

    def generatePath(self, game):
        if not self.lineOccupied(Point(self.x, self.y), game.player.getCenterpoint(), game):
            self.path = [game.player.getCenterpoint()]
            return
        frontier = PriorityQueue()
        start = Point(self.x, self.y)
        frontier.put(start, 0)
        came_from = {}
        came_from[start] = None
        done = False
        while not frontier.empty():
            current = frontier.get()
            if Rect(current.x + self.hitbox.x, current.y + self.hitbox.y, self.hitbox.w, self.hitbox.h).intersects(game.player.getRect()):
                done = True
                break
            for next in self.findAdjacents(current, game):
                if self.lineOccupied(current, next, game): continue
                if next not in came_from:
                    priority = self.heuristic(next, game)
                    frontier.put(next, priority)
                    came_from[next] = current
        if not done:
            self.path.clear()
        else:
            p = [current]
            while came_from[p[-1]] is not None:
                p.append(came_from[p[-1]])
            self.path = p[::-1][1:]
        i = 0

    def findAdjacents(self, currentPoint, game):
        d = 1 / game.pathfindingGranularity
        for x in (currentPoint.x - d, currentPoint.x, currentPoint.x + d):
            for y in (currentPoint.y - d, currentPoint.y, currentPoint.y + d):
                if x == currentPoint.x and y == currentPoint.y: continue
                elif self.canWalkAtCoords(x, y, game):
                    yield Point(x, y)

    def canWalkAtCoords(self, x, y, game):
        for nx in (x, x + self.hitbox.w):
            for ny in (y, y + self.hitbox.h):
                if game.blockAt(nx, ny) != FLOOR:
                    return False
        return True

    def lineOccupied(self, start, end, game):
        for e in self.room.enemies:
            if e is self:
                continue
            for xo in (self.hitbox.x, self.hitbox.x + self.hitbox.w):
                for yo in (self.hitbox.y, self.hitbox.y + self.hitbox.h):
                    if game.lineInRect(start + Point(xo, yo), end + Point(xo, yo), e.getRect()):
                        return True
        return False
I feel like there should be a much more efficient solution to this, especially seeing as the room is rectangular and there are no extra walls or obstacles that the enemies need to move around, but so far my searches for a solution have come up empty-handed. Are there some optimizations I could make to increase the performance of the program? Or if not, is there a better pathfinding method I should look into? Any help would be greatly appreciated!
You should try to have your pathfinding start from your character and fan out using a breadth-first search (with some adjustment for slopes). Every time you come across an enemy, you can compute its optimal path toward the player.
That way you only do one pass across the whole board rather than one for each enemy.
Let me know if you want more details.
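A minimal sketch of that idea, assuming a plain grid of walkable cells (grid, walkable, and the coordinate convention are placeholders, not the game's actual API): one BFS flood fill from the player produces a parent map that every enemy can reuse.

from collections import deque

def bfs_from_player(grid, player_pos, walkable):
    """One breadth-first flood fill outward from the player.

    Returns a dict mapping each reachable cell to the neighbouring cell that is
    one step closer to the player (the player's own cell maps to None).
    """
    parent = {player_pos: None}
    queue = deque([player_pos])
    while queue:
        x, y = queue.popleft()
        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if nxt not in parent and walkable(grid, nxt):
                parent[nxt] = (x, y)  # stepping from nxt to (x, y) moves one cell closer to the player
                queue.append(nxt)
    return parent

def next_step_towards_player(enemy_pos, parent):
    """Each enemy does a dictionary lookup instead of running its own search."""
    return parent.get(enemy_pos)  # None if unreachable or already on the player's cell

The flood fill is recomputed once per frame (or only when the player moves to a new cell), and each enemy then just follows its parent chain; enemy-vs-enemy avoidance can be layered on top by skipping a step when the target cell is occupied.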

Slow performance in agent based model python

I originally posted this on code-review (hence the lengthy code) but failed to get an answer.
My model is based on this game https://en.wikipedia.org/wiki/Ultimatum_game . I won't go into the intuition behind it but generally speaking it functions as follows:
The game consists of an n x n lattice on which an agent is placed at each node.
During each time step, each player on each node plays against a random neighbour by playing a particular strategy.
Each of their strategies (a value between 1-9) has a propensity attached to it (which is randomly assigned and is just some number). The propensity then in turn determines the probability of playing that strategy. The probability is calculated as the propensity of that strategy over the sum of the propensities of all strategies.
If a game results in a positive payoff, then the payoffs from that game get added to the propensities for those strategies.
These propensities then determine the probabilities for their strategies in the next time step, and so on.
The simulation ends after time step N is reached.
For games with large lattices and large numbers of time steps, my code runs really, really slowly. I ran cProfile to check where the bottleneck(s) are, and as I suspected the update_probablities and play_rounds functions seem to be taking up a lot of time. I want to be able to run the game with a grid size of about 40x40 for about 100000+ time steps, but right now that is not happening.
So what would be a more efficient way to calculate and update the probabilities/propensities of each player in the grid? I've considered implementing NumPy arrays, but I am not sure whether it would be worth the hassle here.
import numpy as np
import random
from random import randint
from numpy.random import choice
from numpy.random import multinomial
import cProfile
mew = 0.001
error = 0.05
def create_grid(row, col):
    return [[0 for j in range(col)] for i in range(row)]

def create_random_propensities():
    propensities = {}
    pre_propensities = [random.uniform(0, 1) for i in range(9)]
    a = np.sum(pre_propensities)
    for i in range(1, 10):
        propensities[i] = (pre_propensities[i - 1]/a) * 10 # normalize sum of propensities to 10
    return propensities

class Proposer:
    def __init__(self):
        self.propensities = create_random_propensities()
        self.probabilites = []
        self.demand = 0 # the amount the proposer demands for themselves

    def pick_strat(self, n_trials): # gets strategy, an integer in the interval [1, 9]
        results = multinomial(n_trials, self.probabilites)
        i, = np.where(results == max(results))
        if len(i) > 1:
            return choice(i) + 1
        else:
            return i[0] + 1

    def calculate_probability(self, dict_data, index, total_sum): # calculates probability for particular strat, taking propensity
        return dict_data[index]/total_sum # of that strat as input

    def calculate_sum(self, dict_data):
        return sum(dict_data.values())

    def initialize(self):
        init_sum = self.calculate_sum(self.propensities)
        for strategy in range(1, 10):
            self.probabilites.append(self.calculate_probability(self.propensities, strategy, init_sum))
        self.demand = self.pick_strat(1)

    def update_strategy(self):
        self.demand = self.pick_strat(1)

    def update_probablities(self):
        for i in range(9):
            self.propensities[1 + i] *= 1 - mew
        pensity_sum = self.calculate_sum(self.propensities)
        for i in range(9):
            self.probabilites[i] = self.calculate_probability(self.propensities, 1 + i, pensity_sum)

    def update(self):
        self.update_probablities()
        self.update_strategy()

class Responder: # methods same as proposer class, can skip-over
    def __init__(self):
        self.propensities = create_random_propensities()
        self.probabilites = []
        self.max_thresh = 0 # the maximum demand they are willing to accept

    def pick_strat(self, n_trials):
        results = multinomial(n_trials, self.probabilites)
        i, = np.where(results == max(results))
        if len(i) > 1:
            return choice(i) + 1
        else:
            return i[0] + 1

    def calculate_probability(self, dict_data, index, total_sum):
        return dict_data[index]/total_sum

    def calculate_sum(self, dict_data):
        return sum(dict_data.values())

    def initialize(self):
        init_sum = self.calculate_sum(self.propensities)
        for strategy in range(1, 10):
            self.probabilites.append(self.calculate_probability(self.propensities, strategy, init_sum))
        self.max_thresh = self.pick_strat(1)

    def update_strategy(self):
        self.max_thresh = self.pick_strat(1)

    def update_probablities(self):
        for i in range(9):
            self.propensities[1 + i] *= 1 - mew # stops sum of propensities from growing without bound
        pensity_sum = self.calculate_sum(self.propensities)
        for i in range(9):
            self.probabilites[i] = self.calculate_probability(self.propensities, 1 + i, pensity_sum)

    def update(self):
        self.update_probablities()
        self.update_strategy()

class Agent:
    def __init__(self):
        self.prop_side = Proposer()
        self.resp_side = Responder()
        self.prop_side.initialize()
        self.resp_side.initialize()

    def update_all(self):
        self.prop_side.update()
        self.resp_side.update()

class Grid:
    def __init__(self, rowsize, colsize):
        self.rowsize = rowsize
        self.colsize = colsize

    def make_lattice(self):
        return [[Agent() for j in range(self.colsize)] for i in range(self.rowsize)]

    #staticmethod
    def von_neumann_neighbourhood(array, row, col, wrapped=True): # gets up, bottom, left, right neighbours of some node
        neighbours = set([])
        if row + 1 <= len(array) - 1:
            neighbours.add(array[row + 1][col])
        if row - 1 >= 0:
            neighbours.add(array[row - 1][col])
        if col + 1 <= len(array[0]) - 1:
            neighbours.add(array[row][col + 1])
        if col - 1 >= 0:
            neighbours.add(array[row][col - 1])
        #if wrapped is on, conditions for out of bound points
        if row - 1 < 0 and wrapped == True:
            neighbours.add(array[-1][col])
        if col - 1 < 0 and wrapped == True:
            neighbours.add(array[row][-1])
        if row + 1 > len(array) - 1 and wrapped == True:
            neighbours.add(array[0][col])
        if col + 1 > len(array[0]) - 1 and wrapped == True:
            neighbours.add(array[row][0])
        return neighbours

def get_error_term(pay, strategy):
    index_strat_2, index_strat_8 = 2, 8
    if strategy == 1:
        return (1 - (error/2)) * pay, error/2 * pay, index_strat_2
    if strategy == 9:
        return (1 - (error/2)) * pay, error/2 * pay, index_strat_8
    else:
        return (1 - error) * pay, error/2 * pay, 0

class Games:
    def __init__(self, n_rows, n_cols, n_rounds):
        self.rounds = n_rounds
        self.rows = n_rows
        self.cols = n_cols
        self.lattice = Grid(self.rows, self.cols).make_lattice()
        self.lookup_table = np.full((self.rows, self.cols), False, dtype=bool) # if player on grid has updated their strat, set to True

    def reset_look_tab(self):
        self.lookup_table = np.full((self.rows, self.cols), False, dtype=bool)

    def run_game(self):
        n = 0
        while n < self.rounds:
            for r in range(self.rows):
                for c in range(self.cols):
                    if n != 0:
                        self.lattice[r][c].update_all()
                        self.lookup_table[r][c] = True
                    self.play_rounds(self.lattice, r, c)
            self.reset_look_tab()
            n += 1

    def play_rounds(self, grid, row, col):
        neighbours = Grid.von_neumann_neighbourhood(grid, row, col)
        neighbour = random.sample(neighbours, 1).pop()
        neighbour_index = [(ix, iy) for ix, row in enumerate(self.lattice) for iy, i in enumerate(row) if i == neighbour]
        if self.lookup_table[neighbour_index[0][0]][neighbour_index[0][1]] == False: # see if neighbour has already updated their strat
            neighbour.update_all()
        player = grid[row][col]
        coin_toss = randint(0, 1) # which player acts as proposer or responder in game
        if coin_toss == 1:
            if player.prop_side.demand <= neighbour.resp_side.max_thresh: # positive payoff
                payoff, adjacent_payoff, index = get_error_term(player.prop_side.demand, player.prop_side.demand)
                if player.prop_side.demand == 1 or player.prop_side.demand == 9: # extreme strategies get bonus payoffs
                    player.prop_side.propensities[player.prop_side.demand] += payoff
                    player.prop_side.propensities[index] += adjacent_payoff
                else:
                    player.prop_side.propensities[player.prop_side.demand] += payoff
                    player.prop_side.propensities[player.prop_side.demand - 1] += adjacent_payoff
                    player.prop_side.propensities[player.prop_side.demand + 1] += adjacent_payoff
            else:
                return 0 # if demand > max thresh -> both get zero
        if coin_toss != 1:
            if neighbour.prop_side.demand <= player.resp_side.max_thresh:
                payoff, adjacent_payoff, index = get_error_term(10 - neighbour.prop_side.demand, player.resp_side.max_thresh)
                if player.resp_side.max_thresh == 1 or player.resp_side.max_thresh == 9:
                    player.resp_side.propensities[player.resp_side.max_thresh] += payoff
                    player.resp_side.propensities[index] += adjacent_payoff
                else:
                    player.resp_side.propensities[player.resp_side.max_thresh] += payoff
                    player.resp_side.propensities[player.resp_side.max_thresh - 1] += adjacent_payoff
                    player.resp_side.propensities[player.resp_side.max_thresh + 1] += adjacent_payoff
            else:
                return 0
#pr = cProfile.Profile()
#pr.enable()
my_game = Games(10, 10, 2000) # (rowsize, colsize, n_steps)
my_game.run_game()
#pr.disable()
#pr.print_stats(sort='time')
(For those who might be wondering, get_error_term just returns the propensities for strategies that are adjacent to a strategy receiving a positive payoff; for example, if strategy 8 works, then 7's and 9's propensities also get adjusted upwards, and this is calculated by said function. And the first for loop inside update_probablities just makes sure that the sum of propensities doesn't grow without bound.)
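Since the question asks whether NumPy would be worth the hassle, here is a minimal sketch (not the model's actual code; the class and attribute names are placeholders) of how one side's propensity-to-probability update could be vectorised:

import numpy as np

mew = 0.001  # same decay constant as in the question

class SideVectorised:
    """Sketch: propensities for strategies 1..9 stored at indices 0..8."""

    def __init__(self):
        props = np.random.uniform(0, 1, 9)
        self.propensities = props / props.sum() * 10  # normalise the sum to 10, like create_random_propensities
        self.probabilities = self.propensities / self.propensities.sum()

    def update_probablities(self):
        self.propensities *= 1 - mew                                       # replaces the first for-loop
        self.probabilities = self.propensities / self.propensities.sum()   # replaces the second for-loop

    def pick_strat(self):
        return int(np.random.choice(9, p=self.probabilities)) + 1          # strategy in [1, 9]

The payoff updates in play_rounds would then index the array with strategy - 1; whether the switch actually pays off is something a profiler run would need to confirm.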

Project Euler 461 - Genetic Algorithm

Someone told me that this problem should be easy to solve with a genetic algorithm.
I read some stuff about this topic (I hadn't heard about it before), and wrote (and copied) some code.
The results I get are close to optimum, but not close enough.
I'd like to have some help with it:
import time
import math
import random
def f(n, k):
    return math.exp(k / n) - 1

def individual(length, min, max):
    'Create a member of the population.'
    return [random.randint(min, max) for x in range(length)]

def population(count, length, min, max):
    """
    Create a number of individuals (i.e. a population).

    count: the number of individuals in the population
    length: the number of values per individual
    min: the minimum possible value in an individual's list of values
    max: the maximum possible value in an individual's list of values
    """
    return [individual(length, min, max) for x in range(count)]

def fitness(individual, target):
    def get_best_last_element(a, b, c):
        s = math.pi - f(eu461.BASE, a) - f(eu461.BASE, b) - f(eu461.BASE, c)
        s += 1
        if s > 1:
            return round(math.log(s) * eu461.BASE)
        else:
            return 0

    def getg():
        return get_best_last_element

    """
    Determine the fitness of an individual. Higher is better.

    individual: the individual to evaluate
    target: the target number individuals are aiming for
    """
    l = get_best_last_element(individual[0], individual[1], individual[2])
    return abs(target - sum([f(eu461.BASE, k) for k in individual]) - f(eu461.BASE, l))

def grade(pop, target):
    'Find average fitness for a population.'
    return sum([fitness(x, target) for x in pop]) / (len(pop))

def evolve(pop, target, retain=0.2, random_select=0.05, mutate=0.01):
    graded = [(fitness(x, target), x) for x in pop]
    graded = [x[1] for x in sorted(graded)]
    retain_length = int(len(graded) * retain)
    parents = graded[:retain_length]
    # randomly add other individuals to
    # promote genetic diversity
    for individual in graded[retain_length:]:
        if random_select > random.random():
            parents.append(individual)
    # mutate some individuals
    for individual in parents:
        if mutate > random.random():
            pos_to_mutate = random.randint(0, len(individual) - 1)
            # this mutation is not ideal, because it
            # restricts the range of possible values,
            # but the function is unaware of the min/max
            # values used to create the individuals,
            individual[pos_to_mutate] = random.randint(min(individual), max(individual))
    # crossover parents to create children
    parents_length = len(parents)
    desired_length = len(pop) - parents_length
    children = []
    while len(children) < desired_length:
        male = random.randint(0, parents_length - 1)
        female = random.randint(0, parents_length - 1)
        if male != female:
            male = parents[male]
            female = parents[female]
            half = len(male) // 2
            if random.randint(0, 1):
                child = male[:half] + female[half:]
            else:
                child = female[:half] + male[half:]
            children.append(child)
    parents.extend(children)
    return parents

def get_best_last_element(a, b, c):
    s = math.pi - f(eu461.BASE, a) - f(eu461.BASE, b) - f(eu461.BASE, c)
    s += 1
    if s > 0:
        return round(math.log(s) * eu461.BASE)
    else:
        return 0

def eu461():
    target = math.pi
    p_count = 10000
    i_length = 3
    i_min = 0
    i_max = round(eu461.BASE * math.log(math.pi + 1))
    p = population(p_count, i_length, i_min, i_max)
    fitness_history = [grade(p, target),]
    for i in range(150):
        p = evolve(p, target)
        fitness_history.append(grade(p, target))
    for datum in fitness_history:
        pass #print (datum)
    return p[0], get_best_last_element(p[0][0], p[0][1], p[0][2]), sum([f(eu461.BASE, k) for k in p[0]]) + f(eu461.BASE, get_best_last_element(p[0][0], p[0][1], p[0][2]))

eu461.BASE = 200

if __name__ == "__main__":
    startTime = time.clock()
    print (eu461())
    elapsedTime = time.clock() - startTime
    print ("Time spent in (", __name__, ") is: ", elapsedTime, " sec")
