Do I need another layer in my neural network? - python

I'm trying to program a neural network to play noughts and crosses (also known as tic-tac-toe). It works well enough to play against, and the loss decreases when I train it, but only up to a point, after which it plateaus. I have tried playing against it and it is still not much better than a random bot.
I have already tried adjusting the learning rate and the size of the hidden layer. I have also previously tried training it on my games against it, as well as training it only on the moves of the winning side (interestingly, this minimised the loss better than my current version, but the resulting bot only tried to attack and never realised it had to stop me from winning). In addition, I have tried a schedule that decreases the learning rate by 5 or 10% every 100 self-play games.
I have had a look online but haven't found any Python neural networks for tic-tac-toe that I can compare mine to in order to debug.
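The decay schedule I mean is along these lines (just a rough sketch; the names here are illustrative and not the ones used in the code below):

# decrease the learning rate by 5% every 100 self-play games (illustrative only)
learning_rate = 0.5
for game_number in range(1, 10001):
    # ... play one self-play game and run the training update here ...
    if game_number % 100 == 0:
        learning_rate *= 0.95  # use 0.90 for a 10% drop instead

Here is the full code: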
import random
def bot_go(player_to_move, game_over, board):
    played = False
    iteration = 0
    while played is False:
        move, input_layer, hidden_layer, output_layer = neural_net_move(iteration, board)
        if board[int(move[0])][int(move[1])] == "-":
            played = True
            board[int(move[0])][int(move[1])] = player_to_move
            if check_for_win(player_to_move, board) is True:
                game_over = True
            elif check_for_draw(board) is True:
                game_over = True
            if player_to_move == "X":
                player_to_move = "O"
            else:
                player_to_move = "X"
        iteration += 1
    return game_over, player_to_move, move, input_layer, hidden_layer, output_layer

def neural_net_move(iteration, board):
    neural_network_input = {}
    neural_network_hidden_layer = {}
    neural_network_output = []
    layer_1_weights, layer_2_weights, bias = get_neural_network()
    # initialises the input layer
    for i in range(9):
        if board[i // 3][i % 3] == "X":
            neural_network_input[i] = 0
        elif board[i // 3][i % 3] == "O":
            neural_network_input[i] = 1
        else:
            neural_network_input[i] = 0.5
    # calculates the hidden layer neuron values
    for i in range(Global_variables.hidden_layer_size):
        net_total = 0
        for j in range(9):
            net_total += neural_network_input[j]*layer_1_weights[str(j) + str(i)]
        neural_network_hidden_layer[i] = (1/(1 + 2.718**(-net_total)))
    # calculates neural network output
    for i in range(9):
        net_total = 0
        for j in range(Global_variables.hidden_layer_size):
            net_total += neural_network_hidden_layer[j] * layer_2_weights[str(j) + str(i)]
        net_total += bias * layer_2_weights[str(Global_variables.hidden_layer_size) + str(i)]
        neural_network_output.append(1 / (1 + (2.718 ** (-net_total))))
    # finds output value by ordering the outputs in terms of size
    order_of_size = [0]
    for i in range(1, len(neural_network_output)):
        inserted = False
        for j in range(len(order_of_size)):
            if neural_network_output[i] > neural_network_output[order_of_size[j]] and inserted is False:
                order_of_size.insert(j, i)
                inserted = True
            elif j == len(order_of_size) - 1 and inserted is False:
                order_of_size.append(i)
    move = [order_of_size[iteration] // 3, order_of_size[iteration] % 3]
    return move, neural_network_input, neural_network_hidden_layer, neural_network_output

def train_neural_network(input_layer, hidden_layer, output_layer, actual_move):
    layer_1_weights, layer_2_weights, bias = get_neural_network()
    new_l1_weights = {}
    new_l2_weights = {}
    # calculates total error
    total_error = 0
    for i in range(len(output_layer)):
        if actual_move[0] * 3 + actual_move[1] == i:
            total_error += ((1 - output_layer[i])**2)/2
        else:
            total_error += 0.5*((output_layer[i])**2)
    # adjusts second layer weights
    for i in range((hidden_layer_size + 1)*9):
        if actual_move[0] * 3 + actual_move[1] == i % 9:
            d_error_by_d_output_node = output_layer[i % 9] - 1
        else:
            d_error_by_d_output_node = output_layer[i % 9]
        d_output_node_by_d_node_net_value = output_layer[i % 9]*(1 - output_layer[i % 9])
        if i // 9 != hidden_layer_size:
            d_node_net_value_by_d_weight = hidden_layer[i // 9]
        else:
            d_node_net_value_by_d_weight = bias
        d_error_by_d_weight = d_error_by_d_output_node*d_output_node_by_d_node_net_value*d_node_net_value_by_d_weight
        new_l2_weights[str(i // 9) + str(i % 9)] = \
            layer_2_weights[str(i // 9) + str(i % 9)] - learning_rate*d_error_by_d_weight
    # adjusts bias
    d_error_by_d_bias = 0
    for i in range(9):
        d_node_net_value_by_d_bias = layer_2_weights[str(hidden_layer_size) + str(i)]
        d_output_node_by_d_node_net_value = output_layer[i]*(1 - output_layer[i])
        if actual_move[0] * 3 + actual_move[1] == i:
            d_error_by_d_output_node = output_layer[i] - 1
        else:
            d_error_by_d_output_node = output_layer[i]
        d_error_by_d_bias += d_node_net_value_by_d_bias * d_output_node_by_d_node_net_value * d_error_by_d_output_node
    bias = bias - learning_rate * d_error_by_d_bias
    # adjusts first layer weights
    for i in range(hidden_layer_size*9):
        d_error_by_d_weight = 0
        if i // 9 != hidden_layer_size:
            d_output_of_node_by_d_node_net_value = \
                hidden_layer[i % hidden_layer_size]*(1 - hidden_layer[i % hidden_layer_size])
        else:
            d_output_of_node_by_d_node_net_value = \
                bias * (1 - bias)
        d_node_net_value_by_d_weight = input_layer[i // Global_variables.hidden_layer_size]
        for j in range(9):
            d_output_node_net_value_by_d_output_of_node = layer_2_weights[str(i // 9) + str(j)]
            d_output_node_by_d_output_node_net_value = output_layer[j]*(1 - output_layer[j])
            if actual_move[0] * 3 + actual_move[1] == i:
                d_error_by_d_output_node = output_layer[j] - 1
            else:
                d_error_by_d_output_node = output_layer[j]
            d_error_by_d_weight += d_output_of_node_by_d_node_net_value * d_node_net_value_by_d_weight * \
                d_output_node_net_value_by_d_output_of_node * d_output_node_by_d_output_node_net_value * \
                d_error_by_d_output_node
        new_l1_weights[str(i // hidden_layer_size) + str(i % hidden_layer_size)] = \
            layer_1_weights[str(i // hidden_layer_size) + str(i % hidden_layer_size)] - \
            d_error_by_d_weight * learning_rate
    network_file = open("neural network", "w")
    line = ""
    for i in range(9 * hidden_layer_size):
        line += str(new_l1_weights[str(i // hidden_layer_size) + str(i % hidden_layer_size)]) + " "
    network_file.write(line + "\n")
    line = ""
    for i in range(9 * (hidden_layer_size + 1)):
        line += str(new_l2_weights[str(i // 9) + str(i % 9)]) + " "
    network_file.write(line + "\n")
    network_file.write(str(bias))
    network_file.close()
    return total_error

def get_neural_network():
    layer_1_weights = {}
    layer_2_weights = {}
    # opens text file holding neural network
    network_file = open("neural network", "r")
    network = network_file.readlines()
    network_file.close()
    # gets the neural network weights from the text file
    weight_list = network[0].split()
    for i in range(len(weight_list)):
        layer_1_weights[str(i // Global_variables.hidden_layer_size) + str(i % Global_variables.hidden_layer_size)] = float(weight_list[i])
    weight_list = network[1].split()
    for i in range(len(weight_list)):
        layer_2_weights[str(i // 9) + str(i % 9)] = float(weight_list[i])
    bias = 1
    return layer_1_weights, layer_2_weights, bias

def make_up_neural_net():
    network_file = open("neural network", "w")
    line = ""
    for i in range(9*Global_variables.hidden_layer_size):
        line += str(random.random()) + " "
    network_file.write(line + "\n")
    line = ""
    for i in range(9*(Global_variables.hidden_layer_size + 1)):
        line += str(random.random()) + " "
    network_file.write(line + "\n")
    network_file.write(str(random.random()))
    network_file.close()

def main():
    error = 0
    make_up_neural_net()
    for i in range(100):
        for j in range(100):
            game_over = False
            winner = ""
            player_to_move = "X"
            board = set_up_board()
            o_moves = []
            x_moves = []
            while game_over is False:
                if player_to_move == "X":
                    game_over, player_to_move, move, input_layer, hidden_layer, output_layer = bot_go(player_to_move, game_over, board)
                    x_moves.append([move, input_layer, hidden_layer, output_layer])
                    if game_over is True:
                        winner = "X"
                else:
                    game_over, player_to_move, move, input_layer, hidden_layer, output_layer = bot_go(player_to_move, game_over, board)
                    o_moves.append([move, input_layer, hidden_layer, output_layer])
                    if game_over is True:
                        winner = "O"
            if winner == "X":
                for move in x_moves:
                    error = train_neural_network(move[1], move[2], move[3], move[0])
                for move in o_moves:
                    error = un_train_neural_network(move[1], move[2], move[3], move[0])
            else:
                for move in o_moves:
                    error = train_neural_network(move[1], move[2], move[3], move[0])
                for move in x_moves:
                    error = un_train_neural_network(move[1], move[2], move[3], move[0])
        print(error)

main()
I would expect this code to print the result of the loss function after every 100 self-play games, and for this value to decrease over time.
However, it tends to flatten out at a value of at least 0.45, whereas I believe it should be several orders of magnitude smaller (I was getting a loss on the order of 10^-5 when I was training it on my own games against it).
I think this view is justified because the bot is also still pretty bad at noughts and crosses when it plays.
I was wondering if this is because there's a problem with my code or because the neural network is not complex enough to model the problem and requires another layer.
NOTE: Sorry about the quantity of code, but I couldn't really find a way to shorten it. I have removed the win/draw checks, as well as the "untrain" function, which is just the train function but adding the learning rate multiplied by the derivative instead of subtracting it. I can add them back if anyone wants to test the code without the inconvenience of writing those functions themselves.
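For context, the omitted un_train_neural_network has the same structure as train_neural_network; the only intended difference is the sign of the weight update. A minimal sketch of that idea (generic names, not the actual function):

def apply_update(weights, gradients, learning_rate, untrain=False):
    # Gradient descent by default; the "untrain" variant adds the
    # learning_rate * derivative term instead of subtracting it.
    sign = 1.0 if untrain else -1.0
    return {key: weights[key] + sign * learning_rate * gradients[key]
            for key in weights}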

Related

Populating an Array in a loop not working

I am dumbfounded right now. I have some code that works: it generates an array of data and operates on it.
I am trying to sample random sections from it in order to check the calculations I am doing.
I have done this before and it has worked fine.
target_sample = [1, 2, 10, 25, 83, 62]
df, s_array_track, z_array_track = MonteCarloValuationAntithetic(df, target_sample)
# df,z,s_array,lookback_scenario = MonteCarloValuation(df)

target_sample = [1, 2, 10, 25, 83, 62]
lookback = []
for i in range(n_samples):
    s = df["current_index"][i]
    s_max = df["max_index"][i]
    t = df["time to maturity_Months"][i]
    sigma = df["volatility"][i]
    cap = df["cap_rate"][i]
    r = df["interest_rate"][i]
    z = np.zeros((int(index_crediting_term*12)+1, n_scenarios))
    s_array_track = np.zeros((len(target_sample), int(index_crediting_term*12)+1, n_scenarios))
    z_array_track = np.zeros((len(target_sample), int(index_crediting_term*12)+1, n_scenarios))
    df_track = df
    s_start = df['initial_index'][i]
    s_array = np.zeros((int(index_crediting_term*12)+1, n_scenarios))
    for k in range(int(n_scenarios/2)):
        for j in range(int(t)+1):
            drift = (r - .5*(sigma**2)) * (1/12)
            z[j][k] = np.random.normal(0, 1)
            diffusion = sigma * z[j][k] * (np.sqrt(1/12))
            if j == 0:
                s_array[j][k] = s
            if (0 < j) and (j < t):
                s_array[j][k] = s_array[j-1][k]*np.exp(drift + diffusion)
            if j == t:
                s_array[j][k] = s_max
            else:
                continue
    for k in range(int(n_scenarios/2), int(n_scenarios)):
        for j in range(int(t)+1):
            drift = (r - .5*(sigma**2)) * (1/12)
            z[j][k] = -z[j][int(k-n_scenarios/2)]
            diffusion = sigma * z[j][k] * (np.sqrt(1/12))
            if j == 0:
                s_array[j][k] = s
            if (0 < j) and (j < t):
                s_array[j][k] = s_array[j-1][k]*np.exp(drift + diffusion)
            if j == t:
                s_array[j][k] = s_max
            else:
                continue
    if i in target_sample:
        print(str(i) + " is in Target")
        h = target_sample.index(i)
        print(str(h))
        s_array_track[h] = s_array
        z_array_track[h] = z
    lookback_temp = max(0, np.mean(np.clip(np.max(((s_array[:][:] / s_start) - 1), axis=0), None, cap)))
    lookback.append(lookback_temp)
df["Lookback"] = lookback
I am not getting the results I am expecting. When I run
s_array_track[h] = s_array
outside of the loop, it works as expected. What is going on in my loop? I have spent hours on this and I am really confused as to why it's not working.
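One thing worth checking, based on the code as posted: s_array_track and z_array_track are re-created with np.zeros inside the for i loop, so anything stored on earlier iterations gets wiped. A sketch of the allocate-once pattern, with purely illustrative sizes standing in for the simulation above (a guess at the intent, not a confirmed fix):

import numpy as np

n_samples, n_steps, n_scenarios = 100, 13, 1000  # illustrative sizes only
target_sample = [1, 2, 10, 25, 83, 62]
# allocate the tracking arrays once, before looping over the samples
s_array_track = np.zeros((len(target_sample), n_steps, n_scenarios))
z_array_track = np.zeros((len(target_sample), n_steps, n_scenarios))
for i in range(n_samples):
    s_array = np.zeros((n_steps, n_scenarios))  # stands in for the simulated paths
    z = np.zeros((n_steps, n_scenarios))        # stands in for the random draws
    if i in target_sample:
        h = target_sample.index(i)
        s_array_track[h] = s_array
        z_array_track[h] = z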

Chrome T-Rex-Game Reinforcement learning showing no improvement

I would like to create an AI for the Chrome no-internet dino game. To do that I adapted this GitHub repository to fit my needs. I used the standard Q-learning update to calculate the new Q value:
Q_new(s, a) = (1 - alpha) * Q(s, a) + alpha * (reward + gamma * max_a' Q(s', a'))
Source: https://en.wikipedia.org/wiki/Q-learning
My problem now is that even after ~2,000,000 iterations my game score is not increasing.
You can find the game file here: https://pastebin.com/XrwQ0suJ
QLearning.py:
import pickle
import Game_headless
import Game
import numpy as np
from collections import defaultdict

rewardAlive = 1
rewardKill = -10000
alpha = 0.2  # Learningrate
gamma = 0.9  # Discount
Q = defaultdict(lambda: [0, 0, 0])  # 0 = Jump / 1 = Duck / 2 = Do Nothing
oldState = None
oldAction = None
gameCounter = 0
gameScores = []

def paramsToState(params):
    cactus1X = round(params["cactus1X"] / 10) * 10
    cactus2X = round(params["cactus2X"] / 10) * 10
    cactus1Height = params["cactus1Height"]
    cactus2Height = params["cactus2Height"]
    pteraX = round(params["pteraX"] / 10) * 10
    pteraY = params["pteraY"]
    playerY = round(params["playerY"] / 10) * 10
    gamespeed = params["gamespeed"]
    return str(cactus1X) + "_" + str(cactus2X) + "_" + str(cactus1Height) + "_" + \
        str(cactus2Height) + "_" + str(pteraX) + "_" + str(pteraY) + "_" + \
        str(playerY) + "_" + str(gamespeed)

def shouldEmulateKeyPress(params):  # 0 = Jump / 1 = Duck / 2 = Do Nothing
    global oldState
    global oldAction
    state = paramsToState(params)
    oldState = state
    estReward = Q[state]
    action = estReward.index(max(estReward))
    if oldAction is None:
        oldAction = action
        return action
    # Previous action was successful
    # -> Update Q
    prevReward = Q[oldState]
    prevReward[oldAction] = (1 - alpha) * prevReward[oldAction] + \
        alpha * (rewardAlive + gamma * max(estReward))
    Q[oldState] = prevReward
    oldAction = action
    return action

def onGameOver(score):
    # Previous action was NOT successful
    # -> Update Q
    global oldState
    global oldAction
    global gameCounter
    global gameScores
    gameScores.append(score)
    if gameCounter % 10000 == 0:
        print(f"{gameCounter} : {np.mean(gameScores[-100:])}")
    prevReward = Q[oldState]
    prevReward[oldAction] = (1 - alpha) * prevReward[oldAction] + \
        alpha * rewardKill
    Q[oldState] = prevReward
    oldState = None
    oldAction = None
    if gameCounter % 10000 == 0:
        with open("Q\\" + str(gameCounter) + ".pickle", "wb") as file:
            pickle.dump(dict(Q), file)
    gameCounter += 1

Game_headless.main(shouldEmulateKeyPress, onGameOver)
On every frame, the gameplay() function from Game_headless.py calls shouldEmulateKeyPress(), which returns 0 for jump, 1 for duck and 2 for doing nothing.
I tried adjusting the constants, but that didn't have any effect.
If you have any questions, please don't hesitate to ask!
Thank you in advance!
Someone on Reddit did this, did you take a look at their code? https://www.reddit.com/r/MachineLearning/comments/8iujuu/p_tfrex_ai_learns_to_play_google_chromes_dinosaur/
I was able to fix the problem, although I don't really know what the mistake was. I added a return statement at the end of the gameplay() function, and somehow it works now.
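Game_headless.py is not shown in the question, so the following is only a hypothetical illustration of the callback wiring described above (all names are assumptions):

# hypothetical sketch; the real gameplay() lives in Game_headless.py
def gameplay(shouldEmulateKeyPress, onGameOver, params):
    action = shouldEmulateKeyPress(params)  # 0 = jump, 1 = duck, 2 = do nothing
    # ... apply the action and advance the game by one frame ...
    return action  # the return statement mentioned above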

What is the next step when having completed first neural network? [closed]

Closed. This question is opinion-based. It is not currently accepting answers. Closed 3 years ago.
I'm a big fan of the YouTube channel 3Blue1Brown, and his series on neural networks really got me excited about the subject.
I decided to create my own neural network in Python from scratch, engaging deeply with the mathematics. With the help of the MNIST database of hand-written digits I got started, and I succeeded with the task after 2 weeks.
I have since been developing my code further so that I can neatly adjust the number of neurons and hidden layers within the code.
I also experimented with different activation functions.
The best accuracy I've gotten is about 95% with 2 hidden layers of 16 neurons and 5 minutes of training.
Now, my question is fairly vague, but I am looking for the next challenge in the area. Do you have any suggestions?
I now have the framework set up, so I'd love a new type of problem with a bigger dataset, or should I perhaps keep working on my existing problem to increase the accuracy of the output further?
What do you guys think?
Yours,
Emil
(Here's the code if anyone is interested)
import pickle
import gzip
import numpy as np
import random
import time
class mnistClass:
    def __init__(self, inputAmount=784, layers=2, layerSize=16, outputSize=10, loops=1, sampleSize=100):
        with gzip.open('mnist.pkl.gz', 'rb') as f:
            train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
        self.A, self.y = train_set
        self.V, self.v2 = valid_set
        self.dataSize = len(self.A)
        self.inputAmount = inputAmount
        self.layers = layers
        self.layerSize = layerSize
        self.outputSize = outputSize
        self.loops = loops
        self.sampleSize = sampleSize
        self.iterations = int(self.dataSize/self.sampleSize)
        self.clock = time.time()
        self.Weights = []
        self.Biases = []
        self.initializeArrays()
        self.initializeTraining()
        print("Accuracy: " + str(self.getAccuracy()) + "%")

    def initializeArrays(self):
        for i in range(self.layers):
            if self.layers - i > 2:  #Adding middle layers
                self.Weights.append(np.random.rand(self.layerSize, self.layerSize)-0.5)
            if self.layers - i > 1:
                self.Biases.append(np.random.rand(self.layerSize)-0.5)
        if self.layers > 1:
            self.Weights.insert(0, np.random.rand(self.layerSize, self.inputAmount)-0.5)
            self.Weights.insert(len(self.Weights), np.random.rand(self.outputSize, self.layerSize)-0.5)
        else:
            self.Weights.insert(len(self.Weights), np.random.rand(self.outputSize, self.inputAmount)-0.5)
        self.Biases.insert(len(self.Biases), np.random.rand(self.outputSize)-0.5)

    def sigmoid(self, x, shiftType):
        if shiftType == 0:
            result = 1/(1+np.exp(-x))
        elif shiftType == 1:
            result = 2 * (1/(1+np.exp(-x))) - 1
        return result

    def sigmoidPrime(self, x, shiftType):
        if shiftType == 0:
            result = self.sigmoid(x, 0) - self.sigmoid(x, 0)**2
        elif shiftType == 1:
            result = 2*np.exp(-x)/(1+np.exp(-x))**2
        return result

    def Rdependance(self, Z, layer1, layer2, multi=False):  #How R depends on a preceeding R
        multi = layer1-layer2 > 1
        if not multi:
            if layer1 == self.layers-1:
                shiftType = 0
            else:
                shiftType = 1
            R1_R2_differential = np.multiply(self.Weights[layer1], self.sigmoidPrime(Z[layer1]+self.Biases[layer1], shiftType)[:, np.newaxis])
            result = R1_R2_differential
        else:
            chainRule = []
            for i in reversed(range(layer2, layer1)):
                chainRule.append(self.Rdependance(Z, i+1, i))
            result = chainRule[0]
            for i in range(len(chainRule)-1):
                result = np.dot(result, chainRule[i+1])
        return result

    def RWdependance(self, R, Z, dataCaseNo, layer):  #How R depends on connecting Weights
        if layer == self.layers-1:
            shiftType = 0
        else:
            shiftType = 1
        R_W_differential = self.Weights[layer]/self.Weights[layer]
        mergeW_Z = np.multiply(R_W_differential, self.sigmoidPrime(Z[layer]+self.Biases[layer], shiftType)[:, np.newaxis])
        if layer == 0:
            R_W_differential = np.multiply(mergeW_Z.T, self.A[dataCaseNo][:, np.newaxis]).T
        else:
            R_W_differential = np.multiply(mergeW_Z.T, R[layer-1][:, np.newaxis]).T
        return R_W_differential

    def RBdependance(self, Z, layer):  #How R depends on internal Biases
        if layer == self.layers-1:
            shiftType = 0
        else:
            shiftType = 1
        R_B_differential = np.multiply(self.Rdependance(Z, self.layers-1, layer).T, self.sigmoidPrime(Z[layer]+self.Biases[layer], shiftType)[:, np.newaxis]).T
        return R_B_differential

    def integralWeightCost(self, R, Z, dataCaseNo, quadDifferential, layer):  # Cost of system for weights
        if layer == self.layers-1:
            nodes = np.identity(self.outputSize)
        else:
            nodes = self.Rdependance(Z, self.layers-1, layer)
        cost_differential = np.multiply(nodes, quadDifferential[:, np.newaxis])
        cost_differential = np.sum(cost_differential, 0)
        result = np.multiply(self.RWdependance(R, Z, dataCaseNo, layer), cost_differential[:, np.newaxis])
        return result

    def integralBiasCost(self, Z, quadDifferential, layer):  # Cost of system for biases
        if layer == self.layers-1:
            nodes = np.identity(self.outputSize)
        else:
            nodes = self.RBdependance(Z, layer)
        cost_differential = np.multiply(nodes, quadDifferential[:, np.newaxis])
        result = np.sum(cost_differential, 0)
        return result

    def initializeTraining(self):
        for loop in range(self.loops):
            for iteration in range(self.iterations):
                avg_cost = 0
                avg_deltaWeights = []
                avg_deltaBiases = []
                for i in range(len(self.Weights)):  #Creating zeros of weight arrays
                    avg_deltaWeights.append(self.Weights[i]*0)
                for i in range(len(self.Biases)):
                    avg_deltaBiases.append(self.Biases[i]*0)
                for dataCaseNo in range(iteration*self.sampleSize, iteration*self.sampleSize + self.sampleSize):
                    if self.layers == 1:
                        shiftType = 0
                    else:
                        shiftType = 1
                    Y1 = np.zeros(self.outputSize)
                    Y1[self.y[dataCaseNo]] = 1
                    Z = []
                    Z.append(np.dot(self.Weights[0], self.A[dataCaseNo]))
                    R = []
                    R.append(self.sigmoid(Z[0]+self.Biases[0], shiftType))
                    for i in range(1, self.layers):
                        if i == self.layers-1:
                            shiftType = 0
                        else:
                            shiftType = 1
                        Z.append(np.dot(self.Weights[i], R[i-1]))
                        R.append(self.sigmoid(Z[i]+self.Biases[i], shiftType))
                    C = np.sum((R[-1] - Y1)**2)
                    avg_cost += C
                    quadDifferential = 2 * (R[-1]-Y1)
                    for i in range(self.layers):
                        avg_deltaWeights[i] += self.integralWeightCost(R, Z, dataCaseNo, quadDifferential, i)
                        avg_deltaBiases[i] += self.integralBiasCost(Z, quadDifferential, i)
                avg_cost = avg_cost/self.sampleSize
                for i in range(self.layers):
                    self.Weights[i] = self.Weights[i] - avg_deltaWeights[i]/self.sampleSize
                    self.Biases[i] = self.Biases[i] - avg_deltaBiases[i]/self.sampleSize
                print("Average cost: " + str(round(avg_cost, 4)))
            print("\n" + "*"*25 + " " + str(loop+1) + " " + "*"*25 + "\n")
        executionEndTime = round((time.time() - self.clock), 2)
        print("Completed " + str(self.loops) + " rounds of " + str(self.sampleSize*self.iterations) + " samples (sampleSize: " + str(self.sampleSize) + "), " + " in " + str(executionEndTime) + " seconds..")
        print("Layers: " + str(self.layers))
        print("Middle layer nodes: " + str(self.layerSize))
        print("Input amount: " + str(self.inputAmount))
        amountVariables = 0
        for i in range(self.layers):
            amountVariables += self.Weights[i].size
            amountVariables += self.Biases[i].size
        print("Variables: " + str(amountVariables))
        print("Output size: " + str(self.outputSize))
        time.sleep(2)

    def getAccuracy(self):
        runs = 10000
        correct = 0
        print("Testing validation set accuracy over " + str(runs) + " samples...\n")
        for i in range(runs):
            if self.layers == 1:
                shiftType = 0
            else:
                shiftType = 1
            ran = i
            Y1 = np.zeros(self.outputSize)
            Y1[self.v2[ran]] = 1
            Z = []
            Z.append(np.dot(self.Weights[0], self.V[ran]))
            R = []
            R.append(self.sigmoid(Z[0]+self.Biases[0], shiftType))
            for i in range(1, self.layers):
                if i == self.layers-1:
                    shiftType = 0
                else:
                    shiftType = 1
                Z.append(np.dot(self.Weights[i], R[i-1]))
                R.append(self.sigmoid(Z[i]+self.Biases[i], shiftType))
            result = np.where(R[-1] == np.amax(R[-1]))
            maxNum = result[0][0]
            if int(self.v2[ran]) == int(maxNum):
                correct += 1
        accuracy = correct*100/runs
        return accuracy

instance = mnistClass(784, 3, 16, 10, 2, 100)
#(input, layers, layer size, output, loops, sample subsize)
#input - amount of nodes in input data
#layers - amount of layers including last output layer but not first input layer
#layer size - amount of nodes in hidden layers
#output - amount of nodes in output layer
#loops - how many times to train through the entire data set
#sample subsize - what quantity of data samples to average the gradient on
I'm glad to hear about new faces joining the field of ML (specifically DL).
What you've achieved is quite an accomplishment, so first of all, salute.
As for your question, I'd suggest you take a step back and learn about data exploration and feature extraction, and why they are important. The way I suggest you do that is by working through some Kaggle tutorials on machine learning, doing some basic classification on datasets from there, like the Titanic dataset, etc.
https://www.kaggle.com/learn/overview
Go for the "Intro to Machine Learning" course.
Best of luck!

Optimizing the A-Star (A*) Path Finding

I recently coded the A* pathfinding algorithm in Python. It works on a grid of coordinates where each coordinate holds an elevation value, and there is a maximum elevation difference (between two adjacent nodes) that decides whether we can move to a neighbouring node or not.
How do I optimize this further?
Also, any suggestions to improve the code, or feedback if you see me going wrong anywhere, are welcome!
I tried multiple heuristics, such as the Euclidean distance formula, but the one used in the code below seems to work best for me. Correct me if I am wrong.
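For reference, these are the two heuristics being compared, written out side by side exactly as they appear in the code below (the first is the commented-out distance formula, the second is the diagonal-distance one currently in use):

def euclidean_heuristic(current, target_cord):
    # the commented-out "distance formula" variant, scaled by 10
    return int(abs((((target_cord[0] - current[0])**2) + ((target_cord[1] - current[1])**2))**0.5) * 10)

def diagonal_heuristic(current, target_cord):
    # the heuristic currently used in getheuristic()
    dx = abs(current[0] - target_cord[0])
    dy = abs(current[1] - target_cord[1])
    return (dx + dy) + (2**0.5 - 2) * min(dx, dy)

The full search function follows: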
def astar(target_cord):
    def find_neighbor_astar(current):
        neighbor_mat = dict()
        current_matrix_cord = matrix[current[0]][current[1]]
        for x in neighbor_scope:
            for y in neighbor_scope:
                x_cord = current[0]+x
                y_cord = current[1]+y
                if(x != 0 or y != 0):
                    if((x_cord) >= 0 and x_cord < max_cols and y_cord >= 0 and y_cord < max_rows and abs(current_matrix_cord - matrix[x_cord][y_cord]) <= elevation_max):
                        if(x != 0 and y != 0):
                            neighbor_mat[x_cord, y_cord] = 14 + abs(current_matrix_cord - matrix[x_cord][y_cord])
                        else:
                            neighbor_mat[x_cord, y_cord] = 10 + abs(current_matrix_cord - matrix[x_cord][y_cord])
        return neighbor_mat

    def getheuristic(current, target_cord):
        # return (int(abs(((((target_cord[0]-current[0])**2)+((target_cord[1]-current[1])**2))**0.5))*10))
        dx = abs(current[0] - target_cord[0])
        dy = abs(current[1] - target_cord[1])
        return (dx + dy) + (2**0.5 - 2) * min(dx, dy)

    parent_cord = {start_cord: (None, 0)}
    frontier_nodes_heap = [(0, start_cord)]
    frontier_nodes = dict()
    frontier_nodes[start_cord] = 0
    node_gofx = dict()
    node_gofx[start_cord] = 0
    explored_nodes = set()
    while frontier_nodes_heap:
        curr_node = heapq.heappop(frontier_nodes_heap)[1]
        del frontier_nodes[curr_node]
        curr_cost = node_gofx[curr_node]
        if(curr_node == target_cord):
            return tracepath_ucs(parent_cord, target_cord)
        explored_nodes.add(curr_node)
        for x, step_cost in find_neighbor_astar(curr_node).items():
            gofx = curr_cost + step_cost
            path_cost = gofx + getheuristic(x, target_cord)
            if x in explored_nodes:
                continue
            if(x not in frontier_nodes):
                if(x not in parent_cord):
                    parent_cord[x] = (curr_node, step_cost)
                heapq.heappush(frontier_nodes_heap, (path_cost, x))
                frontier_nodes[x] = path_cost
                node_gofx[x] = gofx
            elif(x in frontier_nodes and path_cost < frontier_nodes[x]):
                frontier_nodes_heap[frontier_nodes_heap.index((frontier_nodes[x], x))] = (path_cost, x)
                frontier_nodes[x] = path_cost
                parent_cord[x] = (curr_node, step_cost)
                node_gofx[x] = gofx
    return []

input-output program doesn't output the correct integer in python 3

So I've designed a program where you type in a football (soccer) stat and it prints the number of times you have entered it. It had been working, but I added the option to quit, which caused the following problems: if you hadn't entered a stat at least once, it would crash. I fixed that, but now when I run it, it starts counting from 0 as well, which it didn't do before. Here is just one stat, as I figured it would be inappropriate to copy-paste over 1000 lines of code here:
pk_range_goal = range(0, 1000)
pk_list_goal = list(pk_range_goal)
pk_input = '''goal/saved/missed
'''
def body():
    while True:
        choice = input("")
        first_number_pk_goal = pk_list_goal[0]
        integer_pk_number_goal = int(first_number_pk_goal)
        if choice == "pk":
            good_bad_input_pk = input(pk_input)
            if good_bad_input_pk == "goal":
                pk_list_goal.remove(first_number_pk_goal)
                integer_pk_number_goal = int(first_number_pk_goal)
                string_number_pk_goal = str(first_number_pk_goal)
                print("Penalty Kick goal(s): ", string_number_pk_goal)
        elif choice == "q":
            print("Are you sure that you want to finish the stat counting? (yes/no)")
            quit_choice = input()
            if quit_choice == "yes":
                if integer_pk_number_goal >= 1:
                    print("Penalty Kick goal(s): ", string_number_pk_goal)
                elif integer_pk_number_goal == 0:
                    print("Penalty Kick goal(s): 0")
                else:
                    pass
                break
        else:
            pass
body()
EDIT: I tried making the range start from 1, but then you would need to enter the stat twice for the count to reach 2.
Complete code:
# -----> * INTRODUCTION * <-----
Commands = ''' STAT ------------> COMMAND
Penalty Kick -----> pk
-> Goal ----------> goal
-> Saved ---------> saved
-> Missed --------> missed
Free Kick --------> fk
Corner Kick ------> ck
Throw In ---------> ti
Cross ------------> cross
-> Good Delivery -> gd
-> Good Delivery -> pd
1 versus 1 ------> 1v1
-> Won ----------> w
-> Lost ---------> l
Shot ------------> shot
Header ----------> header
-> On Target ----> on target
-> Off Target ---> off target
Save ------------> save
To quit press q
'''
print(Commands)
# -----> * Penalty Kicks Variables * <-----
pk_range = range(0, 1000)
pk_list = list(pk_range)
pk_range_goal = range(0, 1000)
pk_list_goal = list(pk_range_goal)
pk_range_saved = range(0, 1000)
pk_list_saved = list(pk_range_saved)
pk_range_missed = range(0, 1000)
pk_list_missed = list(pk_range_missed)
pk_input = '''goal/saved/missed
'''
# -----> * Free Kicks Variables <-----
fk_range = range(0, 1000)
fk_list = list(fk_range)
fk_range_gd = range(0, 1000)
fk_list_gd = list(fk_range_gd)
fk_range_pd = range(0, 1000)
fk_list_pd = list(fk_range_pd)
fk_input = '''gd/pd
'''
# -----> * Corner Kicks Variables * <-----
ck_range = range(0, 1000)
ck_list = list(ck_range)
ck_range_gd = range(0, 1000)
ck_list_gd = list(ck_range_gd)
ck_range_pd = range(0, 1000)
ck_list_pd = list(ck_range_pd)
ck_input = '''gd/pd
'''
# -----> * Throw Ins Variables * <-----
ti_range = range(0, 1000)
ti_list = list(ti_range)
ti_range_gd = range(0, 1000)
ti_list_gd = list(ti_range_gd)
ti_range_pd = range(0, 1000)
ti_list_pd = list(ti_range_pd)
ti_input = '''gd/pd
'''
# -----> * Crosses Variables * <-----
crosses_range = range(0, 1000)
crosses_list = list(crosses_range)
crosses_range_gd = range(0, 1000)
crosses_list_gd = list(crosses_range_gd)
crosses_range_pd = range(0, 1000)
crosses_list_pd = list(crosses_range_pd)
crosses_input = '''gd/pd
'''
# -----> * 0 vs 0 Variables * <-----
v1_range = range(0, 1000)
v1_list = list(v1_range)
v1_range_w = range(0, 1000)
v1_list_w = list(v1_range_w)
v1_range_l = range(0, 1000)
v1_list_l = list(v1_range_l)
v1_input = '''w/l
'''
# -----> * Shots Variables * <-----
shots_range = range(0, 1000)
shots_list = list(shots_range)
shots_range_gd = range(0, 1000)
shots_list_gd = list(shots_range_gd)
shots_range_pd = range(0, 1000)
shots_list_pd = list(shots_range_pd)
shots_input = '''on target/off target
'''
# -----> * Headers Variables * <-----
headers_range = range(0, 1000)
headers_list = list(headers_range)
headers_range_gd = range(0, 1000)
headers_list_gd = list(headers_range_gd)
headers_range_pd = range(0, 1000)
headers_list_pd = list(headers_range_pd)
headers_input = '''on target/off target
'''
# -----> * Saves Variables * <-----
saves_range = range(1, 1000)
saves_list = list(saves_range)
# -----> * Long Pass Variables * <-----
long_passes_range = range(1, 1000)
long_passes_list = list(long_passes_range)
long_passes_range_first_touch = range(1, 1000)
long_passes_list_first_touch = list(long_passes_range_first_touch)
long_passes_range_second_touch = range(1, 1000)
long_passes_list_second_touch = list(long_passes_range_second_touch)
long_passes_input = '''first touch/second touch
'''
# -----> * Main Function * <-----
def body():
    while True:
        choice = input("")
        # -----> * Penalty Kicks Goal Variables * <-----
        # -----> * Penalty Kicks Missed Variables * <-----
        first_number_pk_missed = pk_list_missed[0]
        integer_pk_number_missed = int(first_number_pk_missed)
        number_pk_missed = integer_pk_number_missed - 1
        string_number_pk_missed = str(number_pk_missed)
        # -----> * Penalty Kicks Saved Variables * <-----
        first_number_pk_saved = pk_list_saved[0]
        integer_pk_number_saved = int(first_number_pk_saved)
        number_pk_saved = integer_pk_number_saved - 1
        string_number_pk_saved = str(number_pk_saved)
        # -----> * Total Penalty Kicks Variables * <-----
        first_number_pk = pk_list[0]
        integer_pk_number = int(first_number_pk)
        number_pk = integer_pk_number - 1
        string_number_pk = str(number_pk)
        # -----> * Penalty Kicks Function * <-----
        first_number_pk_goal = pk_list_goal[0]
        integer_pk_number_goal = int(first_number_pk_goal)
        if choice == "pk":
            good_bad_input_pk = input(pk_input)
            if good_bad_input_pk == "goal":
                integer_pk_number_goal = 0
                string_number_pk_goal = str(first_number_pk_goal)
                pk_list_goal.remove(first_number_pk_goal)
                print("Penalty Kick goal(s): ", string_number_pk_goal)
            elif good_bad_input_pk == "saved":
                print("Penalty Kick(s) saved: ", string_number_pk_saved)
            elif good_bad_input_pk == "missed":
                print("Penalty Kick(s) missed: ", string_number_pk_missed)
            else:
                pass
            print("Penalty Kick(s) : ", string_number_pk)
        # -----> * Free Kicks with Good Delivery Variables * <-----
        first_number_fk_gd = fk_list_gd[0]
        integer_fk_number_gd = int(first_number_fk_gd)
        number_fk_gd = integer_fk_number_gd - 1
        string_number_fk_gd = str(number_fk_gd)
        # -----> * Free Kicks with Poor Delivery Variables * <-----
        first_number_fk_pd = fk_list_pd[0]
        integer_fk_number_pd = int(first_number_fk_pd)
        number_fk_pd = integer_fk_number_pd - 1
        string_number_fk_pd = str(number_fk_pd)
        # -----> * Free Kicks Variables * <-----
        first_number_pk = pk_list[0]
        integer_pk_number = int(first_number_pk)
        number_pk = integer_pk_number - 1
        string_number_pk = str(number_pk)
        # -----> * Free Kicks Function * <-----
        if choice == "fk":
            good_bad_input_fk = input(fk_input)
            if good_bad_input_fk == "gd":
                print("Free Kick(s) with a Good Delivery: ", string_number_fk_gd)
            elif good_bad_input_fk == "pd":
                print("Free Kick(s) with a Poor Delivery: ", string_number_fk_pd)
            else:
                pass
            print("Free Kick(s)", string_number_fk)
        # -----> * Corner Kick Variables * <-----
        elif choice == "ck":
            good_bad_input_ck = input(ck_input)
            if good_bad_input_ck == "gd":
                first_number_ck_gd = ck_list_gd[0]
                ck_list_gd.remove(first_number_ck_gd)
                number_ck_gd = ck_list_gd[0]
                string_number_ck_gd = str(number_ck_gd)
                print("Corner Kick(s) with a Good Delivery: ", string_number_ck_gd)
            elif good_bad_input_ck == "pd":
                first_number_ck_pd = ck_list_pd[0]
                ck_list_pd.remove(first_number_ck_pd)
                number_ck_pd = ck_list_pd[0]
                string_number_ck_pd = str(number_ck_pd)
                print("Corner Kick(s) with a Poor Delivery: ", string_number_ck_pd)
            else:
                pass
            first_number_ck = ck_list[0]
            ck_list.remove(first_number_ck)
            number_ck = ck_list[0]
            string_number_ck = str(number_ck)
            print("Corner Kick(s): ", string_number_ck)
        # -----> * Throw Ins Functions * <-----
        elif choice == "ti":
            good_bad_input_ti = input(ti_input)
            if good_bad_input_ti == "gd":
                first_number_ti_gd = ti_list_gd[0]
                ti_list_gd.remove(first_number_ti_gd)
                number_ti_gd = ti_list_gd[0]
                string_number_ti_gd = str(number_ti_gd)
                print("Throw In(s) with a Good Delivery: ", string_number_ti_gd)
            elif good_bad_input_ti == "pd":
                first_number_ti_pd = ti_list_pd[0]
                ti_list_pd.remove(first_number_ti_pd)
                number_ti_pd = ti_list_pd[0]
                string_number_ti_pd = str(number_ti_pd)
                print("Throw In(s) with a Poor Delivery: ", string_number_ti_pd)
            else:
                pass
            first_number_ti = ti_list[0]
            ti_list.remove(first_number_ti)
            number_ti = ti_list[0]
            string_number_ti = str(number_ti)
            print("Throw In(s): ", string_number_ti)
        # -----> * Crosses Function * <-----
        elif choice == "cross":
            good_bad_input_crosses = input(crosses_input)
            if good_bad_input_crosses == "gd":
                first_number_crosses_gd = crosses_list_gd[0]
                crosses_list_gd.remove(first_number_crosses_gd)
                number_crosses_gd = crosses_list_gd[0]
                string_number_crosses_gd = str(number_crosses_gd)
                print("Cross(es) with a Good Delivery: ", string_number_crosses_gd)
            elif good_bad_input_crosses == "pd":
                first_number_crosses_pd = crosses_list_pd[0]
                crosses_list_pd.remove(first_number_crosses_pd)
                number_crosses_pd = crosses_list_pd[0]
                string_number_crosses_pd = str(number_crosses_pd)
                print("Cross(es) with a Good Delivery: ", string_number_crosses_pd)
            else:
                pass
            first_number_crosses = crosses_list[0]
            crosses_list.remove(first_number_crosses)
            number_crosses = crosses_list[0]
            string_number_crosses = str(number_crosses)
            print("Cross(es): ", string_number_crosses)
        # -----> * 1 versus 1 Function * <-----
        elif choice == "1v1":
            good_bad_input_v1 = input(v1_input)
            if good_bad_input_v1 == "w":
                first_number_v1_w = v1_list_w[0]
                v1_list_w.remove(first_number_v1_w)
                number_v1_w = v1_list_w[0]
                string_number_v1_w = str(number_v1_w)
                print("Won 1vs1: ", string_number_v1_w)
            elif good_bad_input_v1 == "l":
                first_number_v1_l = v1_list_l[0]
                v1_list_l.remove(first_number_v1_l)
                number_v1_l = v1_list_l[0]
                string_number_v1_l = str(number_v1_l)
                print("Lost 1vs1: ", string_number_v1_l)
            else:
                pass
            first_number_v1 = v1_list[0]
            v1_list.remove(first_number_v1)
            number_v1 = v1_list[0]
            string_number_v1 = str(number_v1)
            print("1vs1: ", string_number_v1)
        # -----> * Shots Function * <-----
        elif choice == "shot":
            good_bad_input_shots = input(shots_input)
            if good_bad_input_shots == "on target":
                first_number_shots_gd = shots_list_gd[0]
                shots_list_gd.remove(first_number_shots_gd)
                number_shots_gd = shots_list_gd[0]
                string_number_shots_gd = str(number_shots_gd)
                print("Shot(s) on target: ", string_number_shots_gd)
            elif good_bad_input_shots == "off target":
                first_number_shots_pd = shots_list_pd[0]
                shots_list_pd.remove(first_number_shots_pd)
                number_shots_pd = shots_list_pd[0]
                string_number_shots_pd = str(number_shots_pd)
                print("Shot(s) off target: ", string_number_shots_pd)
            else:
                pass
            first_number_shots = shots_list[0]
            shots_list.remove(first_number_shots)
            number_shots = shots_list[0]
            string_number_shots = str(number_shots)
            print("Shot(s): ", string_number_shots)
        # -----> * Headers Function * <-----
        elif choice == "header":
            good_bad_input_headers = input(headers_input)
            if good_bad_input_headers == "on target":
                first_number_headers_gd = headers_list_gd[0]
                headers_list_gd.remove(first_number_headers_gd)
                number_headers_gd = headers_list_gd[0]
                string_number_headers_gd = str(number_headers_gd)
                print("Header(s) on target: ", string_number_headers_gd)
            elif good_bad_input_headers == "off target":
                first_number_headers_pd = headers_list_pd[0]
                headers_list_pd.remove(first_number_headers_pd)
                number_headers_pd = headers_list_pd[0]
                string_number_headers_pd = str(number_headers_pd)
                print("Header(s) off target: ", string_number_headers_pd)
            else:
                pass
            first_number_headers = headers_list[0]
            headers_list.remove(first_number_headers)
            number_headers = headers_list[0]
            string_number_headers = str(number_headers)
            print("Header(s): ", string_number_headers)
        # -----> * Long Passes * <-----
        elif choice == "long pass":
            good_bad_input_long_passes = input(long_passes_input)
            first_number_long_passes = long_passes_list[0]
            long_passes_list.remove(first_number_long_passes)
            number_long_passes = long_passes_list[0]
            string_number_long_passes = str(number_long_passes)
            print("Long Pass(es): ", string_number_long_passes)
            if good_bad_input_long_passes == "first touch":
                first_number_long_passes_first_touch = long_passes_list_first_touch[0]
                long_passes_list_first_touch.remove(first_number_long_passes_first_touch)
                number_long_passes_first_touch = long_passes_list_first_touch[0]
                string_number_long_passes_first_touch = str(number_long_passes_first_touch)
                print("Long Pass(es) first touch: ", string_number_long_passes_first_touch)
            elif good_bad_input_long_passes == "second touch":
                first_number_long_passes_second_touch = long_passes_list_second_touch[0]
                long_passes_list_second_touch.remove(first_number_long_passes_second_touch)
                number_long_passes_second_touch = long_passes_list_second_touch[0]
                string_number_long_passes_second_touch = str(number_long_passes_second_touch)
                print("Long Pass(es) second touch: ", string_number_long_passes_second_touch)
            else:
                pass
        # -----> * Saves * <-----
        elif choice == "save":
            first_number_save = saves_list[0]
            saves_list.remove(first_number_save)
            number_save = saves_list[0]
            string_number_save = str(number_save)
            print("Save(s)", string_number_save)
        elif choice == "q":
            print("Are you sure that you want to finish the stat counting? (yes/no)")
            quit_choice = input()
            if quit_choice == "yes":
                # -----> * Penalty_Kicks_goal_Quit_Function * <-----
                if integer_pk_number_goal >= 1:
                    print("Penalty Kick goal(s): ", string_number_pk_goal)
                elif integer_pk_number_goal == 0:
                    print("Penalty Kick goal(s): 0")
                # -----> * Penalty_Kicks_saved_Quit_Function * <-----
                if integer_pk_number_saved >= 1:
                    print("Saved Penalty Kick(s): ", string_number_pk_saved)
                elif integer_pk_number_saved == 0:
                    print("Saved Penalty Kick(s): 0")
                # -----> * Penalty_Kicks_missed_Quit_Function * <-----
                if integer_pk_number_missed >= 1:
                    print("Missed Penalty Kick(s): ", string_number_pk_missed)
                elif integer_pk_number_missed == 0:
                    print("Missed Penalty Kick(s): 0")
                # -----> * Penalty_Kicks_Quit_Function * <-----
                if integer_pk_number >= 1:
                    print("Penalty Kick(s): ", string_number_pk)
                elif integer_pk_number == 0:
                    print("Penalty Kick(s): 0")
                else:
                    pass
                # -----> * Free_Kicks_gd_Quit_Function * <-----
                if integer_fk_number_gd >= 1:
                    print("Free Kick(s) with Good delivery: ", string_number_fk_gd)
                elif integer_fk_number_gd == 0:
                    print("Free Kick(s) with Good delivery: 0")
                # -----> * Free_Kicks_Quit_Function * <-----
                if integer_fk_number_pd >= 1:
                    print("Free Kick(s) with Poor delivery: ", string_number_fk_pd)
                elif integer_fk_number_pd == 0:
                    print("Free Kick(s) with Poor delivery: 0")
                # -----> * Free_Kicks_Quit_Function * <-----
                if integer_fk_number >= 1:
                    print("Free Kick(s): ", string_number_fk)
                elif integer_fk_number == 0:
                    print("Free Kick(s): 0")
                break
            elif quit_choice == "no":
                pass
        else:
            pass

body()
In the complete source code I haven't added the quit functionality to all the stats as I did in the minimal example, since it doesn't work and would be a waste of time. Also, you may find other bugs.
Try defining the variables before they get used: give them a default value (for example False, or 0 for the counters) up front, and then set them to their real value once a stat has been recorded.
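A sketch of that idea applied to the penalty-kick counter, using a plain integer initialised before the loop instead of popping numbers from a pre-built list (this swaps out the list-based scheme, so treat it as an illustration rather than a drop-in fix):

pk_goals = 0  # defined before the loop, so the quit branch can always print it

def body():
    global pk_goals
    while True:
        choice = input("")
        if choice == "pk":
            if input("goal/saved/missed\n") == "goal":
                pk_goals += 1
                print("Penalty Kick goal(s): ", pk_goals)
        elif choice == "q":
            print("Are you sure that you want to finish the stat counting? (yes/no)")
            if input() == "yes":
                print("Penalty Kick goal(s): ", pk_goals)
                break

body()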
