Adding neurons to Adam optimizer state in Pytorch - python

I've posted the following to the PyTorch discussion board too. I'm trying to keep the per-parameter learning rates for the already existing parameters when adding more neurons (to existing layers, not new layers) to a network. I've written the following class, which allows me to add neurons to hidden layers during training:
import torch
import torch.nn as nn
import numpy as np  # needed in act(); `device` is assumed to be defined elsewhere

class DQN(nn.Module):
    def __init__(self, num_inputs, hidden, num_actions, non_linearity):
        super(DQN, self).__init__()
        self.num_inputs = num_inputs
        self.hidden = hidden
        self.num_actions = num_actions
        self.non_linearity = non_linearity

        self.layers = nn.ModuleList()
        self.layers.append(nn.Linear(num_inputs, self.hidden[0]))
        previous = self.hidden[0]
        for hidden_layer_size in self.hidden[1:]:
            self.layers.append(nn.Linear(previous, hidden_layer_size))
            previous = hidden_layer_size
        self.layers.append(nn.Linear(previous, num_actions))

    def forward(self, x):
        for i in range(len(self.layers) - 1):
            x = self.non_linearity(self.layers[i](x))
        return self.layers[-1](x)

    def increase_capacity(self, increment):
        for i in range(len(self.hidden)):
            self.hidden[i] += increment[i]

        # First hidden layer: copy the old weights/biases into the top block of the enlarged layer
        bias = self.layers[0].bias.data
        weight = self.layers[0].weight.data
        self.layers[0] = nn.Linear(self.num_inputs, self.hidden[0])
        if increment[0] > 0:
            self.layers[0].weight.data[0:-increment[0], :] = weight
            self.layers[0].bias.data[0:-increment[0]] = bias
        else:
            self.layers[0].weight.data[0:, :] = weight
            self.layers[0].bias.data = bias

        # Remaining hidden layers: copy old parameters into the top-left block
        for i in range(1, len(self.layers) - 1):
            bias = self.layers[i].bias.data
            weight = self.layers[i].weight.data
            self.layers[i] = nn.Linear(self.hidden[i-1], self.hidden[i])
            if increment[i] > 0:
                if increment[i-1] > 0:
                    self.layers[i].bias.data[0:-increment[i]] = bias
                    self.layers[i].weight.data[0:-increment[i], 0:-increment[i-1]] = weight
                else:
                    self.layers[i].bias.data[0:-increment[i]] = bias
                    self.layers[i].weight.data[0:-increment[i], 0:] = weight
            else:
                if increment[i-1] > 0:
                    self.layers[i].bias.data = bias
                    self.layers[i].weight.data[0:, 0:-increment[i-1]] = weight
                else:
                    self.layers[i].bias.data = bias
                    self.layers[i].weight.data[0:, 0:] = weight

        # Output layer: the number of actions is unchanged, only the input width grows
        bias = self.layers[-1].bias.data
        weight = self.layers[-1].weight.data
        self.layers[-1] = nn.Linear(self.hidden[-1], self.num_actions)
        if increment[-1] > 0:
            self.layers[-1].bias.data = bias
            self.layers[-1].weight.data[:, 0:-increment[-1]] = weight
        else:
            self.layers[-1].bias.data = bias
            self.layers[-1].weight.data[:, 0:] = weight

    def act(self, state, epsilon, mask):
        if np.random.rand() > epsilon:
            state = torch.tensor([state], dtype=torch.float32, device=device)
            mask = torch.tensor([mask], dtype=torch.float32, device=device)
            q_values = self.forward(state) + mask
            action = q_values.max(1)[1].view(1, 1).item()
        else:
            action = np.random.randint(self.num_actions)
        return action
Now I've written a little sanity check (whether it leads to sanity is questionable at this point): a network with two hidden layers of one neuron each should fail to learn the x-or function, whereas a network where 4 neurons have been added should learn it. If I initialise a new optimiser this indeed works. The optimiser I use is Adam, which keeps track of per-parameter learning rates. I'd like to keep Adam's learning rates for the weights and biases that already existed before I add the additional neurons. The following is my failed attempt at doing so:
import random
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np

# Credits to Alvations
def generate_zero():
    return random.uniform(0, 49) / 100

def generate_one():
    return random.uniform(50, 100) / 100

def generate_xor_XY(num_data_points):
    Xs, Ys = [], []
    for _ in range(num_data_points):
        # xor(0, 0) -> 0
        Xs.append([generate_zero(), generate_zero()]); Ys.append([0])
        # xor(1, 0) -> 1
        Xs.append([generate_one(), generate_zero()]); Ys.append([1])
        # xor(0, 1) -> 1
        Xs.append([generate_zero(), generate_one()]); Ys.append([1])
        # xor(1, 1) -> 0
        Xs.append([generate_one(), generate_one()]); Ys.append([0])
    return Xs, Ys

# Initialisation
network = DQN(2, [1, 1], 1, F.relu)
# optimizer = optim.Adam(network.parameters(), amsgrad=False)
optimizer = optim.Adam(network.parameters(), amsgrad=True)
criterion = nn.MSELoss()

# Train 50000 steps to show 1 neuron cannot solve the x-or task
for i in range(50000):
    optimizer.zero_grad()
    Xs, Ys = generate_xor_XY(1)
    Xs = torch.tensor(Xs)
    Ys = torch.tensor(Ys, dtype=torch.float)
    prediction = network(Xs)
    loss = criterion(prediction, Ys)
    loss.backward()
    optimizer.step()
print(network(torch.tensor([[1, 0], [0, 1], [1, 1], [0, 0]], dtype=torch.float)))
print(loss)

# Add 4 neurons to each hidden layer
capacity = [4, 4]
network.increase_capacity(capacity)

# Uncomment the following line and comment the lines following it for normal initialisation.
# optimizer = optim.Adam(network.parameters(), amsgrad=True)

nw_param = [p for p in network.parameters()]
new_param_group = []
layer_idx = 0
for idx, group in enumerate(optimizer.param_groups):
    for idx_p, p in enumerate(group['params']):
        # Save previous information
        prev_grad = p.grad
        old_p = copy.deepcopy(p)
        old_state = copy.copy(optimizer.state[p])
        old_step = old_state['step']
        old_exp_avg = old_state['exp_avg']
        old_exp_avg_sq = old_state['exp_avg_sq']
        old_max_exp_avg_sq = old_state['max_exp_avg_sq']
        # Remove old parameter from state
        optimizer.state.pop(p)
        # Weights
        if p.dim() > 1:
            p = nn.Parameter(nw_param[layer_idx])
            p.grad = torch.zeros_like(p)
            new_exp_avg = torch.zeros_like(p)
            new_exp_avg_sq = torch.zeros_like(p)
            new_max_exp_avg_sq = torch.zeros_like(p)
            p.grad[0:prev_grad.size(0), 0:prev_grad.size(1)] = prev_grad
            optimizer.state[p]['step'] = old_step
            optimizer.state[p]['exp_avg'] = new_exp_avg
            optimizer.state[p]['exp_avg'][0:prev_grad.size(0), 0:prev_grad.size(1)] = old_exp_avg
            optimizer.state[p]['exp_avg_sq'] = new_exp_avg_sq
            optimizer.state[p]['exp_avg_sq'][0:prev_grad.size(0), 0:prev_grad.size(1)] = old_exp_avg_sq
            optimizer.state[p]['max_exp_avg_sq'] = new_max_exp_avg_sq
            optimizer.state[p]['max_exp_avg_sq'][0:prev_grad.size(0), 0:prev_grad.size(1)] = old_max_exp_avg_sq
            new_param_group.append(p)
        # Biases
        else:
            p = nn.Parameter(nw_param[layer_idx])
            p.grad = torch.zeros_like(p)
            new_exp_avg = torch.zeros_like(p)
            new_exp_avg_sq = torch.zeros_like(p)
            new_max_exp_avg_sq = torch.zeros_like(p)
            p.grad[0:prev_grad.size(0)] = prev_grad
            optimizer.state[p]['step'] = old_step
            optimizer.state[p]['exp_avg'] = new_exp_avg
            optimizer.state[p]['exp_avg'][0:prev_grad.size(0)] = old_exp_avg
            optimizer.state[p]['exp_avg_sq'] = new_exp_avg_sq
            optimizer.state[p]['exp_avg_sq'][0:prev_grad.size(0)] = old_exp_avg_sq
            optimizer.state[p]['max_exp_avg_sq'] = new_max_exp_avg_sq
            optimizer.state[p]['max_exp_avg_sq'][0:prev_grad.size(0)] = old_max_exp_avg_sq
            new_param_group.append(p)
        layer_idx += 1
optimizer.param_groups[0]['params'] = new_param_group

print(network)

# Train 50000 steps to show that by adding neurons the task can be solved
for i in range(50000):
    optimizer.zero_grad()
    Xs, Ys = generate_xor_XY(1)
    Xs = torch.tensor(Xs)
    Ys = torch.tensor(Ys, dtype=torch.float)
    prediction = network(Xs)
    loss = criterion(prediction, Ys)
    loss.backward()
    optimizer.step()
print(network(torch.tensor([[1, 0], [0, 1], [1, 1], [0, 0]], dtype=torch.float)))
print(loss)
I’m trying to get the same optimizer state, but with additional parameters for the added neurons. This seems like a convoluted way of doing it (and it doesn’t work:p). Does anyone know of an (easier) way to do this or see where I’m going wrong?
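For reference, this is the less manual route I'm considering (a rough, untested sketch, not working code from above): pad each Adam state tensor to the new parameter shape and load everything into a fresh optimizer via load_state_dict. It assumes amsgrad=True and that the old and grown networks yield their parameters in the same order:

import torch
import torch.optim as optim

def grow_adam(old_optimizer, new_network):
    # Return a fresh Adam whose state is the old state, zero-padded to the new shapes.
    old_state = old_optimizer.state_dict()['state']        # keyed by parameter index
    new_optimizer = optim.Adam(new_network.parameters(), amsgrad=True)
    state_dict = new_optimizer.state_dict()
    padded_state = {}
    for idx, p in enumerate(new_network.parameters()):
        if idx not in old_state:
            continue                                        # parameter with no old state: leave fresh
        entry = old_state[idx]
        padded = {'step': entry['step']}
        for key in ('exp_avg', 'exp_avg_sq', 'max_exp_avg_sq'):
            buf = torch.zeros_like(p)
            old_buf = entry[key]
            # copy the old statistics into the top-left block, matching increase_capacity()
            buf[tuple(slice(0, s) for s in old_buf.shape)] = old_buf
            padded[key] = buf
        padded_state[idx] = padded
    state_dict['state'] = padded_state
    new_optimizer.load_state_dict(state_dict)
    return new_optimizer

# usage after growing the network: optimizer = grow_adam(optimizer, network)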

Related

Why isn't my CartPole code working? I'm using a neural network with a loss function based on the Bellman equation

I've been trying to use a Q-Learning based approach to CartPole, but with integration of neural networks.
Here's my code:
import gymnasium as gym
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gym.utils.play import play

env = gym.make("CartPole-v1")
env2 = gym.make("CartPole-v1", render_mode="human")

inputs = tf.keras.Input(shape=(4,))
x = tf.keras.layers.Dense(5, activation=tf.nn.relu)(inputs)
x = tf.keras.layers.Dense(5, activation=tf.nn.relu)(x)
outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

epsilon = 0.5     # Chance of taking a random action
discount = 0.12
learningRate = 0.8
batchSize = 10    # Number of episodes to train on at once
epCount = 1000    # Number of episodes
decayMult = (0.001/epsilon)**(1/epCount)
disMult = (0.395/discount)**(2/epCount)
mode = 'S'        # Switch between Q-Learning and SARSA-Learning

optimizer = tf.optimizers.SGD(learning_rate=learningRate)
mse_loss = tf.keras.losses.MeanSquaredError()

def getNextAction(state):
    if np.random.random() > epsilon:
        return tf.argmax(model(state), axis=-1).numpy()[0]
    else:
        return np.random.randint(2)

def customLoss(state, state2, reward, action):
    oldQ = model(state)[0][action]
    newQ = reward + discount*(tf.reduce_max(model(state2), axis=-1))
    mse = mse_loss(tf.reshape(newQ, [1]), tf.reshape(oldQ, [1]))
    return tf.keras.backend.mean(mse)

'''def updateS(state,state2,reward,action,action2):
    oldSARSA = q_values[state,action]
    return oldSARSA + learningRate*(reward + discount*(q_values[state2,action2]) - oldSARSA)'''

rewardArr = []  # Array of rewards
lossArr = []    # Array of losses
epArr = []      # Array of episode numbers

for _ in range(epCount):
    terminated = False
    observation, info = env.reset()  # Initializes/resets environment, initializes observation and info values with base values
    rewSum = 0
    lossSum = 0
    observation = tf.convert_to_tensor(observation.reshape((1, 4)))
    while(terminated == False):
        state = observation
        with tf.GradientTape() as tape:
            actionIndex = getNextAction(observation)
            observation, reward, terminated, truncated, info = env.step(actionIndex)
            observation = tf.convert_to_tensor(observation.reshape((1, 4)))
            loss = customLoss(state, observation, reward, actionIndex)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        rewSum += reward
        lossSum += loss.numpy()
    lossArr.append(lossSum)
    rewardArr.append(rewSum)
    epArr.append(_+1)
    #if _ <= epCount/2:
    #    discount*=disMult
    print(f"\rEpisode: {_+1}", end="")
    '''if (_+1) % checkWait == 0:
        print(f"Score: {rewardAvg/checkWait}\nLoss: {lossAvg/checkWait}\nEpisode: {_+1}")
        print( "=========================")
        rewardAvg = 0
        lossAvg = 0'''

plt.subplot(121)
plt.plot(rewardArr, 'r-', label='score', linewidth=1)
plt.xlabel('Episode')
plt.legend()
plt.subplot(122)
plt.plot(lossArr, 'b-', label='loss', linewidth=1)
plt.xlabel('Episode')
plt.legend()
plt.show()

# visualization
terminated = False
observation, info = env2.reset()
observation = tf.convert_to_tensor(observation.reshape((1, 4)))
while(terminated == False):
    actionIndex = tf.argmax(model(observation), axis=-1).numpy()[0]
    observation, reward, terminated, truncated, info = env2.step(actionIndex)
    observation = tf.convert_to_tensor(observation.reshape((1, 4)))
I've tried changing the discount value over time, which resulted in the loss increasing as the discount value got higher.
I've tried adding more layers and/or increasing layer dimensions, which didn't seem to change anything.

Why does Dropout deteriorate my model accuracy?

The code below gives about 95% accuracy if I do not use dropout in training.
The accuracy drops to 11% if I use dropout.
The network is built using NumPy.
I have used a Neural_Network class which contains many layer objects.
The last layer has sigmoid activation and the rest have ReLU.
The code is:
import numpy as np
import idx2numpy as idx
import matplotlib.pyplot as plt

np.random.seed(0)

img = r"C:\Users\Aaditya\OneDrive\Documents\ML\train-image"
lbl = r'C:\Users\Aaditya\OneDrive\Documents\ML\train-labels-idx1-ubyte'
t_lbl = r'C:\Users\Aaditya\OneDrive\Documents\ML\t10k-labels.idx1-ubyte'
t_img = r'C:\Users\Aaditya\OneDrive\Documents\ML\t10k-images.idx3-ubyte'

image = idx.convert_from_file(img)
iput = np.reshape(image, (60000, 784))/255
otput = np.eye(10)[idx.convert_from_file(lbl)]
test_image = idx.convert_from_file(t_img)
test_input = np.reshape(test_image, (10000, 784))/255
test_output = idx.convert_from_file(t_lbl)

def sigmoid(x):
    sigmoid = 1/(1 + np.exp(-x))
    return sigmoid

def tanh(x):
    return np.tanh(x)

def relu(x):
    return np.where(x > 0, x, 0)

def reluprime(x):
    return (x > 0).astype(x.dtype)

def sigmoid_prime(x):
    return sigmoid(x)*(1 - sigmoid(x))

def tanh_prime(x):
    return 1 - tanh(x)**2

class Layer_Dense:
    def __init__(self, n_inputs, n_neurons, activation="sigmoid", keep_prob=1):
        self.n_neurons = n_neurons
        if activation == "sigmoid":
            self.activation = sigmoid
            self.a_prime = sigmoid_prime
        elif activation == "tanh":
            self.activation = tanh
            self.a_prime = tanh_prime
        else:
            self.activation = relu
            self.a_prime = reluprime
        self.keep_prob = keep_prob
        self.weights = np.random.randn(n_inputs, n_neurons)*0.1
        self.biases = np.random.randn(1, n_neurons)*0.1

    def cal_output(self, input, train=False):
        output = np.array(np.dot(input, self.weights) + self.biases, dtype="float128")
        if train == True:
            D = np.random.randn(1, self.n_neurons)
            self.D = (D > self.keep_prob).astype(int)
            output = output * self.D
        return output

    def forward(self, input):
        return self.activation(self.cal_output(input))

    def back_propagate(self, delta, ap, lr=1, keep_prob=1):
        dz = delta
        self.weights -= 0.001*lr*(np.dot(ap.T, dz)*self.D)
        self.biases -= 0.001*lr*(np.sum(dz, axis=0, keepdims=True)*self.D)
        return np.multiply(np.dot(dz, self.weights.T), (1 - ap**2))

class Neural_Network:
    def __init__(self, input, output):
        self.input = input
        self.output = output
        self.layers = []

    def Add_layer(self, n_neurons, activation="relu", keepprob=1):
        if len(self.layers) != 0:
            newL = Layer_Dense(self.layers[-1].n_neurons, n_neurons, activation, keep_prob=keepprob)
        else:
            newL = Layer_Dense(self.input.shape[1], n_neurons, activation, keep_prob=keepprob)
        self.layers.append(newL)

    def predict(self, input):
        output = input
        for layer in self.layers:
            output = layer.forward(output)
        return output

    def cal_zs(self, input):
        self.activations = []
        self.activations.append(input)
        output = input
        for layer in self.layers:
            z = layer.cal_output(output, train=True)
            activation = layer.activation(z)
            self.activations.append(activation)
            output = activation

    def train(self, input=None, output=None, lr=10):
        if input is None:
            input = self.input
            output = self.output
        if len(input) > 1000:
            indices = np.arange(input.shape[0])
            np.random.shuffle(indices)
            input = input[indices]
            output = output[indices]
            for _ in range(100):
                self.lr = lr
                for i in range(int(len(input)/100)):
                    self.lr *= 0.99
                    self.train(input[i*100:i*100+100], output[i*100:i*100+100], self.lr)
            return
        self.cal_zs(input)
        for i in range(1, len(self.layers)+1):
            if i == 1:
                delta = self.activations[-1] - output
                self.delta = self.layers[-1].back_propagate(delta, self.activations[-2], lr)
            else:
                self.delta = self.layers[-i].back_propagate(self.delta, self.activations[-i-1], lr)

    def MSE(self):
        predict = self.predict(self.input)
        error = (predict - self.output)**2
        mse = sum(sum(error))
        print(mse)

    def Logloss(self):
        predict = self.predict(self.input)
        error = np.multiply(self.output, np.log(predict)) + np.multiply(1-self.output, np.log(1-predict))
        logloss = -1*sum(sum(error))
        print(logloss)

    def accuracy(self):
        predict = self.predict(test_input)
        prediction = np.argmax(predict, axis=1)
        correct = np.mean(prediction == test_output)
        print(correct*100)

    # def train(self,input,output):

model = Neural_Network(iput, otput)
# model.Add_layer(4)
model.Add_layer(64)
model.Add_layer(16)
model.Add_layer(10, "sigmoid")
lrc = 6
for _ in range(10):
    model.accuracy()
    model.Logloss()
    model.train(lr=lrc)
model.accuracy()
I have used the MNIST database; the link is THIS
One of the reasons can be that you might be dropping too many neurons. In the code below
D = np.random.randn(1, self.n_neurons)
self.D = (D > self.keep_prob).astype(int)
the matrix generated in the first line might contain many values which are less than zero. Because of that, when comparing it with self.keep_prob (which has value 1), a lot of neurons are getting dropped.
Please try it with one change:
self.D = (D < self.keep_prob).astype(int)
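For reference, a small sketch (not the poster's code) of the standard inverted-dropout mask: draw from a uniform distribution and rescale the surviving units by 1/keep_prob so the expected activation stays the same and no extra scaling is needed at test time:

import numpy as np

def dropout_mask(shape, keep_prob):
    # keep each unit with probability keep_prob, then rescale the survivors (inverted dropout)
    D = (np.random.rand(*shape) < keep_prob).astype(float)
    return D / keep_prob

# e.g. inside cal_output(): output = output * dropout_mask(output.shape, self.keep_prob)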
There could be various reasons for that. One was specified by #anuragal.
Basically, dropout is used to reduce overfitting and to help the network correct errors. But when you use dropout right before your final layer, it could be that the network is unable to correct itself, thus leading to a lower accuracy.
Another reason could be that your network is small. Usually, shallow networks don't benefit much from dropout.

How can I save DDPG model?

I try to save the model using the saver method (I use the save function in the DDPG class to save), but when restoring the model the result is far from the one I saved (I save the model when the episodic reward is zero; the restore call in the code is commented out). My code is below with all the features. I use Python 3.7, gym 0.16.0 and TensorFlow version 1.13.1.
import tensorflow as tf
import numpy as np
import gym

epsiode_steps = 500
# learning rate for actor
lr_a = 0.001
# learning rate for critic
lr_c = 0.002
gamma = 0.9
alpha = 0.01
memory = 10000
batch_size = 32
render = True

class DDPG(object):
    def __init__(self, no_of_actions, no_of_states, a_bound):
        self.memory = np.zeros((memory, no_of_states * 2 + no_of_actions + 1), dtype=np.float32)
        # initialize pointer to point to our experience buffer
        self.pointer = 0
        self.sess = tf.Session()
        # initialize the variance for OU process for exploring policies
        self.noise_variance = 3.0
        self.no_of_actions, self.no_of_states, self.a_bound = no_of_actions, no_of_states, a_bound

        self.state = tf.placeholder(tf.float32, [None, no_of_states], 's')
        self.next_state = tf.placeholder(tf.float32, [None, no_of_states], 's_')
        self.reward = tf.placeholder(tf.float32, [None, 1], 'r')

        with tf.variable_scope('Actor'):
            self.a = self.build_actor_network(self.state, scope='eval', trainable=True)
            a_ = self.build_actor_network(self.next_state, scope='target', trainable=False)
        with tf.variable_scope('Critic'):
            q = self.build_crtic_network(self.state, self.a, scope='eval', trainable=True)
            q_ = self.build_crtic_network(self.next_state, a_, scope='target', trainable=False)

        self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
        self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
        self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
        self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')

        # update target value
        self.soft_replace = [
            [tf.assign(at, (1 - alpha) * at + alpha * ae), tf.assign(ct, (1 - alpha) * ct + alpha * ce)]
            for at, ae, ct, ce in zip(self.at_params, self.ae_params, self.ct_params, self.ce_params)]

        q_target = self.reward + gamma * q_
        # compute TD error i.e actual - predicted values
        td_error = tf.losses.mean_squared_error(labels=(self.reward + gamma * q_), predictions=q)
        # train the critic network with adam optimizer
        self.ctrain = tf.train.AdamOptimizer(lr_c).minimize(td_error, name="adam-ink", var_list=self.ce_params)

        a_loss = - tf.reduce_mean(q)
        # train the actor network with adam optimizer for minimizing the loss
        self.atrain = tf.train.AdamOptimizer(lr_a).minimize(a_loss, var_list=self.ae_params)

        tf.summary.FileWriter("logs2", self.sess.graph)
        # initialize all variables
        self.sess.run(tf.global_variables_initializer())
        # saver
        self.saver = tf.train.Saver()
        # self.saver.restore(self.sess, "Pendulum/nn.ckpt")

    def choose_action(self, s):
        a = self.sess.run(self.a, {self.state: s[np.newaxis, :]})[0]
        a = np.clip(np.random.normal(a, self.noise_variance), -2, 2)
        return a

    def learn(self):
        # soft target replacement
        self.sess.run(self.soft_replace)
        indices = np.random.choice(memory, size=batch_size)
        batch_transition = self.memory[indices, :]
        batch_states = batch_transition[:, :self.no_of_states]
        batch_actions = batch_transition[:, self.no_of_states: self.no_of_states + self.no_of_actions]
        batch_rewards = batch_transition[:, -self.no_of_states - 1: -self.no_of_states]
        batch_next_state = batch_transition[:, -self.no_of_states:]
        self.sess.run(self.atrain, {self.state: batch_states})
        self.sess.run(self.ctrain, {self.state: batch_states, self.a: batch_actions, self.reward: batch_rewards,
                                    self.next_state: batch_next_state})

    def store_transition(self, s, a, r, s_):
        trans = np.hstack((s, a, [r], s_))
        index = self.pointer % memory
        self.memory[index, :] = trans
        self.pointer += 1
        if self.pointer > memory:
            self.noise_variance *= 0.99995
            self.learn()

    def build_actor_network(self, s, scope, trainable):
        # Actor DPG
        with tf.variable_scope(scope):
            l1 = tf.layers.dense(s, 30, activation=tf.nn.tanh, name='l1', trainable=trainable)
            a = tf.layers.dense(l1, self.no_of_actions, activation=tf.nn.tanh, name='a', trainable=trainable)
            return tf.multiply(a, self.a_bound, name="scaled_a")

    def build_crtic_network(self, s, a, scope, trainable):
        with tf.variable_scope(scope):
            n_l1 = 30
            w1_s = tf.get_variable('w1_s', [self.no_of_states, n_l1], trainable=trainable)
            w1_a = tf.get_variable('w1_a', [self.no_of_actions, n_l1], trainable=trainable)
            b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
            net = tf.nn.tanh(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
            q = tf.layers.dense(net, 1, trainable=trainable)
            return q

    def save(self):
        self.saver.save(self.sess, "Pendulum/nn.ckpt")

env = gym.make("Pendulum-v0")
env = env.unwrapped
env.seed(1)

no_of_states = env.observation_space.shape[0]
no_of_actions = env.action_space.shape[0]
a_bound = env.action_space.high

ddpg = DDPG(no_of_actions, no_of_states, a_bound)
total_reward = []
# set the number of episodes
no_of_episodes = 300

for i in range(no_of_episodes):
    # initialize the environment
    s = env.reset()
    ep_reward = 0
    for j in range(epsiode_steps):
        env.render()
        # select action by adding noise through OU process
        a = ddpg.choose_action(s)
        # perform the action and move to the next state s
        s_, r, done, info = env.step(a)
        # store the transition to our experience buffer
        # sample some minibatch of experience and train the network
        ddpg.store_transition(s, a, r, s_)
        # update current state as next state
        s = s_
        # add episodic rewards
        ep_reward += r
        if int(ep_reward) == 0 and i > 150:
            ddpg.save()
            print("save")
            quit()
        if j == epsiode_steps - 1:
            total_reward.append(ep_reward)
            print('Episode:', i, ' Reward: %i' % int(ep_reward))
            break
I solved this problem completely by rewriting the code and adding the learning function in a separate session
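As an illustration only (this is not the answerer's actual rewrite), a minimal sketch of restoring the checkpoint into a fresh session after rebuilding the same graph, which is roughly what evaluating in a separate session looks like:

# Rebuild the graph, create a new session inside DDPG, then restore the saved
# variables before running an evaluation episode (no exploration noise).
tf.reset_default_graph()
eval_agent = DDPG(no_of_actions, no_of_states, a_bound)   # builds the graph and a tf.Session
eval_agent.saver.restore(eval_agent.sess, "Pendulum/nn.ckpt")

s = env.reset()
for _ in range(epsiode_steps):
    a = eval_agent.sess.run(eval_agent.a, {eval_agent.state: s[np.newaxis, :]})[0]
    s, r, done, info = env.step(a)
    env.render()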

Pytorch: Visualize model while training

I am training a neural network for regression, but it predicts a constant value during testing. That is why I want to visualize how the weights of the neural network change during training and see the weights change dynamically in a Jupyter notebook.
Currently, my model looks like this:
import torch
from torch import nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.inp = nn.Linear(2, 40)
        self.act1 = nn.Tanh()
        self.h1 = nn.Linear(40, 40)
        self.act2 = nn.Tanh()
        self.h2 = nn.Linear(40, 2)
        self.act3 = nn.Tanh()
        #self.h3 = nn.Linear(20, 20)
        #self.act4 = nn.Tanh()
        self.h4 = nn.Linear(2, 1)

    def forward_one_pt(self, x):
        out = self.inp(x)
        out = self.act1(out)
        out = self.h1(out)
        out = self.act2(out)
        out = self.h2(out)
        out = self.act3(out)
        #out = self.h3(out)
        #out = self.act4(out)
        out = self.h4(out)
        return out

    def forward(self, config):
        E = torch.zeros([config.shape[0], 1])
        for i in range(config.shape[0]):
            E[i] = self.forward_one_pt(config[i])
            # print("config[", i, "] = ", config[i], "E[", i, "] = ", E[i])
        return torch.sum(E, 0)
and my main function looks like this:
def main():
    learning_rate = 0.5
    n_pts = 1000
    t_pts = 100
    epochs = 15
    coords, E = load_data(n_pts, t_pts)
    # generating my data to NN
    G = get_symm(coords, save, load_symmetry, symmtery_pickle_file, eeta1, eeta2, Rs, ex, lambdaa, zeta, boxl, Rc, pi, E, scale)
    net = Net()
    if (cuda_flag):
        net.cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    net_trained = train(save, text_output, epochs, n_pts, G, E, net, t_pts, optimizer, criterion, out, cuda_flag)
    test(save, n_pts, t_pts, G, E, net_trained, out, criterion, cuda_flag)
    torch.save(net, save_model)
Any tutorials or answers would be helpful.
You can use model.state_dict() to see if your weights are updating across epochs:
old_state_dict = {}
for key in model.state_dict():
    old_state_dict[key] = model.state_dict()[key].clone()

output = model(input)

new_state_dict = {}
for key in model.state_dict():
    new_state_dict[key] = model.state_dict()[key].clone()

for key in old_state_dict:
    if not (old_state_dict[key] == new_state_dict[key]).all():
        print('Diff in {}'.format(key))
    else:
        print('NO Diff in {}'.format(key))
On a side note, you can vectorize your forward function instead of looping over it. Following would do the same job as your original forward function but much faster:
def forward(self, config):
    out = self.forward_one_pt(config)
    return torch.sum(out, 0)
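If the goal is to watch the weights change dynamically across epochs, one option (a sketch only; train_step() below is a hypothetical stand-in for whatever one epoch of your existing train() call does) is to record a norm of each weight matrix every epoch and plot the curves in the notebook:

import matplotlib.pyplot as plt

norms = {name: [] for name, p in net.named_parameters() if p.dim() > 1}
for epoch in range(epochs):
    train_step()                      # hypothetical: one epoch of your existing training loop
    for name, p in net.named_parameters():
        if p.dim() > 1:
            norms[name].append(p.detach().norm().item())

for name, values in norms.items():
    plt.plot(values, label=name)
plt.xlabel('Epoch')
plt.ylabel('L2 norm of weights')
plt.legend()
plt.show()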

Machine Learning reward artificially capping

So when I run this, it works perfectly; however, for some reason the reward caps at 200. I'm not sure what could be causing this. I'm new to machine learning and this is my first project, so sorry if I am missing something stupid. I hypothesize that done is triggering before I want it to, but playing with that hasn't led to anything. Thanks so much.
import gym
import tensorflow as tf
import numpy as np
import os
import sys

env = gym.make('CartPole-v0')
discount_rate = .95

# TODO Build the policy gradient neural network
class Agent:
    def __init__(self, num_actions, state_size):
        initializer = tf.contrib.layers.xavier_initializer()
        self.input_layer = tf.placeholder(dtype=tf.float32, shape=[None, state_size])
        # Neural net starts here
        hidden_layer = tf.layers.dense(self.input_layer, 8, activation=tf.nn.relu, kernel_initializer=initializer)
        hidden_layer_2 = tf.layers.dense(hidden_layer, 8, activation=tf.nn.relu, kernel_initializer=initializer)
        # Output of neural net
        out = tf.layers.dense(hidden_layer_2, num_actions, activation=None)
        self.outputs = tf.nn.softmax(out)
        self.choice = tf.argmax(self.outputs, axis=1)
        # Training Procedure
        self.rewards = tf.placeholder(shape=[None, ], dtype=tf.float32)
        self.actions = tf.placeholder(shape=[None, ], dtype=tf.int32)
        one_hot_actions = tf.one_hot(self.actions, num_actions)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=one_hot_actions)
        self.loss = tf.reduce_mean(cross_entropy * self.rewards)
        self.gradients = tf.gradients(self.loss, tf.trainable_variables())
        # Create a placeholder list for gradients
        self.gradients_to_apply = []
        for index, variable in enumerate(tf.trainable_variables()):
            gradient_placeholder = tf.placeholder(tf.float32)
            self.gradients_to_apply.append(gradient_placeholder)
        # Create the operation to update gradients with the gradients placeholder.
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-2)
        self.update_gradients = optimizer.apply_gradients(zip(self.gradients_to_apply, tf.trainable_variables()))

def discount_normalize_rewards(rewards):
    discounted_rewards = np.zeros_like(rewards)
    total_rewards = 0
    for i in reversed(range(len(rewards))):
        total_rewards = total_rewards * discount_rate + rewards[i]
        discounted_rewards[i] = total_rewards
    discounted_rewards -= np.mean(discounted_rewards)
    discounted_rewards /= np.std(discounted_rewards)
    return discounted_rewards

# initialize the training loop
tf.reset_default_graph()

# Modify these to match shape of actions and states in your environment
num_actions = 2
state_size = 4
path = "./cartpole-pg/"
training_episodes = 1000
max_steps_per_episode = 20000
episode_batch_size = 5

agent = Agent(num_actions, state_size)
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=2)

if not os.path.exists(path):
    os.makedirs(path)

with tf.Session() as sess:
    sess.run(init)
    total_episode_rewards = []
    # Create a buffer of 0'd gradients
    gradient_buffer = sess.run(tf.trainable_variables())
    for index, gradient in enumerate(gradient_buffer):
        gradient_buffer[index] = gradient * 0

    for episode in range(training_episodes):
        state = env.reset()
        episode_history = []
        episode_rewards = 0
        for step in range(max_steps_per_episode):
            if episode % 100 == 0:
                env.render()
            # Get weights for each action
            action_probabilities = sess.run(agent.outputs, feed_dict={agent.input_layer: [state]})
            action_choice = np.random.choice(range(num_actions), p=action_probabilities[0])
            state_next, reward, done, _ = env.step(action_choice)
            episode_history.append([state, action_choice, reward, state_next])
            state = state_next
            episode_rewards += reward
            if done:
                total_episode_rewards.append(episode_rewards)
                episode_history = np.array(episode_history)
                episode_history[:, 2] = discount_normalize_rewards(episode_history[:, 2])
                ep_gradients = sess.run(agent.gradients, feed_dict={agent.input_layer: np.vstack(episode_history[:, 0]),
                                                                    agent.actions: episode_history[:, 1],
                                                                    agent.rewards: episode_history[:, 2]})
                # add the gradients to the grad buffer:
                for index, gradient in enumerate(ep_gradients):
                    gradient_buffer[index] += gradient
                break

        if episode % episode_batch_size == 0:
            feed_dict_gradients = dict(zip(agent.gradients_to_apply, gradient_buffer))
            sess.run(agent.update_gradients, feed_dict=feed_dict_gradients)
            for index, gradient in enumerate(gradient_buffer):
                gradient_buffer[index] = gradient * 0

        if episode % 1 == 0:
            saver.save(sess, path + "pg-checkpoint", episode)
            print("Reward: " + str(total_episode_rewards[-1:]))

    env.close()
Episodes for CartPole terminate when the pole falls and at 200 successful steps. See max_episode_steps in the linked file if you want to change this. The reason there is a 200-step max is to make evaluating trials easier (i.e. you always get episode ends, so you can evaluate episode stats) and so that the environment doesn't get stuck in a never-ending trial.
register(
    id='CartPole-v0',
    entry_point='gym.envs.classic_control:CartPoleEnv',
    max_episode_steps=200,
    reward_threshold=195.0,
)
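If you do want longer episodes, one option (a sketch; 'CartPole-v0-long' is just a hypothetical id) is to register your own variant with a larger max_episode_steps and train on that instead:

import gym
from gym.envs.registration import register

register(
    id='CartPole-v0-long',                                 # hypothetical id; any unused name works
    entry_point='gym.envs.classic_control:CartPoleEnv',
    max_episode_steps=2000,                                # raise the 200-step cap
    reward_threshold=195.0,
)

env = gym.make('CartPole-v0-long')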
