Cartpole-v0 loss increasing using DQN - python

Hi, I'm trying to train a DQN to solve gym's CartPole problem.
For some reason the loss looks like this (orange line). Can y'all take a look at my code and help me with this? I've played around with the hyperparameters a decent bit, so I don't think they're the issue here.
class DQN(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(DQN, self).__init__()
        self.linear1 = nn.Linear(input_dim, 16)
        self.linear2 = nn.Linear(16, 32)
        self.linear3 = nn.Linear(32, 32)
        self.linear4 = nn.Linear(32, output_dim)

    def forward(self, x):
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        return self.linear4(x)
final_epsilon = 0.05
initial_epsilon = 1
epsilon_decay = 5000
global steps_done
steps_done = 0

def select_action(state):
    global steps_done
    sample = random.random()
    eps_threshold = final_epsilon + (initial_epsilon - final_epsilon) * \
                    math.exp(-1. * steps_done / epsilon_decay)
    if sample > eps_threshold:
        with torch.no_grad():
            state = torch.Tensor(state)
            steps_done += 1
            q_calc = model(state)
            node_activated = int(torch.argmax(q_calc))
            return node_activated
    else:
        node_activated = random.randint(0, 1)
        steps_done += 1
        return node_activated
class ReplayMemory(object):  # Stores [state, action, reward, next_state, done]
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = [[], [], [], [], []]

    def push(self, data):
        """Saves a transition."""
        for idx, point in enumerate(data):
            # print("Col {} appended {}".format(idx, point))
            self.memory[idx].append(point)

    def sample(self, batch_size):
        rows = random.sample(range(0, len(self.memory[0])), batch_size)
        experiences = [[], [], [], [], []]
        for row in rows:
            for col in range(5):
                experiences[col].append(self.memory[col][row])
        return experiences

    def __len__(self):
        return len(self.memory[0])
input_dim, output_dim = 4, 2
model = DQN(input_dim, output_dim)
target_net = DQN(input_dim, output_dim)
target_net.load_state_dict(model.state_dict())
target_net.eval()
tau = 2
discount = 0.99
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
memory = ReplayMemory(65536)
BATCH_SIZE = 128
def optimize_model():
    if len(memory) < BATCH_SIZE:
        return 0
    experiences = memory.sample(BATCH_SIZE)
    state_batch = torch.Tensor(experiences[0])
    action_batch = torch.LongTensor(experiences[1]).unsqueeze(1)
    reward_batch = torch.Tensor(experiences[2])
    next_state_batch = torch.Tensor(experiences[3])
    done_batch = experiences[4]
    pred_q = model(state_batch).gather(1, action_batch)
    next_state_q_vals = torch.zeros(BATCH_SIZE)
    for idx, next_state in enumerate(next_state_batch):
        if done_batch[idx] == True:
            next_state_q_vals[idx] = -1
        else:
            # .max in pytorch returns (values, idx), we only want vals
            next_state_q_vals[idx] = (target_net(next_state_batch[idx]).max(0)[0]).detach()
    better_pred = (reward_batch + next_state_q_vals).unsqueeze(1)
    loss = F.smooth_l1_loss(pred_q, better_pred)
    optimizer.zero_grad()
    loss.backward()
    for param in model.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
    return loss
points = []
losspoints = []

# save_state = torch.load("models/DQN_target_11.pth")
# model.load_state_dict(save_state['state_dict'])
# optimizer.load_state_dict(save_state['optimizer'])

env = gym.make('CartPole-v0')
for i_episode in range(5000):
    observation = env.reset()
    episode_loss = 0
    if i_episode % tau == 0:
        target_net.load_state_dict(model.state_dict())
    for t in range(1000):
        # env.render()
        state = observation
        action = select_action(observation)
        observation, reward, done, _ = env.step(action)
        if done:
            next_state = [0, 0, 0, 0]
        else:
            next_state = observation
        memory.push([state, action, reward, next_state, done])
        episode_loss = episode_loss + float(optimize_model())
        if done:
            points.append((i_episode, t + 1))
            print("Episode {} finished after {} timesteps".format(i_episode, t + 1))
            print("Avg Loss: ", episode_loss / (t + 1))
            losspoints.append((i_episode, episode_loss / (t + 1)))
            if (i_episode % 100 == 0):
                eps = final_epsilon + (initial_epsilon - final_epsilon) * \
                      math.exp(-1. * steps_done / epsilon_decay)
                print(eps)
            if ((i_episode + 1) % 5001 == 0):
                save = {'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}
                torch.save(save, "models/DQN_target_" + str(i_episode // 5000) + ".pth")
            break
env.close()

x = [coord[0] * 100 for coord in points]
y = [coord[1] for coord in points]
x2 = [coord[0] * 100 for coord in losspoints]
y2 = [coord[1] for coord in losspoints]
plt.plot(x, y)
plt.plot(x2, y2)
plt.show()
I basically followed the PyTorch DQN tutorial, except using the state returned by the env rather than the pixels. I also changed the replay memory because I was having issues there. Other than that, I left everything else pretty much the same.
Edit:
I tried overfitting on a small batch, and the loss looks like this without updating the target net and like this when updating it.
Edit 2:
This is definitely an issue with the target net; when I tried removing it, the loss no longer seemed to increase exponentially.

Your tau value is too small; updating the target network that often makes DQN training unstable. You can try 1000 (OpenAI Baselines' DQN example) or 10000 (DeepMind's Nature paper).
In DeepMind's 2015 Nature paper, it states that:
The second modification to online Q-learning aimed at further improving the stability of our method with neural networks is to use a separate network for generating the targets y_j in the Q-learning update. More precisely, every C updates we clone the network Q to obtain a target network Q' and use Q' for generating the Q-learning targets y_j for the following C updates to Q.
This modification makes the algorithm more stable compared to standard online Q-learning, where an update that increases Q(s_t, a_t) often also increases Q(s_{t+1}, a) for all a and hence also increases the target y_j, possibly leading to oscillations or divergence of the policy. Generating the targets using the older set of parameters adds a delay between the time an update to Q is made and the time the update affects the targets y_j, making divergence or oscillations much more unlikely.
Human-level control through deep reinforcement learning, Mnih et al., 2015
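In code, the cloning described above is just a periodic hard copy of the online network's parameters into the target network. A minimal sketch (C is the update period, i.e. your tau; the names here are illustrative, not taken from your code):

# every C-th update (or episode), clone the online Q-network into the target network
if step % C == 0:
    target_net.load_state_dict(model.state_dict())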
I've run your code with tau=2, tau=10, tau=100, tau=1000 and tau=10000. An update frequency of tau=100 solves the problem (the agent reaches the maximum of 200 steps).
(Plots for tau=2, tau=10, tau=100, tau=1000 and tau=10000.)
Below is the modified version of your code.
import random
import math
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
import gym

class DQN(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(DQN, self).__init__()
        self.linear1 = nn.Linear(input_dim, 16)
        self.linear2 = nn.Linear(16, 32)
        self.linear3 = nn.Linear(32, 32)
        self.linear4 = nn.Linear(32, output_dim)

    def forward(self, x):
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        return self.linear4(x)
final_epsilon = 0.05
initial_epsilon = 1
epsilon_decay = 5000
global steps_done
steps_done = 0

def select_action(state):
    global steps_done
    sample = random.random()
    eps_threshold = final_epsilon + (initial_epsilon - final_epsilon) * \
                    math.exp(-1. * steps_done / epsilon_decay)
    if sample > eps_threshold:
        with torch.no_grad():
            state = torch.Tensor(state)
            steps_done += 1
            q_calc = model(state)
            node_activated = int(torch.argmax(q_calc))
            return node_activated
    else:
        node_activated = random.randint(0, 1)
        steps_done += 1
        return node_activated
class ReplayMemory(object):  # Stores [state, action, reward, next_state, done]
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = [[], [], [], [], []]

    def push(self, data):
        """Saves a transition."""
        for idx, point in enumerate(data):
            # print("Col {} appended {}".format(idx, point))
            self.memory[idx].append(point)

    def sample(self, batch_size):
        rows = random.sample(range(0, len(self.memory[0])), batch_size)
        experiences = [[], [], [], [], []]
        for row in rows:
            for col in range(5):
                experiences[col].append(self.memory[col][row])
        return experiences

    def __len__(self):
        return len(self.memory[0])
input_dim, output_dim = 4, 2
model = DQN(input_dim, output_dim)
target_net = DQN(input_dim, output_dim)
target_net.load_state_dict(model.state_dict())
target_net.eval()
tau = 100
discount = 0.99
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
memory = ReplayMemory(65536)
BATCH_SIZE = 128
def optimize_model():
    if len(memory) < BATCH_SIZE:
        return 0
    experiences = memory.sample(BATCH_SIZE)
    state_batch = torch.Tensor(experiences[0])
    action_batch = torch.LongTensor(experiences[1]).unsqueeze(1)
    reward_batch = torch.Tensor(experiences[2])
    next_state_batch = torch.Tensor(experiences[3])
    done_batch = experiences[4]
    pred_q = model(state_batch).gather(1, action_batch)
    next_state_q_vals = torch.zeros(BATCH_SIZE)
    for idx, next_state in enumerate(next_state_batch):
        if done_batch[idx] == True:
            next_state_q_vals[idx] = -1
        else:
            # .max in pytorch returns (values, idx), we only want vals
            next_state_q_vals[idx] = (target_net(next_state_batch[idx]).max(0)[0]).detach()
    better_pred = (reward_batch + next_state_q_vals).unsqueeze(1)
    loss = F.smooth_l1_loss(pred_q, better_pred)
    optimizer.zero_grad()
    loss.backward()
    for param in model.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
    return loss
points = []
losspoints = []

# save_state = torch.load("models/DQN_target_11.pth")
# model.load_state_dict(save_state['state_dict'])
# optimizer.load_state_dict(save_state['optimizer'])

env = gym.make('CartPole-v0')
for i_episode in range(5000):
    observation = env.reset()
    episode_loss = 0
    if i_episode % tau == 0:
        target_net.load_state_dict(model.state_dict())
    for t in range(1000):
        # env.render()
        state = observation
        action = select_action(observation)
        observation, reward, done, _ = env.step(action)
        if done:
            next_state = [0, 0, 0, 0]
        else:
            next_state = observation
        memory.push([state, action, reward, next_state, done])
        episode_loss = episode_loss + float(optimize_model())
        if done:
            points.append((i_episode, t + 1))
            print("Episode {} finished after {} timesteps".format(i_episode, t + 1))
            print("Avg Loss: ", episode_loss / (t + 1))
            losspoints.append((i_episode, episode_loss / (t + 1)))
            if (i_episode % 100 == 0):
                eps = final_epsilon + (initial_epsilon - final_epsilon) * \
                      math.exp(-1. * steps_done / epsilon_decay)
                print(eps)
            if ((i_episode + 1) % 5001 == 0):
                save = {'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}
                torch.save(save, "models/DQN_target_" + str(i_episode // 5000) + ".pth")
            break
env.close()

x = [coord[0] * 100 for coord in points]
y = [coord[1] for coord in points]
x2 = [coord[0] * 100 for coord in losspoints]
y2 = [coord[1] for coord in losspoints]
plt.plot(x, y)
plt.plot(x2, y2)
plt.show()
Here's the result of your plotting code (plots for tau=100 and tau=10000).

Related

How can I make the target size equal the input size in my DQN code?

Hi everyone! When I was doing DQN programming, I encountered some problems. The error says:
"UserWarning: Using a target size (torch.Size([32, 32])) that is different to the input size (torch.Size([32, 1])). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.
return F.mse_loss(input, target, reduction=self.reduction)"
I don't know where the mistake is because I am new to RL, and some of this code is borrowed from other people, so I don't understand some parts.
Here is the code:
# hyperparameters
gamma = 0.9
TARGET_REPLACE_ITER = 20
memory_capability = 100
batch_size = 32
learning_rate = 0.001
n_state = 5
n_action = 32
neural network code:
class NN(nn.Module):
    def __init__(self):
        super(NN, self).__init__()
        self.fc1 = nn.Linear(n_state, 32)
        self.fc1.weight.data.normal_(0, 0.1)
        self.fc2 = nn.Linear(32, 64)
        self.out = nn.Linear(64, n_action)
        self.out.weight.data.normal_(0, 0.1)

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        action_value = self.out(x)
        return action_value
agent code:
class Agent(object):
    def __init__(self):
        self.learn_step_counter = 0
        self.memory = np.zeros((memory_capability, n_state * 2 + 2))
        self.memory_cntr = 0
        self.eval_net, self.target_net = NN(), NN()
        self.loss_func = nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=learning_rate)

    def choose_action(self, state):
        state = torch.unsqueeze(torch.FloatTensor(state), 0)  # state is a 1-D np.array, shape = (5,)
        if random.random() < epsilon:
            action = random.randint(0, len(stringlist) - 1)
        else:
            action_value = self.eval_net.forward(state)
            action = torch.max(action_value, 1)[1].numpy()[0]
        return action

    def learn(self):
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1
        sample_index = np.random.choice(memory_capability, batch_size)
        b_memory = self.memory[sample_index, :]
        b_s = torch.FloatTensor(b_memory[:, :n_state])
        b_a = torch.LongTensor(b_memory[:, n_state:n_state + 1].astype(int))
        b_r = torch.FloatTensor(b_memory[:, n_state + 1:n_state + 2])
        b_s_ = torch.FloatTensor(b_memory[:, -n_state:])
        q_eval = self.eval_net(b_s).gather(1, b_a)  # shape (batch, 1)
        q_next = self.target_net(b_s_).detach()
        q_target = b_r + gamma * q_next.max(1)[0]  # other people's code said the shape is (batch, 1) = (32, 1), but when I ran it, it was (batch, batch) = (32, 32); I don't know why
        loss = self.loss_func(q_eval, q_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def store_transition(self, state, action, reward, state_):
        transition = np.hstack((state, action, reward, state_))
        index = self.memory_cntr % memory_capability
        self.memory[index, :] = transition
        self.memory_cntr += 1
The problem is probably in learn(), but I don't know how to modify it. I would appreciate it if someone could help me. Thanks a lot!
The bug is exactly at the line you pointed out:
q_target = b_r + gamma * q_next.max(1)[0]
Here q_next is of shape [batch_size, n_action], so q_next.max(1)[0] is of shape [batch_size], while b_r has shape [batch_size, 1]. Adding those two entities does not throw an error because PyTorch broadcasts them to [batch_size, batch_size], which is where the (32, 32) target comes from. The fix is to keep the max over actions as shape [batch_size, 1] so that q_target has the same shape as q_eval, e.g. by using q_next.max(1, keepdim=True)[0] (equivalently, q_next.max(1)[0].unsqueeze(1)).
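A minimal sketch of the corrected target computation (variable names follow the code in the question; the shape comments are the point):

q_eval = self.eval_net(b_s).gather(1, b_a)    # [batch_size, 1]
q_next = self.target_net(b_s_).detach()       # [batch_size, n_action]
max_q_next = q_next.max(1, keepdim=True)[0]   # [batch_size, 1] instead of [batch_size]
q_target = b_r + gamma * max_q_next           # [batch_size, 1], matches q_eval
loss = self.loss_func(q_eval, q_target)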

Why is my DQN (Deep Q Network) not learning?

I am training a DQN (Deep Q Network) on the CartPole problem from OpenAI's gym, but when I start training, the total score from an episode decreases instead of increasing. I don't know if it is helpful, but I noticed that the AI prefers one action over the other and refuses to do anything else (unless it is forced to by the epsilon-greedy strategy), at least for some time. I tried my best, but I just can't figure out what is going on.
Here is my code:
import torch as t
import torch.nn as nn
import torch.nn.functional as f
import random as r

class QNet:
    def predict(self, x: t.Tensor) -> t.Tensor:
        pass

    @staticmethod
    def copy_weights(origin: [], target: []):
        for origin_layer, target_layer in zip(origin, target):
            target_layer.weight = nn.Parameter(origin_layer.weight.clone())
class Memory:
    def __init__(self, state: t.Tensor, next_state: t.Tensor, action: int, reward: float):
        self.state = state
        self.next_state = next_state
        self.action = action
        self.reward = reward

class ReplayMemory:
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.memories = []

    def add_memory(self, memory: Memory):
        self.memories.append(memory)
        if len(self.memories) > self.capacity:
            self.memories.pop(0)

    def get_batch(self, size: int):
        if len(self.memories) < size:
            raise Exception("There are not enough memories to make a batch.")
        start_index = r.randint(0, len(self.memories) - size)
        end_index = start_index + size
        return self.memories[start_index:end_index]
class QLearning:
    def __init__(self, net: QNet, target_net: QNet, optimizer, gamma: float):
        self.net = net
        self.target_net = target_net
        self.optimizer = optimizer
        self.gamma = gamma

    def train(self, batch: [Memory]):
        batched_pred = []
        batched_opt_pred = []
        for sample in batch:
            pred = self.net.predict(sample.state)
            opt_pred = pred.clone()
            opt_pred[sample.action] = sample.reward
            if sample.next_state is not None:
                opt_pred[sample.action] += t.max(self.target_net.predict(sample.next_state)) * self.gamma
            batched_pred.append(pred)
            batched_opt_pred.append(opt_pred)
        loss = f.mse_loss(t.stack(batched_pred), t.stack(batched_opt_pred))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
import gym
from qlearning import *
from torch import optim  # needed for optim.Adam below

env = gym.make("CartPole-v1")
state = t.tensor(env.reset(), dtype=t.float)

class Agent(nn.Module, QNet):
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(4, 32)
        self.l2 = nn.Linear(32, 16)
        self.l3 = nn.Linear(16, 8)
        self.l4 = nn.Linear(8, 4)
        self.l5 = nn.Linear(4, 2)

    def predict(self, x):
        y = f.relu(self.l1(x))
        y = f.relu(self.l2(y))
        y = f.relu(self.l3(y))
        y = f.relu(self.l4(y))
        return self.l5(y)

agent = Agent()
target_agent = Agent()
q = QLearning(agent, target_agent, optim.Adam(agent.parameters(), lr=0.001), 0.9)
replay_memory = ReplayMemory(100000)
epsilon = 1
epsilon_dec = 1 / 1000
total_reward = 0

for i in range(1000):
    env.render()
    action = 0
    if r.random() > epsilon:
        action = t.argmax(agent.predict(state)).item()
    else:
        action = env.action_space.sample()
    epsilon -= epsilon_dec
    next_state, reward, done, info = env.step(action)
    next_state = t.tensor(next_state, dtype=t.float)
    if done:
        reward = -1
        replay_memory.add_memory(Memory(state, None, action, reward))
    else:
        replay_memory.add_memory(Memory(state, next_state, action, reward))
    total_reward += reward
    if done:
        state = t.tensor(env.reset(), dtype=t.float)
        # print(int(total_reward))
        total_reward = 0
    if len(replay_memory.memories) >= 10:
        q.train(replay_memory.get_batch(10))
    if i % 10:
        QNet.copy_weights([agent.l1, agent.l2, agent.l3, agent.l4, agent.l5],
                          [target_agent.l1, target_agent.l2, target_agent.l3, target_agent.l4, target_agent.l5])
    state = next_state
env.close()

How can I save DDPG model?

I try to save the model using the saver method (I use the save function in the DDPG class to save), but when restoring the model, the result is far from the one I saved (I save the model when the episodic reward is zero; the restore method in the code is commented out). My code is below with all the functions. I use Python 3.7, gym 0.16.0 and TensorFlow 1.13.1.
import tensorflow as tf
import numpy as np
import gym
epsiode_steps = 500
# learning rate for actor
lr_a = 0.001
# learning rate for critic
lr_c = 0.002
gamma = 0.9
alpha = 0.01
memory = 10000
batch_size = 32
render = True
class DDPG(object):
    def __init__(self, no_of_actions, no_of_states, a_bound):
        self.memory = np.zeros((memory, no_of_states * 2 + no_of_actions + 1), dtype=np.float32)
        # initialize pointer to point to our experience buffer
        self.pointer = 0
        self.sess = tf.Session()
        # initialize the variance for OU process for exploring policies
        self.noise_variance = 3.0
        self.no_of_actions, self.no_of_states, self.a_bound = no_of_actions, no_of_states, a_bound
        self.state = tf.placeholder(tf.float32, [None, no_of_states], 's')
        self.next_state = tf.placeholder(tf.float32, [None, no_of_states], 's_')
        self.reward = tf.placeholder(tf.float32, [None, 1], 'r')
        with tf.variable_scope('Actor'):
            self.a = self.build_actor_network(self.state, scope='eval', trainable=True)
            a_ = self.build_actor_network(self.next_state, scope='target', trainable=False)
        with tf.variable_scope('Critic'):
            q = self.build_crtic_network(self.state, self.a, scope='eval', trainable=True)
            q_ = self.build_crtic_network(self.next_state, a_, scope='target', trainable=False)
        self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
        self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
        self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
        self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
        # update target value
        self.soft_replace = [
            [tf.assign(at, (1 - alpha) * at + alpha * ae), tf.assign(ct, (1 - alpha) * ct + alpha * ce)]
            for at, ae, ct, ce in zip(self.at_params, self.ae_params, self.ct_params, self.ce_params)]
        q_target = self.reward + gamma * q_
        # compute TD error i.e. actual - predicted values
        td_error = tf.losses.mean_squared_error(labels=(self.reward + gamma * q_), predictions=q)
        # train the critic network with adam optimizer
        self.ctrain = tf.train.AdamOptimizer(lr_c).minimize(td_error, name="adam-ink", var_list=self.ce_params)
        a_loss = - tf.reduce_mean(q)
        # train the actor network with adam optimizer for minimizing the loss
        self.atrain = tf.train.AdamOptimizer(lr_a).minimize(a_loss, var_list=self.ae_params)
        tf.summary.FileWriter("logs2", self.sess.graph)
        # initialize all variables
        self.sess.run(tf.global_variables_initializer())
        # saver
        self.saver = tf.train.Saver()
        # self.saver.restore(self.sess, "Pendulum/nn.ckpt")
# self.saver.restore(self.sess, "Pendulum/nn.ckpt")
    def choose_action(self, s):
        a = self.sess.run(self.a, {self.state: s[np.newaxis, :]})[0]
        a = np.clip(np.random.normal(a, self.noise_variance), -2, 2)
        return a

    def learn(self):
        # soft target replacement
        self.sess.run(self.soft_replace)
        indices = np.random.choice(memory, size=batch_size)
        batch_transition = self.memory[indices, :]
        batch_states = batch_transition[:, :self.no_of_states]
        batch_actions = batch_transition[:, self.no_of_states: self.no_of_states + self.no_of_actions]
        batch_rewards = batch_transition[:, -self.no_of_states - 1: -self.no_of_states]
        batch_next_state = batch_transition[:, -self.no_of_states:]
        self.sess.run(self.atrain, {self.state: batch_states})
        self.sess.run(self.ctrain, {self.state: batch_states, self.a: batch_actions, self.reward: batch_rewards,
                                    self.next_state: batch_next_state})

    def store_transition(self, s, a, r, s_):
        trans = np.hstack((s, a, [r], s_))
        index = self.pointer % memory
        self.memory[index, :] = trans
        self.pointer += 1
        if self.pointer > memory:
            self.noise_variance *= 0.99995
            self.learn()
    def build_actor_network(self, s, scope, trainable):
        # Actor DPG
        with tf.variable_scope(scope):
            l1 = tf.layers.dense(s, 30, activation=tf.nn.tanh, name='l1', trainable=trainable)
            a = tf.layers.dense(l1, self.no_of_actions, activation=tf.nn.tanh, name='a', trainable=trainable)
            return tf.multiply(a, self.a_bound, name="scaled_a")

    def build_crtic_network(self, s, a, scope, trainable):
        with tf.variable_scope(scope):
            n_l1 = 30
            w1_s = tf.get_variable('w1_s', [self.no_of_states, n_l1], trainable=trainable)
            w1_a = tf.get_variable('w1_a', [self.no_of_actions, n_l1], trainable=trainable)
            b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
            net = tf.nn.tanh(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
            q = tf.layers.dense(net, 1, trainable=trainable)
            return q

    def save(self):
        self.saver.save(self.sess, "Pendulum/nn.ckpt")
env = gym.make("Pendulum-v0")
env = env.unwrapped
env.seed(1)
no_of_states = env.observation_space.shape[0]
no_of_actions = env.action_space.shape[0]
a_bound = env.action_space.high
ddpg = DDPG(no_of_actions, no_of_states, a_bound)
total_reward = []
# set the number of episodes
no_of_episodes = 300
for i in range(no_of_episodes):
# initialize the environment
s = env.reset()
ep_reward = 0
for j in range(epsiode_steps):
env.render()
# select action by adding noise through OU process
a = ddpg.choose_action(s)
# peform the action and move to the next state s
s_, r, done, info = env.step(a)
# store the the transition to our experience buffer
# sample some minibatch of experience and train the network
ddpg.store_transition(s, a, r, s_)
# update current state as next state
s = s_
# add episodic rewards
ep_reward += r
if int(ep_reward) == 0 and i > 150:
ddpg.save()
print("save")
quit()
if j == epsiode_steps - 1:
total_reward.append(ep_reward)
print('Episode:', i, ' Reward: %i' % int(ep_reward))
break
I solved this problem completely by rewriting the code and adding the learning function in a separate session
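For reference, a minimal sketch of the usual tf.train.Saver save/restore pattern in TF 1.x; the checkpoint path here is illustrative, not taken from the code above:

import tensorflow as tf

# Build the graph first, then create one Session and one Saver for it.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()

# After training, write a checkpoint (several files next to the given prefix).
saver.save(sess, "checkpoints/model.ckpt")

# Later: rebuild the same graph, then restore the weights into the session.
# restore() overwrites the variable values, so no re-initialization is needed afterwards.
saver.restore(sess, "checkpoints/model.ckpt")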

Understanding Gradient Policy Deriving

I'm trying to recreate a very simple example of Policy Gradient from its original resource, Andrej Karpathy's blog. In that article, you will find an example with CartPole and a Policy Gradient with a list of weights and a Softmax activation. Here is my recreated and very simple example of a CartPole policy gradient, which works perfectly.
import gym
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
import copy

NUM_EPISODES = 4000
LEARNING_RATE = 0.000025
GAMMA = 0.99

# noinspection PyMethodMayBeStatic
class Agent:
    def __init__(self):
        self.poly = PolynomialFeatures(1)
        self.w = np.random.rand(5, 2)

    def policy(self, state):
        z = state.dot(self.w)
        exp = np.exp(z)
        return exp / np.sum(exp)

    def __softmax_grad(self, softmax):
        s = softmax.reshape(-1, 1)
        return np.diagflat(s) - np.dot(s, s.T)

    def grad(self, probs, action, state):
        dsoftmax = self.__softmax_grad(probs)[action, :]
        dlog = dsoftmax / probs[0, action]
        grad = state.T.dot(dlog[None, :])
        return grad

    def update_with(self, grads, rewards):
        for i in range(len(grads)):
            # Loop through everything that happened in the episode
            # and update towards the log policy gradient times **FUTURE** reward
            total_grad_effect = 0
            for t, r in enumerate(rewards[i:]):
                total_grad_effect += r * (GAMMA ** r)
            self.w += LEARNING_RATE * grads[i] * total_grad_effect
            print("Grads update: " + str(np.sum(grads[i])))
def main(argv):
    env = gym.make('CartPole-v0')
    np.random.seed(1)
    agent = Agent()
    complete_scores = []
    for e in range(NUM_EPISODES):
        state = env.reset()[None, :]
        state = agent.poly.fit_transform(state)
        rewards = []
        grads = []
        score = 0
        while True:
            probs = agent.policy(state)
            action_space = env.action_space.n
            action = np.random.choice(action_space, p=probs[0])
            next_state, reward, done, _ = env.step(action)
            next_state = next_state[None, :]
            next_state = agent.poly.fit_transform(next_state.reshape(1, 4))
            grad = agent.grad(probs, action, state)
            grads.append(grad)
            rewards.append(reward)
            score += reward
            state = next_state
            if done:
                break
        agent.update_with(grads, rewards)
        complete_scores.append(score)
    env.close()
    plt.plot(np.arange(NUM_EPISODES), complete_scores)
    plt.savefig('image1.png')

if __name__ == '__main__':
    main(None)
Question
I'm trying to do almost the same example, but with a Sigmoid activation (just for simplicity). That is all I need to do: switch the activation in the model from Softmax to Sigmoid, which should work for sure (based on the explanation below). But my Policy Gradient model doesn't learn anything and stays random. Any suggestions?
import gym
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures

NUM_EPISODES = 4000
LEARNING_RATE = 0.000025
GAMMA = 0.99

# noinspection PyMethodMayBeStatic
class Agent:
    def __init__(self):
        self.poly = PolynomialFeatures(1)
        self.w = np.random.rand(5, 1) - 0.5

    # Our policy that maps state to action parameterized by w
    # noinspection PyShadowingNames
    def policy(self, state):
        z = np.sum(state.dot(self.w))
        return self.sigmoid(z)

    def sigmoid(self, x):
        s = 1 / (1 + np.exp(-x))
        return s

    def sigmoid_grad(self, sig_x):
        return sig_x * (1 - sig_x)

    def grad(self, probs, action, state):
        dsoftmax = self.sigmoid_grad(probs)
        dlog = dsoftmax / probs
        grad = state.T.dot(dlog)
        grad = grad.reshape(5, 1)
        return grad

    def update_with(self, grads, rewards):
        if len(grads) < 50:
            return
        for i in range(len(grads)):
            # Loop through everything that happened in the episode
            # and update towards the log policy gradient times **FUTURE** reward
            total_grad_effect = 0
            for t, r in enumerate(rewards[i:]):
                total_grad_effect += r * (GAMMA ** r)
            self.w += LEARNING_RATE * grads[i] * total_grad_effect
def main(argv):
    env = gym.make('CartPole-v0')
    np.random.seed(1)
    agent = Agent()
    complete_scores = []
    for e in range(NUM_EPISODES):
        state = env.reset()[None, :]
        state = agent.poly.fit_transform(state)
        rewards = []
        grads = []
        score = 0
        while True:
            probs = agent.policy(state)
            action_space = env.action_space.n
            action = np.random.choice(action_space, p=[1 - probs, probs])
            next_state, reward, done, _ = env.step(action)
            next_state = next_state[None, :]
            next_state = agent.poly.fit_transform(next_state.reshape(1, 4))
            grad = agent.grad(probs, action, state)
            grads.append(grad)
            rewards.append(reward)
            score += reward
            state = next_state
            if done:
                break
        agent.update_with(grads, rewards)
        complete_scores.append(score)
    env.close()
    plt.plot(np.arange(NUM_EPISODES), complete_scores)
    plt.savefig('image1.png')

if __name__ == '__main__':
    main(None)
Plotting the learning curve shows it stays random; tuning the hyperparameters does not help. Below is a sample image.
References:
Deep Reinforcement Learning: Pong from Pixels
An introduction to Policy Gradients with Cartpole and Doom
Deriving Policy Gradients and Implementing REINFORCE
Machine Learning Trick of the Day (5): Log Derivative Trick
UPDATE
It seems like the answer below does some work, judging from the graphic. But it is not the log probability, and not even the gradient of the policy, and it changes the whole purpose of the policy gradient method. Please check the references above. Following the image, we have the next statement:
I need to take the gradient of the log of my policy (which is simply weights and a sigmoid activation). Please let me know in case of any questions.
The problem is with the grad method.
def grad(self, probs, action, state):
    dsoftmax = self.sigmoid_grad(probs)
    dlog = dsoftmax / probs
    grad = state.T.dot(dlog)
    grad = grad.reshape(5, 1)
    return grad
In the original code, Softmax was used along with the cross-entropy loss function. When you switch the activation to Sigmoid, the proper loss function becomes binary cross-entropy. The purpose of the grad method is to calculate the gradient of the loss function with respect to the weights. Sparing the details, the proper gradient is given by (probs - action) * state in the terminology of your program. The last thing is to add a minus sign: we want to maximize the negative of the loss function.
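For completeness, here is a short derivation (standard REINFORCE algebra, not specific to this code) of why that expression is the log-policy gradient for a sigmoid (Bernoulli) policy. With z = state . w, the policy is pi(a=1 | s) = sigmoid(z) and pi(a=0 | s) = 1 - sigmoid(z). Differentiating the log-probability of the taken action with respect to w gives
d/dw log pi(a | s) = (action - sigmoid(z)) * state = -(probs - action) * state,
which is exactly what the corrected grad method below returns; its negative, (probs - action) * state, is the binary cross-entropy gradient.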
The proper grad method is thus:
def grad(self, probs, action, state):
    grad = state.T.dot(probs - action)
    return -grad
Another change that you might want to make is to increase the learning rate. LEARNING_RATE = 0.0001 and NUM_EPISODES = 5000 will produce the following plot:
The convergence will be much faster if the weights are initialized using a Gaussian distribution with zero mean and small variance:
def __init__(self):
    self.poly = PolynomialFeatures(1)
    self.w = np.random.randn(5, 1) * 0.01
UPDATE
Added complete code to reproduce the results:
import gym
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures

NUM_EPISODES = 5000
LEARNING_RATE = 0.0001
GAMMA = 0.99

# noinspection PyMethodMayBeStatic
class Agent:
    def __init__(self):
        self.poly = PolynomialFeatures(1)
        self.w = np.random.randn(5, 1) * 0.01

    # Our policy that maps state to action parameterized by w
    # noinspection PyShadowingNames
    def policy(self, state):
        z = np.sum(state.dot(self.w))
        return self.sigmoid(z)

    def sigmoid(self, x):
        s = 1 / (1 + np.exp(-x))
        return s

    def sigmoid_grad(self, sig_x):
        return sig_x * (1 - sig_x)

    def grad(self, probs, action, state):
        grad = state.T.dot(probs - action)
        return -grad

    def update_with(self, grads, rewards):
        if len(grads) < 50:
            return
        for i in range(len(grads)):
            # Loop through everything that happened in the episode
            # and update towards the log policy gradient times **FUTURE** reward
            total_grad_effect = 0
            for t, r in enumerate(rewards[i:]):
                total_grad_effect += r * (GAMMA ** r)
            self.w += LEARNING_RATE * grads[i] * total_grad_effect
def main(argv):
    env = gym.make('CartPole-v0')
    np.random.seed(1)
    agent = Agent()
    complete_scores = []
    for e in range(NUM_EPISODES):
        state = env.reset()[None, :]
        state = agent.poly.fit_transform(state)
        rewards = []
        grads = []
        score = 0
        while True:
            probs = agent.policy(state)
            action_space = env.action_space.n
            action = np.random.choice(action_space, p=[1 - probs, probs])
            next_state, reward, done, _ = env.step(action)
            next_state = next_state[None, :]
            next_state = agent.poly.fit_transform(next_state.reshape(1, 4))
            grad = agent.grad(probs, action, state)
            grads.append(grad)
            rewards.append(reward)
            score += reward
            state = next_state
            if done:
                break
        agent.update_with(grads, rewards)
        complete_scores.append(score)
    env.close()
    plt.plot(np.arange(NUM_EPISODES), complete_scores)
    plt.savefig('image1.png')

if __name__ == '__main__':
    main(None)

Score being minimised by pytorch NN with the Cartpole problem

I am trying to solve the CartPole problem in OpenAI gym by training a simple 2-layer NN in PyTorch. The method used is DQN, yet results converge on a maximum score of around 8 or 9 and are not seen to improve over time or with training; instead, the score gets lower with training. How might this be improved, and what is wrong in the code that is making it do this? Below is the code used:
import gym
import random
import math
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
from collections import namedtuple
import numpy as np

class network(nn.Module):
    def __init__(self):
        nn.Module.__init__(self)
        # network takes 4 inputs (state, action, next_state, reward), hidden layer then has
        # 256 inputs and the network has 2 outputs (the q value of going left or right)
        # in this network the index of the output references the action.
        self.l1 = nn.Linear(4, 256)
        self.l2 = nn.Linear(256, 2)

    def forward(self, x):
        # forward function defines how the model will run
        x = F.relu(self.l1(x))
        x = self.l2(x)
        return (x)
class replay_memory():
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []

    def save(self, transition):
        # saves all transitions for the environment in a tensor
        self.memory.append(transition)
        if len(self.memory) > self.capacity:
            del self.memory[0]

    def sample(self, batch_size):
        # generates a random sample from the memory
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class agent():
    def __init__(self, env, model):
        self.epsilon = 1  # exploration rate
        self.epsilon_min = 0.001  # smallest exploration value
        self.epsilon_decay = 0.995  # rate at which exploration occurs
        self.learning_rate = 0.001

    def act(self, state, model):
        # define actions, random or optimal based on exploration rate DOES NOT ACCOUNT FOR THE DECAY
        if random.uniform(0, 1) <= self.epsilon:
            action = torch.LongTensor([[random.randrange(2)]])
            action_np = (action.numpy())[0][0]
        else:
            action = model(Variable(torch.FloatTensor([state])).type(torch.FloatTensor)).max(1)[1].view(1, 1)
            action_np = (action.numpy())[0][0]
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
        else:
            self.epsilon = self.epsilon_min
        return action, action_np

    def trained_act(self, episodes, network, env):
        for e in range(episodes):
            state = env.reset()
            for t in range(200):
                action, action_np = agent.act(state, network)
                next_state, reward, done, info = env.step(action_np)
                env.render()
                if done:
                    break
            print(t)
        env.close()
def learn(batch_size, gamma, memory, optimizer):
    BATCH_SIZE = batch_size
    if len(memory) < BATCH_SIZE:
        return
    # random transition batch is taken from experience replay memory.
    transitions = memory.sample(BATCH_SIZE)
    batch_state, batch_action, batch_reward, batch_next_state = zip(*transitions)
    batch_state = Variable(torch.cat(batch_state))
    batch_action = Variable(torch.cat(batch_action))
    batch_reward = Variable(torch.cat(batch_reward))
    batch_next_state = Variable(torch.cat(batch_next_state))
    current_q_values = network.forward(batch_state).gather(1, batch_action.unsqueeze(-1))
    max_next_q_values = network.forward(batch_next_state).detach().max(1)[0]
    expected_q_values = batch_reward + (gamma * max_next_q_values)
    # loss is measured from error between current and newly expected Q values
    loss = F.smooth_l1_loss(expected_q_values, current_q_values)
    # backpropagation of loss to NN
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss
env = gym.make('CartPole-v0')
env.reset()

network = network()
agent = agent(env, network)
batch_size = 50
episode = 500
T = 200
gamma = 0.95
memory = replay_memory(100)
optimizer = optim.SGD(network.parameters(), 0.001)
l = []
s = []

for e in range(episode):
    state = env.reset()
    for t in range(T):
        action, action_np = agent.act(state, network)
        next_state, reward, done, info = env.step(action_np)
        if done:
            reward = -2
        transition = torch.FloatTensor([state]), torch.LongTensor([action]), torch.FloatTensor([reward]), torch.FloatTensor([next_state])
        memory.save(transition)
        state = next_state
        loss = learn(batch_size, gamma, memory, optimizer)
        l.append(loss)
        if done:
            print('Loss = {}, Episode = {}, finished after {} steps'.format(loss, e, t))
            s.append(t)
            break
I would rewrite your training algorithm to:
for e in range(episode):
    state = env.reset()
    done = False
    t = 0
    while not done:
        action, action_np = agent.act(state, network)
        next_state, reward, done, info = env.step(action_np)
        transition = torch.FloatTensor([state]), torch.LongTensor([action]), torch.FloatTensor([reward]), torch.FloatTensor([next_state])
        memory.save(transition)
        state = next_state
        loss = learn(batch_size, gamma, memory, optimizer)
        l.append(loss)
        if t < T:
            t += 1
        else:
            done = True
        if done:
            print('Loss = {}, Episode = {}, finished after {} steps'.format(loss, e, t))
            s.append(t)
            break
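Comparing this with the original loop, the main differences appear to be: the fixed-length for t in range(T) loop becomes a while not done loop with an explicit step counter t, the reward = -2 override on termination is dropped, and an episode only ends once the environment reports done or the step cap T is reached.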
