# This is inspired by:
# https://github.com/udacity/deep-reinforcement-learning/blob/55474449a112fa72323f484c4b7a498c8dc84be1/ddpg-bipedal/model.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def hidden_init(layer):
    """Return the uniform bounds (-1/sqrt(fan_in), 1/sqrt(fan_in)) used to
    initialize the hidden-layer weights, as in the DDPG paper."""
    fan_in = layer.weight.data.size()[0]
    lim = 1. / np.sqrt(fan_in)
    return (-lim, lim)
class Actor(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, fc1_units=128, fc2_units=256):
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        # A batch normalization layer was suggested on the Slack channel,
        # but I did not notice better convergence with it, so I removed it
        # before submission.
        #self.bn1 = nn.BatchNorm1d(fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)
        self.reset_parameters()

    def reset_parameters(self):
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Build an actor (policy) network that maps states -> actions."""
        # The batch normalization variant did not converge better, so it is
        # kept only as a commented-out reference.
        #x = F.relu(self.bn1(self.fc1(state)))
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        # tanh bounds each action component to [-1, 1]
        return torch.tanh(self.fc3(x))
class Critic(nn.Module):
    """Critic (Value) Model."""

    def __init__(self, state_size, action_size, seed, fc1_units=128, fc2_units=256):
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        # A batch normalization layer was suggested on the Slack channel,
        # but I did not notice better convergence with it, so I removed it
        # before submission.
        #self.bn1 = nn.BatchNorm1d(fc1_units)
        # The action is concatenated after the first hidden layer, so fc2
        # takes fc1_units + action_size inputs.
        self.fc2 = nn.Linear(fc1_units + action_size, fc2_units)
        self.fc3 = nn.Linear(fc2_units, 1)
        self.reset_parameters()

    def reset_parameters(self):
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        x = F.relu(self.fc1(state))
        # Leaky ReLU was also tried, as suggested on the Slack channel, but it
        # seemed much slower; kept as a commented-out reference.
        #x = F.leaky_relu(self.fc1(state))
        x = torch.cat((x, action), dim=1)
        x = F.relu(self.fc2(x))
        #x = F.leaky_relu(self.fc2(x))
        return self.fc3(x)
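

# A minimal smoke-test sketch (not part of the original file). The dimensions
# below are illustrative assumptions; in the project, state_size and
# action_size come from the environment.
if __name__ == "__main__":
    state_size, action_size, batch = 33, 4, 5  # assumed example dimensions
    actor = Actor(state_size, action_size, seed=0)
    critic = Critic(state_size, action_size, seed=0)
    states = torch.randn(batch, state_size)
    actions = actor(states)             # shape (batch, action_size), in [-1, 1]
    q_values = critic(states, actions)  # shape (batch, 1)
    print(actions.shape, q_values.shape)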