DQN.py
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class DuelingLinearDeepQNetwork(nn.Module):
    """Dueling DQN: a shared fully connected trunk followed by separate
    value (V) and advantage (A) streams."""

    def __init__(self, alpha, n_actions, input_dims, view_reduced=False):
        super(DuelingLinearDeepQNetwork, self).__init__()
        self.view_reduced = view_reduced

        # Shared fully connected trunk
        self.fc1 = nn.Linear(*input_dims, 256)
        self.fc2 = nn.Linear(256, 512)
        self.fc3 = nn.Linear(512, 256)

        # With the full view, the last 4 raw state features are concatenated
        # onto the 256-dim trunk output (260 inputs); with the reduced view
        # the trunk output feeds the streams directly (256 inputs).
        stream_in = 256 if view_reduced else 260

        # Value stream: scalar state value V(s)
        self.preV = nn.Linear(stream_in, 128)
        self.V = nn.Linear(128, 1)

        # Advantage stream: per-action advantages A(s, a)
        self.preA = nn.Linear(stream_in, 128)
        self.A = nn.Linear(128, n_actions)

        self.optimizer = optim.Adam(self.parameters(), lr=alpha)
        self.loss = nn.MSELoss()
        self.device = T.device('cuda' if T.cuda.is_available() else 'cpu')
        self.to(self.device)
    def forward(self, state):
        if self.view_reduced:
            l1 = F.relu(self.fc1(state))
            l2 = F.relu(self.fc2(l1))
            l3 = F.relu(self.fc3(l2))
            prV = F.relu(self.preV(l3))
            V = self.V(prV)
            prA = F.relu(self.preA(l3))
            A = self.A(prA)
        else:
            # Keep the last 4 raw state features and re-inject them
            # after the trunk.
            data = state[:, -4:]
            l1 = F.relu(self.fc1(state))
            l2 = F.relu(self.fc2(l1))
            l3 = F.relu(self.fc3(l2))
            l4 = T.cat((l3, data), dim=1)
            prV = F.relu(self.preV(l4))
            V = self.V(prV)
            prA = F.relu(self.preA(l4))
            A = self.A(prA)
        return V, A
    def save_checkpoint(self, file):
        T.save(self.state_dict(), file)

    def load_checkpoint(self, file):
        self.load_state_dict(T.load(file))
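
A minimal usage sketch, not part of the original file: it assumes a 16-dimensional observation whose last 4 entries are the extra features re-injected after the trunk, and shows the standard dueling aggregation Q = V + (A - mean(A)) that an agent would typically apply to the network's two outputs. The dimensions, learning rate, and checkpoint filename are placeholders.

import torch as T

# Hypothetical dimensions for illustration only.
n_actions = 4
input_dims = (16,)          # last 4 features are the re-injected "data" slice
net = DuelingLinearDeepQNetwork(alpha=1e-4, n_actions=n_actions,
                                input_dims=input_dims)

# Batch of 2 random observations on the network's device.
state = T.rand(2, *input_dims).to(net.device)
V, A = net.forward(state)   # V: (2, 1), A: (2, n_actions)

# Dueling aggregation: subtract the mean advantage so V and A are
# identifiable, then pick the greedy action per sample.
Q = V + (A - A.mean(dim=1, keepdim=True))
greedy_actions = T.argmax(Q, dim=1)

# Checkpointing round-trip.
net.save_checkpoint('dueling_dqn.pt')
net.load_checkpoint('dueling_dqn.pt')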