train_cartpole.py
import gym
import matplotlib.pyplot as plt
from tqdm import tqdm

import core.logger as log
from core.solver_pg import VanillaPolicyGradient, tf


def main():
    # Limit TensorFlow to 80% of GPU memory so the process shares the card.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)

    DISPLAY = False  # rendering is switched on once the policy is good enough
    DISPLAY_REWARD_THRESHOLD = 400

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        env = gym.make('CartPole-v0')
        env.seed(1)          # fixed seed for reproducibility
        env = env.unwrapped  # strip the 200-step time-limit wrapper

        log.setFileHandler('vanilla_policy.log')
        log.setVerbosity('1')
        log.info('Starting Policy Gradient optimization')
        log.info('Cartpole vanilla policy gradient training')
        log.info(str(env.action_space))
        log.info(str(env.observation_space))
        log.info(str(env.observation_space.high))
        log.info(str(env.observation_space.low))

        pg = VanillaPolicyGradient(
            n_actions=env.action_space.n,
            n_features=env.observation_space.shape[0],
            sess=sess,
            learning_rate=0.02,
            reward_decay=0.99,
        )

        running_reward = None  # exponential moving average of episode returns
        for i_episode in tqdm(range(3000), ncols=70):
            observation = env.reset()
            while True:
                if DISPLAY:
                    env.render()

                # Sample an action from the policy, step the environment,
                # and buffer the transition for the end-of-episode update.
                action = pg.choose_action(observation)
                observation_, reward, done, info = env.step(action)
                pg.store_transition(observation, action, reward)

                if done:
                    ep_rs_sum = sum(pg.ep_rs)
                    if running_reward is None:
                        running_reward = ep_rs_sum
                    else:
                        running_reward = running_reward * 0.99 + ep_rs_sum * 0.01
                    if running_reward > DISPLAY_REWARD_THRESHOLD:
                        DISPLAY = True  # rendering

                    log.info('episode: %d, reward: %d' %
                             (i_episode, int(running_reward)))

                    # One vanilla policy-gradient update on the completed
                    # episode; learn() returns the discounted, normalized
                    # returns so the first episode can be plotted.
                    vt = pg.learn()
                    if i_episode == 0:
                        plt.plot(vt)  # plot the episode vt
                        plt.xlabel('episode steps')
                        plt.ylabel('normalized state-action value')
                        plt.show()
                    break

                observation = observation_


if __name__ == '__main__':
    main()
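
The script depends on core/solver_pg.py, which is not shown here. For orientation, below is a minimal sketch of the interface the training loop relies on (choose_action, store_transition, learn, and the ep_rs buffer), written as a plain REINFORCE agent in TensorFlow 1.x. This is an assumption about what the solver looks like, not the repository's actual implementation; the network shape and hyperparameter defaults are illustrative. Having learn() return the normalized returns is what lets train_cartpole.py plot vt for the first episode.

# Hypothetical sketch of core/solver_pg.py -- NOT the repository's code.
import numpy as np
import tensorflow as tf  # re-exported via "from core.solver_pg import ..., tf"


class VanillaPolicyGradient:
    def __init__(self, n_actions, n_features, sess,
                 learning_rate=0.01, reward_decay=0.95):
        self.gamma = reward_decay
        self.sess = sess
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []

        # Policy network: one tanh hidden layer, softmax over actions.
        self.tf_obs = tf.placeholder(tf.float32, [None, n_features])
        self.tf_acts = tf.placeholder(tf.int32, [None])
        self.tf_vt = tf.placeholder(tf.float32, [None])
        hidden = tf.layers.dense(self.tf_obs, 10, activation=tf.nn.tanh)
        logits = tf.layers.dense(hidden, n_actions)
        self.all_act_prob = tf.nn.softmax(logits)

        # REINFORCE loss: negative log-prob of the taken actions,
        # weighted by the discounted, normalized returns.
        neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=self.tf_acts)
        loss = tf.reduce_mean(neg_log_prob * self.tf_vt)
        self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        self.sess.run(tf.global_variables_initializer())

    def choose_action(self, observation):
        # Sample from the policy's action distribution.
        prob = self.sess.run(
            self.all_act_prob,
            feed_dict={self.tf_obs: observation[np.newaxis, :]})
        return np.random.choice(range(prob.shape[1]), p=prob.ravel())

    def store_transition(self, s, a, r):
        self.ep_obs.append(s)
        self.ep_as.append(a)
        self.ep_rs.append(r)

    def learn(self):
        # Discount rewards back through the episode, then normalize.
        discounted = np.zeros(len(self.ep_rs))
        running = 0.0
        for t in reversed(range(len(self.ep_rs))):
            running = running * self.gamma + self.ep_rs[t]
            discounted[t] = running
        discounted -= discounted.mean()
        discounted /= discounted.std() + 1e-8

        self.sess.run(self.train_op, feed_dict={
            self.tf_obs: np.vstack(self.ep_obs),
            self.tf_acts: np.array(self.ep_as),
            self.tf_vt: discounted,
        })
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []
        return discounted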