test_model.py
#%%
# import gym
import observables
import numpy as np
from general_model import GeneratorModel
# from cartpole_reward import cartpoleReward
from simple_env import SimpleTestEnv
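# NOTE: observables, general_model, and simple_env are local modules from
# this repo; gym and cartpole_reward appear to be needed only for the
# commented-out CartPole variant below.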
#%%
# X = (np.load('random-cartpole-states.npy'))[:2].T # states
# U = (np.load('random-cartpole-actions.npy'))[:2].T # actions
X = np.array([[0, 0],
              [0, 0]])
U = np.array([10, 5])
# print(X.shape)
# print(U.shape)
psi = observables.monomials(6)  # dictionary of monomial observables (assumed: up to order 6)
#%%
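# SimpleTestEnv is this repo's minimal test environment, used below through
# the gym-style reset()/step() interface.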
env = SimpleTestEnv()
#%%
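# Build the model from the observable dictionary, the environment's reward
# function, and the action bounds [-1.0, 1.0] (the same range the random
# actions below are drawn from).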
model = GeneratorModel(psi, env.reward, [-1.0, 1.0])
#%%
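# Fit the model from the snapshot data. Based on the commented-out CartPole
# loads above (note the .T), X is presumably states-as-columns and U the
# matching actions; here both are tiny dummy arrays.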
model.fit(X, U)
#%%
# print(model.sample_action(0))
#%%
episode_rewards = []
#%%
episodes = 1
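# Roll out episodes with uniformly random actions, updating the model and
# its policy online from each observed transition.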
for episode in range(episodes):
    episode_reward = 0
    current_state = env.reset()
    done = False
    while not done:
        # env.render()
        action = np.random.uniform(-1.0, 1.0)
        current_state, reward, done, _ = env.step(action)
        episode_reward += reward
        model.update_model(current_state, action)
        model.update_policy(3)
    print(f"episode {episode+1} reward:", episode_reward)
    episode_rewards.append(episode_reward)
print("\naverage reward:", np.mean(episode_rewards))
# %%
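#%%
# Optional sanity-check plot of per-episode rewards; a minimal sketch that
# assumes matplotlib is installed (it is not used elsewhere in this file).
# Only informative once `episodes` above is raised past 1.
import matplotlib.pyplot as plt

plt.plot(range(1, len(episode_rewards) + 1), episode_rewards, marker="o")
plt.xlabel("episode")
plt.ylabel("total reward")
plt.title("Reward per episode")
plt.show()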