run.py
from stable_baselines3 import PPO, SAC
import gymnasium
from gymnasium.wrappers import FilterObservation, FlattenObservation
import panda_gym
import numpy as np
import calendar
import time
import os
import csv
from matplotlib import animation
import matplotlib.pyplot as plt
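
# run.py: load a trained PPO or SAC agent from train/_models, roll it out on
# PandaReach-v3, log every step to a CSV file in ./test and optionally save a
# GIF of the rendered rollout.
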
def save_frames_as_gif(frames, path='./', filename='gym_animation.gif'):
    # Mess with this to change frame size
    plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi=72)

    patch = plt.imshow(frames[0])
    plt.axis('off')

    def animate(i):
        patch.set_data(frames[i])

    anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50)
    anim.save(path + filename, writer='imagemagick', fps=5)
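
# runModel: roll the policy out for a fixed number of episodes with
# deterministic actions, render every step (collecting frames when GIF is
# True) and append one CSV row per step to a timestamped log file.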
def runModel(model, vec_env, GIF):
    # Run the model
    episodes = 15
    if GIF:
        frames = []

    log_dir = os.getcwd() + '/test'  # Folder where the log is saved
    os.makedirs(log_dir, exist_ok=True)  # make sure the log folder exists
    current_GMT = time.gmtime()
    time_stamp = calendar.timegm(current_GMT)
    log_file = log_dir + '/log_{}.csv'.format(time_stamp)
    with open(log_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['episode', 'step', 'action1', 'action2', 'action3', 'obs1', 'obs2', 'obs3', 'obs4',
                         'obs5', 'obs6', 'obs7', 'obs8', 'obs9', 'reward', 'terminated', 'truncated'])

    for ep in range(1, episodes + 1):
        obs, _ = vec_env.reset()
        terminated = False
        truncated = False
        i = 0
        # log initial observation
        fields = [ep, i, "", "", "", obs[0], obs[1], obs[2], obs[3], obs[4], obs[5], obs[6], obs[7], obs[8], "",
                  terminated, ""]
        with open(log_file, 'a', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(fields)
        ret = 0
        if GIF:
            frames.append(vec_env.render())
            time.sleep(.2)
        else:
            vec_env.render()
            time.sleep(.2)

        while not (terminated or truncated):
            i += 1
            action, _state = model.predict(obs, deterministic=True)
            obs, reward, terminated, truncated, info = vec_env.step(action)
            if GIF:
                frames.append(vec_env.render())
                time.sleep(.2)
            else:
                vec_env.render()
                time.sleep(.2)
            print('Step: {}, rew: {}'.format(i, reward))
            print('Actions: {}\nObs: {}\nState: {}'.format(action, obs, _state))
            # log
            fields = [ep, i, action[0], action[1], action[2], obs[0], obs[1], obs[2], obs[3], obs[4], obs[5], obs[6],
                      obs[7], obs[8], reward, terminated, truncated]
            with open(log_file, 'a', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(fields)
            ret += reward
            if i > 50:  # safety limit in case the episode never ends
                break
        print('Episode {} finished with return: {} \n'.format(ep, ret))

    vec_env.close()
    if GIF:
        save_frames_as_gif(frames, log_dir, '/animation_{}.gif'.format(time_stamp))
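
# Entry point: build and configure the PandaReach-v3 environment, then load
# the trained PPO or SAC model and run it.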
if __name__ == '__main__':
    # INITIAL DATA
    entorno = 'PandaReach-v3'  # Defines the robot, the task and the reward type
    algoritmo = 'PPO_1'  # Subfolder containing the model
    modelo = 'modelo1.zip'
    PRECISION = 0.05  # precision at the goal, default 0.05 m
    GOAL_RANGE = 0.3  # defines the volume in which the goal appears, default 0.3 --> 0.3x0.3x0.3 m3

    # RENDER DATA
    GIF = True  # whether to save a .gif of the render (True) or not (False)
    render = 'rgb_array'
    renderer = 'OpenGL'
    render_width = 720
    render_height = 480
    render_target_position = [0., 0., 0.]
    render_distance = 1.0
    render_yaw = 45
    render_pitch = -30
    render_roll = 0

    # MAIN
    env = gymnasium.make(
        entorno,
        render_mode=render,  # render_mode="human"
        renderer=renderer,
        render_width=render_width,  # 720
        render_height=render_height,  # 480
        render_target_position=render_target_position,  # [0., 0., 0.]
        render_distance=render_distance,  # 1.4
        render_yaw=render_yaw,  # 45
        render_pitch=render_pitch,  # -30
        render_roll=render_roll  # 0
    )
    env.unwrapped.task.distance_threshold = PRECISION
    env.unwrapped.task.goal_range = GOAL_RANGE
    env.unwrapped.task.goal_range_low = \
        np.array([-GOAL_RANGE / 2, -GOAL_RANGE / 2, 0])  # the coordinate origin is the center of the table
    env.unwrapped.task.goal_range_high = \
        np.array([GOAL_RANGE / 2, GOAL_RANGE / 2, GOAL_RANGE])
    env.reset()

    env = FilterObservation(
        env, filter_keys=['desired_goal', 'observation'])
    env = FlattenObservation(env)

    models_dir = os.getcwd() + '/train/_models'  # Folder containing the models
    model_path = f"{models_dir}/{algoritmo}/{modelo}"
    if 'PPO' in algoritmo:
        model = PPO.load(model_path, env=env)
        runModel(model, env, GIF)
    elif 'SAC' in algoritmo:
        model = SAC.load(model_path, env=env)
        runModel(model, env, GIF)
    else:
        print('A PPO or SAC model must be chosen')