run.py
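"""Run a trained agent (pg, ac, trpo, gae, or ppo) in a Gym environment and
report the per-episode rewards along with their mean and standard deviation."""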
import os
import json
import argparse
import numpy as np
import torch
import gym
from models.pg import PolicyGradient
from models.ac import ActorCritic
from models.trpo import TRPO
from models.gae import GAE
from models.ppo import PPO


def main(env_name, model_name, num_episodes, render):
    if env_name not in ["CartPole-v1", "Pendulum-v0", "BipedalWalker-v3"]:
        print("The environment name is wrong!")
        return

    # Checkpoints are stored under ckpts/<model_name>/<env_name>/.
    ckpt_path = os.path.join("ckpts", model_name, env_name)
    with open(os.path.join(ckpt_path, "model_config.json")) as f:
        config = json.load(f)

    env = gym.make(env_name)
    env.reset()

    state_dim = len(env.observation_space.high)
    if env_name in ["CartPole-v1"]:
        discrete = True
        action_dim = env.action_space.n
    else:
        discrete = False
        action_dim = env.action_space.shape[0]

    device = "cuda" if torch.cuda.is_available() else "cpu"

    if model_name == "pg":
        model = PolicyGradient(state_dim, action_dim, discrete, **config).to(device)
    elif model_name == "ac":
        model = ActorCritic(state_dim, action_dim, discrete, **config).to(device)
    elif model_name == "trpo":
        model = TRPO(state_dim, action_dim, discrete, **config).to(device)
    elif model_name == "gae":
        model = GAE(state_dim, action_dim, discrete, **config).to(device)
    elif model_name == "ppo":
        model = PPO(state_dim, action_dim, discrete, **config).to(device)
    else:
        print("The model name is wrong!")
        return

    # Load the trained policy weights if the model exposes a policy network.
    if hasattr(model, "pi"):
        model.pi.load_state_dict(
            torch.load(os.path.join(ckpt_path, "policy.ckpt"), map_location=device)
        )

    episode_rewards = []
    for i in range(1, num_episodes + 1):
        rwds = []
        done = False
        ob = env.reset()
        while not done:
            act = model.act(ob)
            if render:
                env.render()
            ob, rwd, done, info = env.step(act)
            rwds.append(rwd)
        rwd_sum = sum(rwds)
        print("The total reward of episode %i = %f" % (i, rwd_sum))
        episode_rewards.append(rwd_sum)
    env.close()

    print("Mean = %f" % np.mean(episode_rewards))
    print("Standard Deviation = %f" % np.std(episode_rewards))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--env_name",
        type=str,
        default="CartPole-v1",
        help="Name of the environment to run. "
             "Possible environments: [CartPole-v1, Pendulum-v0, BipedalWalker-v3]",
    )
    parser.add_argument(
        "--model_name",
        type=str,
        default="pg",
        help="Name of the model to run. Possible models: [pg, ac, trpo, gae, ppo]",
    )
    parser.add_argument(
        "--num_episodes",
        type=int,
        default=1,
        help="Number of episodes to run the agent for",
    )
    parser.add_argument(
        "--render",
        type=str,
        default="True",
        help="Whether to render the environment (True or False)",
    )
    args = parser.parse_args()

    render = args.render == "True"

    main(args.env_name, args.model_name, args.num_episodes, render)
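For reference, the script reads its checkpoint from ckpts/<model_name>/<env_name>/ (model_config.json plus policy.ckpt), which is assumed to have been produced by the repository's corresponding training script. Given the flags defined above, a typical invocation for a trained PPO agent on CartPole-v1 might look like:

python run.py --env_name CartPole-v1 --model_name ppo --num_episodes 10 --render False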