-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtrainer_dqn_mlp_std_simple.py
55 lines (40 loc) · 1.3 KB
/
trainer_dqn_mlp_std_simple.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
from customized_environments.envs.my_agent import CustomAgent
import gym
from stable_baselines.deepq.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines import DQN
from absl import flags
# absl flags must be parsed before flag-dependent code (e.g. pysc2-based
# environments) runs; passing [''] parses an empty argv so the script needs
# no real command-line arguments.
FLAGS = flags.FLAGS
FLAGS([''])

# ---- Experiment configuration -------------------------------------------
name = "dqn_mlp_std_simple"  # run name; also names the output directory
learn_type = 'DQN'           # forwarded to CustomAgent to select its mode
start_value = 0              # checkpoint index to resume from (0 = fresh run)

# NOTE(review): "gym_ouput" is a typo for "gym_output", but it is kept
# byte-for-byte so existing checkpoint/log directories keep working.
out_dir = "gym_ouput/" + name

# DQN requires a (single-process) vectorized environment wrapper.
env = DummyVecEnv([lambda: CustomAgent(learn_type=learn_type)])

model = DQN(
    MlpPolicy,
    env,
    learning_rate=0.3,
    exploration_fraction=0.2,
    double_q=True,
    verbose=0,
    tensorboard_log=out_dir + "/log/",
)
# NOTE(review): DQN.__init__ already calls setup_model() by default
# (_init_setup_model=True), so this explicit call rebuilds the TF graph a
# second time and looks redundant — confirm before removing.
model.setup_model()

# ---- Optional resume from a saved checkpoint ----------------------------
# BUG FIX: DQN.load is a classmethod that RETURNS a new model; the original
# code called model.load(...) on the instance and discarded the result, so
# saved weights were never actually restored. Rebind `model` instead.
# Bare `except:` also narrowed to `except Exception:` so Ctrl-C still works.
if start_value > 0:
    try:
        # Try the checkpoint one past start_value first (original behavior),
        # warning that it may not be the latest save point.
        model = DQN.load(out_dir + "/it" + str(start_value + 1), env=env)
        print("\n\nOBS! this is not the latest NN load point\n\n")
    except Exception:
        try:
            model = DQN.load(out_dir + "/it" + str(start_value), env=env)
        except Exception:
            print("\n\nOBS! invalid load point\n\n")

print("obs space: " + str(model.observation_space))
print("act space: " + str(model.action_space))

# ---- Train forever, checkpointing every 8k timesteps --------------------
# reset_num_timesteps=False keeps the TensorBoard step counter continuous
# across successive learn() calls.
i = 1
while True:
    save_name = out_dir + "/it" + str(i + start_value)
    model.learn(total_timesteps=int(8e3), tb_log_name="log", reset_num_timesteps=False)
    model.save(save_name)
    i += 1