forked from ZikangXiong/VRL_CodeReview
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: pendulum_change_l.py
More file actions
96 lines (79 loc) · 3.2 KB
/
pendulum_change_l.py
File metadata and controls
96 lines (79 loc) · 3.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
# -*- coding: utf-8 -*-
# -------------------------------
# Author: Zikang Xiong
# Email: zikangxiong@gmail.com
# Date: 2018-10-23 17:04:25
# Last Modified by: Zikang Xiong
# Last Modified time: 2019-02-22 16:44:32
# -------------------------------
import argparse
import os

import numpy as np

from main import *
from DDPG import *
from shield import Shield
from Environment import Environment
def pendulum(learning_eposides, critic_structure, actor_structure, train_dir,
             learning_method, number_of_rollouts, simulation_steps,
             nn_test=False, retrain_shield=False, shield_test=False,
             test_episodes=100):
    """Train/test a DDPG controller with a linear safety shield for an
    inverted pendulum whose pole length is changed to l = 1.2.

    Args:
        learning_eposides: number of DDPG training episodes (0 means the
            checkpoint in ``train_dir`` is loaded instead of trained).
        critic_structure: list of hidden-layer widths for the critic network.
        actor_structure: list of hidden-layer widths for the actor network.
        train_dir: directory containing/receiving the model checkpoint.
        learning_method: shield-synthesis search strategy
            (e.g. "random_search").
        number_of_rollouts: rollout budget for shield learning.
        simulation_steps: steps per rollout during shield learning.
        nn_test: if True, evaluate the raw neural controller after loading.
        retrain_shield: if True, force re-learning of the shield even if a
            saved linear controller exists.
        shield_test: if True, run the learned shield in test mode.
        test_episodes: number of episodes for any testing phase.
    """
    m = 1.0   # pendulum mass
    l = 1.2   # pole length -- the "change_l" variant of the benchmark
    g = 10.0  # gravitational acceleration

    # Continuous-time linearized dynamics dx/dt = A x + B u around the
    # upright equilibrium, with state x = [angle, angular velocity].
    A = np.matrix([
        [0.,    1.],
        [g / l, 0.]
    ])
    B = np.matrix([
        [0.],
        [1. / (m * l ** 2.)]
    ])

    # Initial state space (component-wise bounds).
    s_min = np.array([[-0.35], [-0.35]])
    s_max = np.array([[0.35], [0.35]])

    # Quadratic (LQR-style) reward weights on state and control.
    Q = np.matrix([[1., 0.], [0., 1.]])
    R = np.matrix([[.005]])

    # Safety constraints: state box and actuation limits.
    x_min = np.array([[-0.5], [-0.5]])
    x_max = np.array([[0.5], [0.5]])
    u_min = np.array([[-15.]])
    u_max = np.array([[15.]])

    env = Environment(A, B, u_min, u_max, s_min, s_max, x_min, x_max, Q, R,
                      continuous=True)

    args = {'actor_lr': 0.0001,
            'critic_lr': 0.001,
            'actor_structure': actor_structure,
            'critic_structure': critic_structure,
            'buffer_size': 1000000,
            'gamma': 0.99,
            'max_episode_len': 1,
            'max_episodes': learning_eposides,
            'minibatch_size': 64,
            'random_seed': 6553,
            'tau': 0.005,
            'model_path': train_dir + "model.chkp",
            'enable_test': nn_test,
            'test_episodes': test_episodes,
            'test_episodes_len': 3000}
    actor = DDPG(env, args)

    #################### Shield #################
    # The learned linear controller ("K.model.npy") is stored next to the
    # DDPG checkpoint; use os.path.join instead of manual '/' concatenation.
    model_path = os.path.join(os.path.split(args['model_path'])[0],
                              'K.model' + '.npy')

    def rewardf(x, Q, u, R):
        # Q and R are part of the callback signature expected by the shield
        # trainer, but the environment's own reward already embeds them.
        return env.reward(x, u)

    shield = Shield(env, actor, model_path,
                    force_learning=retrain_shield, debug=False)
    shield.train_shield(learning_method, number_of_rollouts, simulation_steps,
                        rewardf=rewardf, eq_err=1e-2,
                        explore_mag=0.3, step_size=0.3)
    if shield_test:
        shield.test_shield(test_episodes, 3000, mode="single")

    # Release the TensorFlow session held by the actor.
    actor.sess.close()
if __name__ == "__main__":
    # Command-line switches controlling which test phases run; training
    # configuration itself is hard-coded below (0 episodes = load checkpoint).
    parser = argparse.ArgumentParser(description='Running Options')
    parser.add_argument('--nn_test', action="store_true", dest="nn_test")
    parser.add_argument('--retrain_shield', action="store_true",
                        dest="retrain_shield")
    parser.add_argument('--shield_test', action="store_true",
                        dest="shield_test")
    # default=100 replaces the previous post-parse "if None" fallback.
    parser.add_argument('--test_episodes', action="store",
                        dest="test_episodes", type=int, default=100)
    parser_res = parser.parse_args()

    pendulum(0, [1200, 900], [1000, 900, 800],
             "ddpg_chkp/perfect_model/pendulum/change_l/",
             "random_search", 100, 2000,
             nn_test=parser_res.nn_test,
             retrain_shield=parser_res.retrain_shield,
             shield_test=parser_res.shield_test,
             test_episodes=parser_res.test_episodes)