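# DI-engine example configuration: DDPG on the Gym LunarLanderContinuous-v2 environment.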
from easydict import EasyDict
from functools import partial
import ding.envs.gym_env
cfg = dict(
    exp_name='LunarLanderContinuous-V2-DDPG',
    seed=0,
    env=dict(
        env_id='LunarLanderContinuous-v2',
        collector_env_num=8,
        evaluator_env_num=8,
        n_evaluator_episode=8,
        # Training stops once the evaluation return reaches this value.
        stop_value=260,
        # Scale the policy output into the environment's action range.
        act_scale=True,
    ),
    policy=dict(
        cuda=True,
        # Number of transitions collected with a random policy before training starts (0 disables warm-up).
        random_collect_size=0,
        model=dict(
            obs_shape=8,
            action_shape=2,
            # Use two Q-networks (clipped double Q-learning, as in TD3).
            twin_critic=True,
            action_space='regression',
        ),
        learn=dict(
            # Number of gradient updates per data collection.
            update_per_collect=2,
            batch_size=128,
            learning_rate_actor=0.001,
            learning_rate_critic=0.001,
            ignore_done=False,  # TODO(pu)
            # (int) When the critic network updates once, how many times the actor network updates.
            # Delayed Policy Updates in the original TD3 paper (https://arxiv.org/pdf/1802.09477.pdf).
            # Default 1 for DDPG, 2 for TD3.
            actor_update_freq=1,
            # (bool) Whether to add noise to the target network's action.
            # Target Policy Smoothing Regularization in the original TD3 paper (https://arxiv.org/pdf/1802.09477.pdf).
            # Default True for TD3, False for DDPG.
            noise=False,
            noise_sigma=0.1,
            noise_range=dict(
                min=-0.5,
                max=0.5,
            ),
        ),
        collect=dict(
            # Number of transitions collected per collection iteration.
            n_sample=48,
            # Standard deviation of the Gaussian exploration noise added during collection.
            noise_sigma=0.1,
            collector=dict(collect_print_freq=1000, ),
        ),
        eval=dict(evaluator=dict(eval_freq=100, ), ),
        other=dict(replay_buffer=dict(replay_buffer_size=20000, ), ),
    ),
    wandb_logger=dict(
        gradient_logger=True, video_logger=True, plot_logger=True, action_logger=True, return_logger=False
    ),
)
cfg = EasyDict(cfg)
env = partial(ding.envs.gym_env.env, continuous=True)
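
if __name__ == '__main__':
    # Usage sketch (not part of the original config file): a minimal example assuming the high-level
    # ding.bonus agent API, i.e. that DDPGAgent accepts env_id/exp_name/seed arguments and exposes a
    # train() method. The exact argument names and the step budget below are assumptions, not
    # confirmed by this file.
    from ding.bonus import DDPGAgent

    agent = DDPGAgent(env_id=cfg.env.env_id, exp_name=cfg.exp_name, seed=cfg.seed)
    agent.train(step=int(1e6))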