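"""DI-engine serial-pipeline config for NGU (Never Give Up) on Atari Pitfall."""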
from easydict import EasyDict

collector_env_num = 32
evaluator_env_num = 5
nstep = 5
max_env_step = int(10e6)

pitfall_ngu_config = dict(
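    # NOTE: Pitfall! is a hard-exploration Atari game with extremely sparse rewards,
    # hence the NGU intrinsic-reward setup below.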
    exp_name='pitfall_ngu_seed0',
    env=dict(
        collector_env_num=collector_env_num,
        evaluator_env_num=evaluator_env_num,
        n_evaluator_episode=5,
        env_id='PitfallNoFrameskip-v4',
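        # Wrap observations so that the previous action and reward are appended;
        # the NGU policy conditions on (obs, prev_action, prev_reward).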
        obs_plus_prev_action_reward=True,
        stop_value=int(1e5),
        frame_stack=4,
    ),
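    # RND reward model: NGU's life-long novelty signal; its intrinsic reward is
    # combined additively ('add') with the environment reward.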
    rnd_reward_model=dict(
        intrinsic_reward_type='add',
        learning_rate=1e-4,
        obs_shape=[4, 84, 84],
        action_shape=18,
        batch_size=320,
        update_per_collect=10,
        only_use_last_five_frames_for_icm_rnd=False,
        clear_buffer_per_iters=10,
        nstep=nstep,
        hidden_size_list=[128, 128, 64],
        type='rnd-ngu',
    ),
    episodic_reward_model=dict(
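        # Episodic novelty module of NGU: rewards observations whose embeddings are
        # far from those already stored in the current episode's memory.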
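        # Whether to rescale the last non-zero extrinsic reward when combining it with
        # the intrinsic reward. This trick only helps in envs where the final non-zero
        # reward is a strong task-completion signal (e.g. sparse-reward MiniGrid), so it
        # is disabled here; see the ngu reward model for details.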
        last_nonzero_reward_rescale=False,
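        # Weight applied to the last non-zero reward; only used when
        # last_nonzero_reward_rescale=True.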
        last_nonzero_reward_weight=1,
        intrinsic_reward_type='add',
        learning_rate=1e-4,
        obs_shape=[4, 84, 84],
        action_shape=18,
        batch_size=320,
        update_per_collect=10,
        only_use_last_five_frames_for_icm_rnd=False,
        clear_buffer_per_iters=10,
        nstep=nstep,
        hidden_size_list=[128, 128, 64],
        type='episodic',
    ),
    policy=dict(
        cuda=True,
        on_policy=False,
        priority=True,
        priority_IS_weight=True,
        discount_factor=0.997,
        nstep=nstep,
        burnin_step=20,
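        # In R2D2-style training the first burnin_step transitions only warm up the
        # RNN hidden state; the loss is computed on the following learn_unroll_len
        # steps, i.e. <whole sequence length> = burnin_step + learn_unroll_len.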
        learn_unroll_len=80,
        model=dict(
            obs_shape=[4, 84, 84],
            action_shape=18,
            encoder_hidden_size_list=[128, 128, 512],
            collector_env_num=collector_env_num,
        ),
        learn=dict(
            update_per_collect=8,
            batch_size=64,
            learning_rate=0.0005,
            target_update_theta=0.001,
        ),
        collect=dict(
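            # traj_len_inf=True makes each collector env gather one uninterrupted
            # trajectory until the episode ends, as sequence-based policies require;
            # each collect phase then yields n_sample such sequence samples.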
            n_sample=32,
            traj_len_inf=True,
            env_num=collector_env_num,
        ),
        eval=dict(env_num=evaluator_env_num),
        other=dict(
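            # Epsilon-greedy exploration: epsilon decays exponentially from 0.95
            # toward 0.05; decay=1e5 sets the decay time scale (in steps).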
            eps=dict(
                type='exp',
                start=0.95,
                end=0.05,
                decay=1e5,
            ),
            replay_buffer=dict(
                replay_buffer_size=int(3e3),
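                # (float) How much prioritization is used: 0 means uniform sampling,
                # 1 means full prioritization.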
                alpha=0.6,
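                # (float) How much importance-sampling correction is used: 0 means no
                # correction, 1 means full correction.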
                beta=0.4,
            )
        ),
    ),
)
pitfall_ngu_config = EasyDict(pitfall_ngu_config)
main_config = pitfall_ngu_config
pitfall_ngu_create_config = dict(
    env=dict(
        type='atari',
        import_names=['dizoo.atari.envs.atari_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='ngu'),
    rnd_reward_model=dict(type='rnd-ngu'),
    episodic_reward_model=dict(type='episodic'),
)
pitfall_ngu_create_config = EasyDict(pitfall_ngu_create_config)
create_config = pitfall_ngu_create_config
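# Running this file directly launches the NGU serial pipeline with seed 0 and
# trains until max_env_step environment steps have been collected.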
if __name__ == "__main__":
    from ding.entry import serial_pipeline_reward_model_ngu
    serial_pipeline_reward_model_ngu([main_config, create_config], seed=0, max_env_step=max_env_step)