zjowowen committed on
Commit
31d2902
1 Parent(s): fec613a

Upload policy_config.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. policy_config.py +118 -0
policy_config.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Experiment configuration for the LightZero SampledEfficientZero agent on
# the Atari environment `MsPacmanNoFrameskip-v4`.
#
# Structure follows the DI-engine/LightZero convention:
#   * `main_config`   — hyperparameters: environment setup, policy/model
#                       architecture, optimization, MCTS, replay, and logging.
#   * `create_config` — registry names and import paths used to instantiate
#                       the env, env manager, and policy classes.
#
# NOTE(review): this block was reconstructed from a diff view ("+"-prefixed
# lines with interleaved line numbers); all keys and values are preserved
# exactly as in the original file.
exp_config = {
    'main_config': {
        'exp_name': 'MsPacmanNoFrameskip-v4-SampledEfficientZero',
        'seed': 0,
        'env': {
            'env_id': 'MsPacmanNoFrameskip-v4',
            'env_name': 'MsPacmanNoFrameskip-v4',
            # Stacked grayscale frames: 4 frames of 96x96 (downsampled Atari).
            'obs_shape': [4, 96, 96],
            'collector_env_num': 8,
            'evaluator_env_num': 3,
            'n_evaluator_episode': 3,
            'manager': {
                'shared_memory': False
            }
        },
        'policy': {
            'on_policy': False,
            'cuda': True,
            'multi_gpu': False,
            'bp_update_sync': True,
            'traj_len_inf': False,
            'model': {
                'observation_shape': [4, 96, 96],
                'frame_stack_num': 4,
                # MsPacman has 9 discrete actions.
                'action_space_size': 9,
                'downsample': True,
                'continuous_action_space': False,
                'num_of_sampled_actions': 5,
                'discrete_action_encoding_type': 'one_hot',
                'norm_type': 'BN'
            },
            'use_rnd_model': False,
            'sampled_algo': True,
            'gumbel_algo': False,
            # Use the C++ (ctree) MCTS implementation rather than pure Python.
            'mcts_ctree': True,
            'collector_env_num': 8,
            'evaluator_env_num': 3,
            'env_type': 'not_board_games',
            'action_type': 'fixed_action_space',
            'battle_mode': 'play_with_bot_mode',
            'monitor_extra_statistics': True,
            'game_segment_length': 400,
            'transform2string': False,
            'gray_scale': False,
            'use_augmentation': True,
            'augmentation': ['shift', 'intensity'],
            'ignore_done': False,
            'update_per_collect': 1000,
            'model_update_ratio': 0.1,
            'batch_size': 256,
            'optim_type': 'SGD',
            'learning_rate': 0.2,
            'target_update_freq': 100,
            'target_update_freq_for_intrinsic_reward': 1000,
            'weight_decay': 0.0001,
            'momentum': 0.9,
            'grad_clip_value': 10,
            'n_episode': 8,
            'num_simulations': 50,
            'discount_factor': 0.997,
            'td_steps': 5,
            'num_unroll_steps': 5,
            'reward_loss_weight': 1,
            'value_loss_weight': 0.25,
            'policy_loss_weight': 1,
            'policy_entropy_loss_weight': 0,
            'ssl_loss_weight': 2,
            'lr_piecewise_constant_decay': True,
            'threshold_training_steps_for_final_lr': 50000,
            'manual_temperature_decay': False,
            'threshold_training_steps_for_final_temperature': 100000,
            'fixed_temperature_value': 0.25,
            # NOTE: "ture" is a typo inherited from upstream LightZero; the
            # library reads this exact key, so it must not be renamed here.
            'use_ture_chance_label_in_chance_encoder': False,
            'use_priority': True,
            'priority_prob_alpha': 0.6,
            'priority_prob_beta': 0.4,
            'root_dirichlet_alpha': 0.3,
            'root_noise_weight': 0.25,
            'random_collect_episode_num': 0,
            # Epsilon-greedy exploration schedule (disabled for this run).
            'eps': {
                'eps_greedy_exploration_in_collect': False,
                'type': 'linear',
                'start': 1.0,
                'end': 0.05,
                'decay': 100000
            },
            'cfg_type': 'SampledEfficientZeroPolicyDict',
            'init_w': 0.003,
            'normalize_prob_of_sampled_actions': False,
            'policy_loss_type': 'cross_entropy',
            'lstm_horizon_len': 5,
            'cos_lr_scheduler': False,
            'reanalyze_ratio': 0.0,
            'eval_freq': 2000,
            'replay_buffer_size': 1000000
        },
        # All Weights & Biases logging channels are off by default.
        'wandb_logger': {
            'gradient_logger': False,
            'video_logger': False,
            'plot_logger': False,
            'action_logger': False,
            'return_logger': False
        }
    },
    'create_config': {
        'env': {
            'type': 'atari_lightzero',
            'import_names': ['zoo.atari.envs.atari_lightzero_env']
        },
        'env_manager': {
            'type': 'subprocess'
        },
        'policy': {
            'type': 'sampled_efficientzero',
            'import_names': ['lzero.policy.sampled_efficientzero']
        }
    }
}