from easydict import EasyDict

agent_num = 10
collector_env_num = 16
evaluator_env_num = 8

main_config = dict(
    exp_name='smac_MMM_coma_seed0',
    env=dict(
        map_name='MMM',
        difficulty=7,
        reward_only_positive=True,
        mirror_opponent=False,
        agent_num=agent_num,
        collector_env_num=collector_env_num,
        evaluator_env_num=evaluator_env_num,
        stop_value=0.999,
        n_evaluator_episode=32,
        manager=dict(
            shared_memory=False,
            reset_timeout=6000,
        ),
    ),
    policy=dict(
        model=dict(
            # (int) agent_num: The number of agents.
            # For SMAC 3s5z, agent_num=8; for 2c_vs_64zg, agent_num=2.
            agent_num=agent_num,
            # (dict) obs_shape: The dimensions of the per-agent and global observations.
            # For 3s5z, agent_state=150; for 2c_vs_64zg, agent_state=404.
            # For 3s5z, global_state=216; for 2c_vs_64zg, global_state=342.
            obs_shape=dict(
                agent_state=186,
                global_state=290,
            ),
            # (int) action_shape: The number of actions each agent can take.
            # action_shape = the number of common actions (6) + the number of enemies.
            # For 3s5z, action_shape=14 (6+8); for 2c_vs_64zg, action_shape=70 (6+64).
            action_shape=16,  # 6 common actions + 10 enemies on MMM
            # (List[int]) The sizes of the actor network's hidden layers.
            actor_hidden_size_list=[64],
        ),
        # env_num below is also used as the state_num of the policy's hidden state.
        collect=dict(
            n_episode=32,
            unroll_len=10,
            env_num=collector_env_num,
        ),
        eval=dict(env_num=evaluator_env_num, evaluator=dict(eval_freq=100, )),
        other=dict(
            eps=dict(
                type='exp',
                start=0.5,
                end=0.01,
                decay=200000,
            ),
            replay_buffer=dict(
                # (int) Max size of the replay buffer.
                replay_buffer_size=5000,
                # (int) Max use count of a sample; once a sample's count exceeds this value,
                # it is removed from the buffer.
                max_use=10,
            ),
        ),
    ),
)
main_config = EasyDict(main_config)

create_config = dict(
    env=dict(
        type='smac',
        import_names=['dizoo.smac.envs.smac_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='coma'),
    collector=dict(type='episode', get_train_sample=True),
)
create_config = EasyDict(create_config)

if __name__ == '__main__':
    from ding.entry import serial_pipeline
    serial_pipeline((main_config, create_config), seed=0)
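
# Usage note (a sketch, not part of the original config): DI-engine also exposes
# a CLI entry point, so an equivalent launch is typically
#     ding -m serial -c smac_MMM_coma_config.py -s 0
# assuming this file is saved as `smac_MMM_coma_config.py`.
#
# Porting the config to another map means changing the coupled fields together.
# A hypothetical 3s5z variant, using the dimensions quoted in the comments above:
#     main_config.env.map_name = '3s5z'
#     main_config.env.agent_num = main_config.policy.model.agent_num = 8
#     main_config.policy.model.obs_shape = dict(agent_state=150, global_state=216)
#     main_config.policy.model.action_shape = 14  # 6 common actions + 8 enemies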