from easydict import EasyDict

# 27m_vs_30m: 27 allied Marines against 30 enemy Marines.
agent_num = 27
collector_env_num = 8
evaluator_env_num = 8
# Agent-specific global state: per-agent features are appended to the shared
# global state (the trick used by MAPPO on SMAC).
special_global_state = True

main_config = dict(
    exp_name='smac_27m30m_mappo_seed0',
    env=dict(
        map_name='27m_vs_30m',
        # Built-in AI difficulty; 7 ('very hard') is the standard SMAC benchmark setting.
        difficulty=7,
        reward_only_positive=True,
        mirror_opponent=False,
        agent_num=agent_num,
        collector_env_num=collector_env_num,
        evaluator_env_num=evaluator_env_num,
        n_evaluator_episode=32,
        # Stop training once the evaluation metric (win rate) reaches 0.99.
        stop_value=0.99,
        # Death masking (zeroing the features of dead agents, cf. MAPPO); disabled here.
        death_mask=False,
        special_global_state=special_global_state,
        manager=dict(
            shared_memory=False,
            reset_timeout=6000,
        ),
    ),
    policy=dict(
        cuda=True,
        on_policy=True,
        multi_agent=True,
        continuous=False,
        model=dict(
            # (int) Number of agents controlled by the shared policy.
            agent_num=agent_num,
            # (int) Per-agent observation size and global state size for 27m_vs_30m
            # (the latter with special_global_state=True, as set above).
            agent_obs_shape=348,
            global_obs_shape=1454,
            # (int) Discrete action size: 6 common actions (no-op, stop, 4 move
            # directions) plus one attack action per enemy (30), i.e. 36.
            action_shape=36,
            actor_head_hidden_size=512,
            critic_head_hidden_size=1024,
        ),
        learn=dict(
            epoch_per_collect=5,
            batch_size=3200,
            learning_rate=5e-4,
            # (float) Loss weight of the value network (the policy loss weight is 1).
            value_weight=0.5,
            # (float) Loss weight of the entropy regularization term.
            entropy_weight=0.01,
            # (float) PPO clip ratio.
            clip_ratio=0.2,
            # (bool) Whether to normalize advantages over the whole training batch.
            adv_norm=False,
            value_norm=True,
            ppo_param_init=True,
            grad_clip_type='clip_norm',
            grad_clip_value=10,
            ignore_done=False,
        ),
        collect=dict(env_num=collector_env_num, n_sample=3200),
        eval=dict(
            evaluator=dict(eval_freq=100, ),
            env_num=evaluator_env_num,
        ),
    ),
)
main_config = EasyDict(main_config)
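
# EasyDict gives attribute-style access, so fields can be tweaked after the
# config dict is built. Illustrative overrides (not part of the original
# config), e.g. for a quick CPU-only debug run:
#   main_config.policy.cuda = False
#   main_config.exp_name = 'smac_27m30m_mappo_debug'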

create_config = dict(
    env=dict(
        type='smac',
        import_names=['dizoo.smac.envs.smac_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='ppo'),
)
create_config = EasyDict(create_config)

if __name__ == '__main__':
    from ding.entry import serial_pipeline_onpolicy
    serial_pipeline_onpolicy((main_config, create_config), seed=0)
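    # A minimal sketch (not part of the original config): sweep additional seeds
    # by deep-copying both configs and giving each run its own exp_name so logs
    # and checkpoints do not collide. The seed list below is illustrative.
    # import copy
    # for seed in (1, 2):
    #     cfg, create_cfg = copy.deepcopy(main_config), copy.deepcopy(create_config)
    #     cfg.exp_name = f'smac_27m30m_mappo_seed{seed}'
    #     serial_pipeline_onpolicy((cfg, create_cfg), seed=seed)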
|