from typing import Optional, Tuple
import torch
import torch.nn as nn
from ding.utils import MODEL_REGISTRY, SequenceType
from .common import RepresentationNetworkMLP, PredictionNetworkMLP
from .muzero_model_mlp import DynamicsNetwork
from .stochastic_muzero_model import StochasticMuZeroModel, ChanceEncoder
from .utils import renormalize
@MODEL_REGISTRY.register('StochasticMuZeroModelMLP')
class StochasticMuZeroModelMLP(StochasticMuZeroModel):
def __init__(
self,
observation_shape: int = 2,
action_space_size: int = 6,
chance_space_size: int = 2,
latent_state_dim: int = 256,
fc_reward_layers: SequenceType = [32],
fc_value_layers: SequenceType = [32],
fc_policy_layers: SequenceType = [32],
reward_support_size: int = 601,
value_support_size: int = 601,
proj_hid: int = 1024,
proj_out: int = 1024,
pred_hid: int = 512,
pred_out: int = 1024,
self_supervised_learning_loss: bool = False,
categorical_distribution: bool = True,
activation: Optional[nn.Module] = nn.ReLU(inplace=True),
last_linear_layer_init_zero: bool = True,
state_norm: bool = False,
discrete_action_encoding_type: str = 'one_hot',
norm_type: Optional[str] = 'BN',
res_connection_in_dynamics: bool = False,
*args,
**kwargs
):
"""
Overview:
            The definition of the network model of Stochastic MuZero, adapted for 1D vector observations. \
            The networks are mainly built on fully connected layers. \
            The representation network is an MLP which maps the raw observation to a latent state. \
            The dynamics network is an MLP which predicts the next latent state and reward, given the current latent state and action. \
            The prediction network is an MLP which predicts the value and policy, given the current latent state. \
            A minimal usage example is given in the ``Examples`` section below.
Arguments:
            - observation_shape (:obj:`int`): Observation space shape, e.g. 8 for LunarLander.
            - action_space_size (:obj:`int`): Action space size, usually an integer for discrete action spaces, e.g. 4 for LunarLander.
            - chance_space_size (:obj:`int`): Chance space size, i.e. the number of discrete chance outcomes, e.g. 2.
- latent_state_dim (:obj:`int`): The dimension of latent state, such as 256.
- fc_reward_layers (:obj:`SequenceType`): The number of hidden layers of the reward head (MLP head).
- fc_value_layers (:obj:`SequenceType`): The number of hidden layers used in value head (MLP head).
- fc_policy_layers (:obj:`SequenceType`): The number of hidden layers used in policy head (MLP head).
            - reward_support_size (:obj:`int`): The size of categorical reward output.
- value_support_size (:obj:`int`): The size of categorical value output.
- proj_hid (:obj:`int`): The size of projection hidden layer.
- proj_out (:obj:`int`): The size of projection output layer.
- pred_hid (:obj:`int`): The size of prediction hidden layer.
- pred_out (:obj:`int`): The size of prediction output layer.
            - self_supervised_learning_loss (:obj:`bool`): Whether to use the self-supervised-learning related networks in the Stochastic MuZero model, default sets it to False.
- categorical_distribution (:obj:`bool`): Whether to use discrete support to represent categorical distribution for value, reward/value_prefix.
- activation (:obj:`Optional[nn.Module]`): Activation function used in network, which often use in-place \
operation to speedup, e.g. ReLU(inplace=True).
- last_linear_layer_init_zero (:obj:`bool`): Whether to use zero initializations for the last layer of value/policy mlp, default sets it to True.
            - state_norm (:obj:`bool`): Whether to use normalization for latent states, default sets it to False.
- discrete_action_encoding_type (:obj:`str`): The encoding type of discrete action, which can be 'one_hot' or 'not_one_hot'.
- norm_type (:obj:`str`): The type of normalization in networks. defaults to 'BN'.
            - res_connection_in_dynamics (:obj:`bool`): Whether to use residual connection in the dynamics network, default sets it to False.
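        Examples:
            >>> # A minimal usage sketch with LunarLander-like settings (8-dim observations, 4 actions).
            >>> # Only the representation network is exercised here; the full inference API is inherited
            >>> # from ``StochasticMuZeroModel``.
            >>> model = StochasticMuZeroModelMLP(observation_shape=8, action_space_size=4, chance_space_size=2)
            >>> obs = torch.randn(8, 8)
            >>> latent_state = model.representation_network(obs)
            >>> latent_state.shape  # (8, 256)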
"""
super(StochasticMuZeroModelMLP, self).__init__()
self.categorical_distribution = categorical_distribution
if not self.categorical_distribution:
self.reward_support_size = 1
self.value_support_size = 1
else:
self.reward_support_size = reward_support_size
self.value_support_size = value_support_size
self.action_space_size = action_space_size
self.chance_space_size = chance_space_size
self.continuous_action_space = False
# The dim of action space. For discrete action space, it is 1.
# For continuous action space, it is the dimension of continuous action.
self.action_space_dim = action_space_size if self.continuous_action_space else 1
assert discrete_action_encoding_type in ['one_hot', 'not_one_hot'], discrete_action_encoding_type
self.discrete_action_encoding_type = discrete_action_encoding_type
if self.continuous_action_space:
self.action_encoding_dim = action_space_size
else:
if self.discrete_action_encoding_type == 'one_hot':
self.action_encoding_dim = action_space_size
elif self.discrete_action_encoding_type == 'not_one_hot':
self.action_encoding_dim = 1
self.latent_state_dim = latent_state_dim
self.proj_hid = proj_hid
self.proj_out = proj_out
self.pred_hid = pred_hid
self.pred_out = pred_out
self.self_supervised_learning_loss = self_supervised_learning_loss
self.last_linear_layer_init_zero = last_linear_layer_init_zero
self.state_norm = state_norm
self.res_connection_in_dynamics = res_connection_in_dynamics
self.representation_network = RepresentationNetworkMLP(
observation_shape=observation_shape, hidden_channels=self.latent_state_dim, norm_type=norm_type
)
# TODO(pu): different input data type for chance_encoder
# here, the input is two concatenated frames
self.chance_encoder = ChanceEncoder(observation_shape * 2, chance_space_size, encoder_backbone_type='mlp')
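        # Note: for a 1D observation of dimension ``observation_shape`` (e.g. 8), the chance encoder is
        # assumed to receive the concatenation of two consecutive observations,
        # i.e. an input of shape (B, 2 * observation_shape).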
self.dynamics_network = DynamicsNetwork(
action_encoding_dim=self.action_encoding_dim,
num_channels=self.latent_state_dim + self.action_encoding_dim,
common_layer_num=2,
fc_reward_layers=fc_reward_layers,
output_support_size=self.reward_support_size,
last_linear_layer_init_zero=self.last_linear_layer_init_zero,
norm_type=norm_type,
res_connection_in_dynamics=self.res_connection_in_dynamics,
)
self.prediction_network = PredictionNetworkMLP(
action_space_size=action_space_size,
num_channels=latent_state_dim,
fc_value_layers=fc_value_layers,
fc_policy_layers=fc_policy_layers,
output_support_size=self.value_support_size,
last_linear_layer_init_zero=self.last_linear_layer_init_zero,
norm_type=norm_type
)
self.afterstate_dynamics_network = AfterstateDynamicsNetwork(
action_encoding_dim=self.action_encoding_dim,
num_channels=self.latent_state_dim + self.action_encoding_dim,
common_layer_num=2,
fc_reward_layers=fc_reward_layers,
output_support_size=self.reward_support_size,
last_linear_layer_init_zero=self.last_linear_layer_init_zero,
norm_type=norm_type,
res_connection_in_dynamics=self.res_connection_in_dynamics,
)
self.afterstate_prediction_network = AfterstatePredictionNetworkMLP(
chance_space_size=chance_space_size,
num_channels=latent_state_dim,
fc_value_layers=fc_value_layers,
fc_policy_layers=fc_policy_layers,
output_support_size=self.value_support_size,
last_linear_layer_init_zero=self.last_linear_layer_init_zero,
norm_type=norm_type
)
if self.self_supervised_learning_loss:
# self_supervised_learning_loss related network proposed in EfficientZero
self.projection_input_dim = latent_state_dim
self.projection = nn.Sequential(
nn.Linear(self.projection_input_dim, self.proj_hid), nn.BatchNorm1d(self.proj_hid), activation,
nn.Linear(self.proj_hid, self.proj_hid), nn.BatchNorm1d(self.proj_hid), activation,
nn.Linear(self.proj_hid, self.proj_out), nn.BatchNorm1d(self.proj_out)
)
self.prediction_head = nn.Sequential(
nn.Linear(self.proj_out, self.pred_hid),
nn.BatchNorm1d(self.pred_hid),
activation,
nn.Linear(self.pred_hid, self.pred_out),
)
def _dynamics(self, latent_state: torch.Tensor, action: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Overview:
            Concatenate ``latent_state`` and ``action`` and use the dynamics network to predict ``next_latent_state`` \
            and ``reward``.
        Arguments:
            - latent_state (:obj:`torch.Tensor`): The encoding latent state of input state.
            - action (:obj:`torch.Tensor`): The predicted action to rollout.
        Returns:
            - next_latent_state (:obj:`torch.Tensor`): The predicted latent state of the next timestep.
            - reward (:obj:`torch.Tensor`): The predicted reward for input state.
Shapes:
- latent_state (:obj:`torch.Tensor`): :math:`(B, H)`, where B is batch_size, H is the dimension of latent state.
- action (:obj:`torch.Tensor`): :math:`(B, )`, where B is batch_size.
- next_latent_state (:obj:`torch.Tensor`): :math:`(B, H)`, where B is batch_size, H is the dimension of latent state.
- reward (:obj:`torch.Tensor`): :math:`(B, reward_support_size)`, where B is batch_size.
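        Examples:
            >>> # A minimal sketch under the default settings (latent_state_dim=256, action_space_size=6,
            >>> # discrete_action_encoding_type='one_hot', reward_support_size=601).
            >>> latent_state = torch.randn(8, 256)
            >>> action = torch.randint(0, 6, (8, ))
            >>> next_latent_state, reward = self._dynamics(latent_state, action)
            >>> next_latent_state.shape  # (8, 256)
            >>> reward.shape  # (8, 601)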
"""
# NOTE: the discrete action encoding type is important for some environments
# discrete action space
if self.discrete_action_encoding_type == 'one_hot':
# Stack latent_state with the one hot encoded action
if len(action.shape) == 1:
# (batch_size, ) -> (batch_size, 1)
# e.g., torch.Size([8]) -> torch.Size([8, 1])
action = action.unsqueeze(-1)
# transform action to one-hot encoding.
# action_one_hot shape: (batch_size, action_space_size), e.g., (8, 4)
action_one_hot = torch.zeros(action.shape[0], self.action_space_size, device=action.device)
# transform action to torch.int64
action = action.long()
action_one_hot.scatter_(1, action, 1)
action_encoding = action_one_hot
elif self.discrete_action_encoding_type == 'not_one_hot':
action_encoding = action / self.action_space_size
if len(action_encoding.shape) == 1:
# (batch_size, ) -> (batch_size, 1)
# e.g., torch.Size([8]) -> torch.Size([8, 1])
action_encoding = action_encoding.unsqueeze(-1)
action_encoding = action_encoding.to(latent_state.device).float()
        # state_action_encoding shape: (batch_size, latent_state_dim + 1) or
        # (batch_size, latent_state_dim + action_space_size), depending on the discrete_action_encoding_type.
state_action_encoding = torch.cat((latent_state, action_encoding), dim=1)
next_latent_state, reward = self.dynamics_network(state_action_encoding)
if not self.state_norm:
return next_latent_state, reward
else:
next_latent_state_normalized = renormalize(next_latent_state)
return next_latent_state_normalized, reward
def _afterstate_dynamics(self, latent_state: torch.Tensor, action: torch.Tensor) -> Tuple[
torch.Tensor, torch.Tensor]:
"""
Overview:
            Concatenate ``latent_state`` and ``action`` and use the afterstate dynamics network to predict the \
            afterstate and ``reward``. In Stochastic MuZero, the afterstate is the latent state reached after \
            the agent's action is applied but before the chance outcome is resolved.
        Arguments:
            - latent_state (:obj:`torch.Tensor`): The encoding latent state of input state.
            - action (:obj:`torch.Tensor`): The predicted action to rollout.
        Returns:
            - next_latent_state (:obj:`torch.Tensor`): The predicted afterstate.
            - reward (:obj:`torch.Tensor`): The predicted reward for input state.
Shapes:
- latent_state (:obj:`torch.Tensor`): :math:`(B, H)`, where B is batch_size, H is the dimension of latent state.
- action (:obj:`torch.Tensor`): :math:`(B, )`, where B is batch_size.
- next_latent_state (:obj:`torch.Tensor`): :math:`(B, H)`, where B is batch_size, H is the dimension of latent state.
- reward (:obj:`torch.Tensor`): :math:`(B, reward_support_size)`, where B is batch_size.
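        Examples:
            >>> # A minimal sketch, mirroring ``_dynamics`` under the default settings.
            >>> latent_state = torch.randn(8, 256)
            >>> action = torch.randint(0, 6, (8, ))
            >>> afterstate, reward = self._afterstate_dynamics(latent_state, action)
            >>> afterstate.shape  # (8, 256)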
"""
# NOTE: the discrete action encoding type is important for some environments
# discrete action space
if self.discrete_action_encoding_type == 'one_hot':
# Stack latent_state with the one hot encoded action
if len(action.shape) == 1:
# (batch_size, ) -> (batch_size, 1)
# e.g., torch.Size([8]) -> torch.Size([8, 1])
action = action.unsqueeze(-1)
# transform action to one-hot encoding.
# action_one_hot shape: (batch_size, action_space_size), e.g., (8, 4)
action_one_hot = torch.zeros(action.shape[0], self.action_space_size, device=action.device)
# transform action to torch.int64
action = action.long()
action_one_hot.scatter_(1, action, 1)
action_encoding = action_one_hot
elif self.discrete_action_encoding_type == 'not_one_hot':
action_encoding = action / self.action_space_size
if len(action_encoding.shape) == 1:
# (batch_size, ) -> (batch_size, 1)
# e.g., torch.Size([8]) -> torch.Size([8, 1])
action_encoding = action_encoding.unsqueeze(-1)
action_encoding = action_encoding.to(latent_state.device).float()
        # state_action_encoding shape: (batch_size, latent_state_dim + 1) or
        # (batch_size, latent_state_dim + action_space_size), depending on the discrete_action_encoding_type.
state_action_encoding = torch.cat((latent_state, action_encoding), dim=1)
        next_latent_state, reward = self.afterstate_dynamics_network(state_action_encoding)
if not self.state_norm:
return next_latent_state, reward
else:
next_latent_state_normalized = renormalize(next_latent_state)
return next_latent_state_normalized, reward
def project(self, latent_state: torch.Tensor, with_grad=True) -> torch.Tensor:
"""
Overview:
            Project the latent state into the projection space to compute the self-supervised consistency loss, which is \
            proposed in EfficientZero. For more details, please refer to the paper ``Exploring Simple Siamese Representation Learning``.
Arguments:
- latent_state (:obj:`torch.Tensor`): The encoding latent state of input state.
- with_grad (:obj:`bool`): Whether to calculate gradient for the projection result.
Returns:
- proj (:obj:`torch.Tensor`): The result embedding vector of projection operation.
Shapes:
- latent_state (:obj:`torch.Tensor`): :math:`(B, H)`, where B is batch_size, H is the dimension of latent state.
- proj (:obj:`torch.Tensor`): :math:`(B, projection_output_dim)`, where B is batch_size.
Examples:
            >>> # Assuming the default ``latent_state_dim=256``:
            >>> latent_state = torch.randn(256, 256)
            >>> output = self.project(latent_state)
            >>> output.shape # (256, 1024)
"""
proj = self.projection(latent_state)
if with_grad:
# with grad, use prediction_head
return self.prediction_head(proj)
else:
return proj.detach()
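# A minimal, hypothetical sketch of how ``project`` is typically used for the EfficientZero-style
# self-supervised consistency loss (SimSiam negative cosine similarity). The tensor names below are
# illustrative and are not attributes of this class (with F = torch.nn.functional):
#
#     proj_pred = model.project(predicted_next_latent_state, with_grad=True)    # online branch
#     proj_target = model.project(target_next_latent_state, with_grad=False)    # stop-gradient branch
#     consistency_loss = -F.cosine_similarity(proj_pred, proj_target, dim=1).mean()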
AfterstateDynamicsNetwork = DynamicsNetwork
class AfterstatePredictionNetworkMLP(PredictionNetworkMLP):
def __init__(
self,
chance_space_size,
num_channels,
common_layer_num: int = 2,
fc_value_layers: SequenceType = [32],
fc_policy_layers: SequenceType = [32],
output_support_size: int = 601,
last_linear_layer_init_zero: bool = True,
activation: Optional[nn.Module] = nn.ReLU(inplace=True),
norm_type: Optional[str] = 'BN',
):
"""
Overview:
The definition of policy and value prediction network with Multi-Layer Perceptron (MLP), \
which is used to predict value and policy by the given latent state.
Arguments:
            - chance_space_size (:obj:`int`): Chance space size, i.e. the number of discrete chance outcomes, e.g. 2.
- num_channels (:obj:`int`): The channels of latent states.
- fc_value_layers (:obj:`SequenceType`): The number of hidden layers used in value head (MLP head).
- fc_policy_layers (:obj:`SequenceType`): The number of hidden layers used in policy head (MLP head).
- output_support_size (:obj:`int`): The size of categorical value output.
- last_linear_layer_init_zero (:obj:`bool`): Whether to use zero initializations for the last layer of \
dynamics/prediction mlp, default sets it to True.
- activation (:obj:`Optional[nn.Module]`): Activation function used in network, which often use in-place \
operation to speedup, e.g. ReLU(inplace=True).
- norm_type (:obj:`str`): The type of normalization in networks. defaults to 'BN'.
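        Examples:
            >>> # A minimal sketch; the forward pass is inherited from ``PredictionNetworkMLP`` and is
            >>> # assumed here to return ``(policy_logits, value)``.
            >>> net = AfterstatePredictionNetworkMLP(chance_space_size=2, num_channels=256)
            >>> afterstate = torch.randn(8, 256)
            >>> policy_logits, value = net(afterstate)
            >>> policy_logits.shape  # (8, 2)
            >>> value.shape  # (8, 601)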
"""
        super(AfterstatePredictionNetworkMLP, self).__init__(
            chance_space_size, num_channels, common_layer_num, fc_value_layers, fc_policy_layers,
            output_support_size, last_linear_layer_init_zero, activation, norm_type
        )