diff --git a/baselines/lompo/__init__.py b/baselines/lompo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/baselines/lompo/buffer.py b/baselines/lompo/buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..177f952c91d924fa5e18dc097024f7f5cc028b34 --- /dev/null +++ b/baselines/lompo/buffer.py @@ -0,0 +1,126 @@ +import h5py +import numpy as np + + +class Buffer(): + def __init__(self): + self._obs = None + self._actions = None + self._rewards = None + self._next_obs = None + self._terminals = None + + +class LatentReplayBuffer(object): + def __init__(self, + real_size: int, + latent_size: int, + obs_dim: int, + action_dim: int, + immutable: bool = False, + load_from: str = None, + silent: bool = False, + seed: int = 0): + + self.immutable = immutable + + self.buffers = dict() + self.sizes = {'real': real_size, 'latent': latent_size} + for key in ['real', 'latent']: + self.buffers[key] = Buffer() + self.buffers[key]._obs = np.full((self.sizes[key], obs_dim), float('nan'), dtype=np.float32) + self.buffers[key]._actions = np.full((self.sizes[key], action_dim), float('nan'), dtype=np.float32) + self.buffers[key]._rewards = np.full((self.sizes[key], 1), float('nan'), dtype=np.float32) + self.buffers[key]._next_obs = np.full((self.sizes[key], obs_dim), float('nan'), dtype=np.float32) + self.buffers[key]._terminals = np.full((self.sizes[key], 1), float('nan'), dtype=np.float32) + + self._real_stored_steps = 0 + self._real_write_location = 0 + + self._latent_stored_steps = 0 + self._latent_write_location = 0 + + self._stored_steps = 0 + self._random = np.random.RandomState(seed) + + @property + def obs_dim(self): + return self._obs.shape[-1] + + @property + def action_dim(self): + return self._actions.shape[-1] + + def __len__(self): + return self._stored_steps + + def save(self, location: str): + f = h5py.File(location, 'w') + f.create_dataset('obs', data=self.buffers['real']._obs[:self._real_stored_steps], compression='lzf') + f.create_dataset('actions', data=self.buffers['real']._actions[:self._real_stored_steps], compression='lzf') + f.create_dataset('rewards', data=self.buffers['real']._rewards[:self._real_stored_steps], compression='lzf') + f.create_dataset('next_obs', data=self.buffers['real']._next_obs[:self._real_stored_steps], compression='lzf') + f.create_dataset('terminals', data=self.buffers['real']._terminals[:self._real_stored_steps], compression='lzf') + f.close() + + def load(self, location: str): + with h5py.File(location, "r") as f: + obs = np.array(f['obs']) + self._real_stored_steps = obs.shape[0] + self._real_write_location = obs.shape[0] % self.sizes['real'] + + self.buffers['real']._obs[:self._real_stored_steps] = np.array(f['obs']) + self.buffers['real']._actions[:self._real_stored_steps] = np.array(f['actions']) + self.buffers['real']._rewards[:self._real_stored_steps] = np.array(f['rewards']) + self.buffers['real']._next_obs[:self._real_stored_steps] = np.array(f['next_obs']) + self.buffers['real']._terminals[:self._real_stored_steps] = np.array(f['terminals']) + + def add_samples(self, obs_feats, actions, next_obs_feats, rewards, terminals, sample_type='latent'): + if sample_type == 'real': + for obsi, actsi, nobsi, rewi, termi in zip(obs_feats, actions, next_obs_feats, rewards, terminals): + self.buffers['real']._obs[self._real_write_location] = obsi + self.buffers['real']._actions[self._real_write_location] = actsi + 
self.buffers['real']._next_obs[self._real_write_location] = nobsi + self.buffers['real']._rewards[self._real_write_location] = rewi + self.buffers['real']._terminals[self._real_write_location] = termi + + self._real_write_location = (self._real_write_location + 1) % self.sizes['real'] + self._real_stored_steps = min(self._real_stored_steps + 1, self.sizes['real']) + + else: + for obsi, actsi, nobsi, rewi, termi in zip(obs_feats, actions, next_obs_feats, rewards, terminals): + self.buffers['latent']._obs[self._latent_write_location] = obsi + self.buffers['latent']._actions[self._latent_write_location] = actsi + self.buffers['latent']._next_obs[self._latent_write_location] = nobsi + self.buffers['latent']._rewards[self._latent_write_location] = rewi + self.buffers['latent']._terminals[self._latent_write_location] = termi + + self._latent_write_location = (self._latent_write_location + 1) % self.sizes['latent'] + self._latent_stored_steps = min(self._latent_stored_steps + 1, self.sizes['latent']) + + self._stored_steps = self._real_stored_steps + self._latent_stored_steps + + def sample(self, batch_size, return_dict: bool = False): + real_idxs = self._random.choice(self._real_stored_steps, batch_size) + latent_idxs = self._random.choice(self._latent_stored_steps, batch_size) + + obs = np.concatenate([self.buffers['real']._obs[real_idxs], + self.buffers['latent']._obs[latent_idxs]], axis=0) + actions = np.concatenate([self.buffers['real']._actions[real_idxs], + self.buffers['latent']._actions[latent_idxs]], axis=0) + next_obs = np.concatenate([self.buffers['real']._next_obs[real_idxs], + self.buffers['latent']._next_obs[latent_idxs]], axis=0) + rewards = np.concatenate([self.buffers['real']._rewards[real_idxs], + self.buffers['latent']._rewards[latent_idxs]], axis=0) + terminals = np.concatenate([self.buffers['real']._terminals[real_idxs], + self.buffers['latent']._terminals[latent_idxs]], axis=0) + + data = { + 'obs': obs, + 'actions': actions, + 'next_obs': next_obs, + 'rewards': rewards, + 'terminals': terminals + } + + return data diff --git a/baselines/lompo/models.py b/baselines/lompo/models.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5c272cc112f19091ef41ba2843b24bda2b61fd --- /dev/null +++ b/baselines/lompo/models.py @@ -0,0 +1,250 @@ +import numpy as np +import tensorflow as tf +from tensorflow.keras import layers as tfkl +from tensorflow_probability import distributions as tfd +from tensorflow.keras.mixed_precision import experimental as prec + +from . 
import tools + + +class RSSME(tools.Module): + def __init__(self, stoch=30, deter=200, hidden=200, num_models=7, act=tf.nn.elu): + super().__init__() + self._activation = act + self._stoch_size = stoch + self._deter_size = deter + self._hidden_size = hidden + self._cell = tfkl.GRUCell(self._deter_size) + self._k = num_models + + def initial(self, batch_size): + dtype = prec.global_policy().compute_dtype + return dict( + mean=tf.zeros([batch_size, self._stoch_size], dtype), + std=tf.zeros([batch_size, self._stoch_size], dtype), + stoch=tf.zeros([batch_size, self._stoch_size], dtype), + deter=self._cell.get_initial_state(None, batch_size, dtype)) + + def observe(self, embed, action, state=None): + if state is None: + state = self.initial(tf.shape(action)[0]) + embed = tf.transpose(embed, [1, 0, 2]) + action = tf.transpose(action, [1, 0, 2]) + post, prior = tools.static_scan(lambda prev, inputs: self.obs_step(prev[0], *inputs), + (action, embed), (state, state)) + post = {k: tf.transpose(v, [1, 0, 2]) for k, v in post.items()} + prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()} + return post, prior + + def imagine(self, action, state=None): + if state is None: + state = self.initial(tf.shape(action)[0]) + assert isinstance(state, dict), state + action = tf.transpose(action, [1, 0, 2]) + prior = tools.static_scan(self.img_step, action, state) + prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()} + return prior + + def get_feat(self, state): + return tf.concat([state['stoch'], state['deter']], -1) + + def get_feat_size(self, state): + return self._stoch_size + self._deter_size + + def get_dist(self, state): + return tfd.MultivariateNormalDiag(state['mean'], state['std']) + + def obs_step(self, prev_state, prev_action, embed): + prior = self.img_step(prev_state, prev_action) + x = tf.concat([prior['deter'], embed], -1) + x = self.get('obs1', tfkl.Dense, self._hidden_size, self._activation)(x) + x = self.get('obs2', tfkl.Dense, 2 * self._stoch_size, None)(x) + mean, std = tf.split(x, 2, -1) + std = tf.nn.softplus(std) + 0.1 + stoch = self.get_dist({'mean': mean, 'std': std}).sample() + post = {'mean': mean, 'std': std, 'stoch': stoch, 'deter': prior['deter']} + return post, prior + + def img_step(self, prev_state, prev_action, k=None): + if k is None: + k = np.random.choice(self._k) + x = tf.concat([prev_state['stoch'], prev_action], -1) + x = self.get('img1', tfkl.Dense, self._hidden_size, self._activation)(x) + x, deter = self._cell(x, [prev_state['deter']]) + deter = deter[0] # Keras wraps the state in a list. 
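+        # Each ensemble member k has its own prior head (img2_k, img3_k); the GRU cell above is shared.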
+ x = self.get('img2_{}'.format(k), tfkl.Dense, self._hidden_size, self._activation)(x) + x = self.get('img3_{}'.format(k), tfkl.Dense, 2 * self._stoch_size, None)(x) + mean, std = tf.split(x, 2, -1) + std = tf.nn.softplus(std) + 0.1 + stoch = self.get_dist({'mean': mean, 'std': std}).sample() + prior = {'mean': mean, 'std': std, 'stoch': stoch, 'deter': deter} + return prior + + +class MultivariateNormalDiag(tools.Module): + def __init__(self, hidden_size, latent_size, scale=None): + super().__init__() + self.latent_size = latent_size + self.scale = scale + self.dense1 = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu) + self.dense2 = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu) + self.output_layer = tf.keras.layers.Dense(2 * latent_size if self.scale + is None else latent_size) + + def __call__(self, *inputs): + if len(inputs) > 1: + inputs = tf.concat(inputs, axis=-1) + else: + inputs, = inputs + out = self.dense1(inputs) + out = self.dense2(out) + out = self.output_layer(out) + loc = out[..., :self.latent_size] + if self.scale is None: + assert out.shape[-1] == 2 * self.latent_size + scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-5 + else: + assert out.shape[-1].value == self.latent_size + scale_diag = tf.ones_like(loc) * self.scale + return loc, scale_diag + + +class ConstantMultivariateNormalDiag(tools.Module): + def __init__(self, latent_size, scale=None): + super().__init__() + self.latent_size = latent_size + self.scale = scale + + def __call__(self, *inputs): + # first input should not have any dimensions after the batch_shape, step_type + batch_shape = tf.shape(inputs[0]) # input is only used to infer batch_shape + shape = tf.concat([batch_shape, [self.latent_size]], axis=0) + loc = tf.zeros(shape) + if self.scale is None: + scale_diag = tf.ones(shape) + else: + scale_diag = tf.ones(shape) * self.scale + return loc, scale_diag + + +class ConvEncoderLarge(tools.Module): + def __init__(self, depth=32, act=tf.nn.relu): + self._act = act + self._depth = depth + + def __call__(self, obs): + kwargs = dict(strides=2, activation=self._act) + x = tf.reshape(obs['image'], (-1,) + tuple(obs['image'].shape[-3:])) + x = self.get('h1', tfkl.Conv2D, 1 * self._depth, 4, **kwargs)(x) + x = self.get('h2', tfkl.Conv2D, 2 * self._depth, 4, **kwargs)(x) + x = self.get('h3', tfkl.Conv2D, 4 * self._depth, 4, **kwargs)(x) + x = self.get('h4', tfkl.Conv2D, 8 * self._depth, 4, **kwargs)(x) + x = self.get('h5', tfkl.Conv2D, 8 * self._depth, 4, **kwargs)(x) + shape = tf.concat([tf.shape(obs['image'])[:-3], [32 * self._depth]], 0) + return tf.reshape(x, shape) + + +class ConvDecoderLarge(tools.Module): + def __init__(self, depth=32, act=tf.nn.relu, shape=(128, 128, 3)): + self._act = act + self._depth = depth + self._shape = shape + + def __call__(self, features): + kwargs = dict(strides=2, activation=self._act) + x = self.get('h1', tfkl.Dense, 32 * self._depth, None)(features) + x = tf.reshape(x, [-1, 1, 1, 32 * self._depth]) + x = self.get('h2', tfkl.Conv2DTranspose, 4 * self._depth, 5, **kwargs)(x) + x = self.get('h3', tfkl.Conv2DTranspose, 2 * self._depth, 5, **kwargs)(x) + x = self.get('h4', tfkl.Conv2DTranspose, 1 * self._depth, 5, **kwargs)(x) + x = self.get('h5', tfkl.Conv2DTranspose, 1 * self._depth, 6, **kwargs)(x) + x = self.get('h6', tfkl.Conv2DTranspose, self._shape[-1], 6, strides=2)(x) + mean = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0)) + return tfd.Independent(tfd.Normal(mean, 1), len(self._shape)) + + +class 
ConvEncoder(tools.Module): + def __init__(self, depth=32, act=tf.nn.relu): + self._act = act + self._depth = depth + + def __call__(self, obs): + kwargs = dict(strides=2, activation=self._act) + x = tf.reshape(obs['image'], (-1,) + tuple(obs['image'].shape[-3:])) + x = self.get('h1', tfkl.Conv2D, 1 * self._depth, 4, **kwargs)(x) + x = self.get('h2', tfkl.Conv2D, 2 * self._depth, 4, **kwargs)(x) + x = self.get('h3', tfkl.Conv2D, 4 * self._depth, 4, **kwargs)(x) + x = self.get('h4', tfkl.Conv2D, 8 * self._depth, 4, **kwargs)(x) + shape = tf.concat([tf.shape(obs['image'])[:-3], [32 * self._depth]], 0) + return tf.reshape(x, shape) + + +class ConvDecoder(tools.Module): + def __init__(self, depth=32, act=tf.nn.relu, shape=(64, 64, 3)): + self._act = act + self._depth = depth + self._shape = shape + + def __call__(self, features): + kwargs = dict(strides=2, activation=self._act) + x = self.get('h1', tfkl.Dense, 32 * self._depth, None)(features) + x = tf.reshape(x, [-1, 1, 1, 32 * self._depth]) + x = self.get('h2', tfkl.Conv2DTranspose, 4 * self._depth, 5, **kwargs)(x) + x = self.get('h3', tfkl.Conv2DTranspose, 2 * self._depth, 5, **kwargs)(x) + x = self.get('h4', tfkl.Conv2DTranspose, 1 * self._depth, 6, **kwargs)(x) + x = self.get('h5', tfkl.Conv2DTranspose, self._shape[-1], 6, strides=2)(x) + mean = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0)) + return tfd.Independent(tfd.Normal(mean, 1), len(self._shape)) + + +class DenseDecoder(tools.Module): + def __init__(self, shape, layers, units, dist='normal', act=tf.nn.elu): + self._shape = shape + self._layers = layers + self._units = units + self._dist = dist + self._act = act + + def __call__(self, features): + x = features + for index in range(self._layers): + x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x) + x = self.get(f'hout', tfkl.Dense, np.prod(self._shape))(x) + x = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0)) + if self._dist == 'normal': + return tfd.Independent(tfd.Normal(x, 1), len(self._shape)) + if self._dist == 'binary': + return tfd.Independent(tfd.Bernoulli(x), len(self._shape)) + raise NotImplementedError(self._dist) + + +class DenseNetwork(tools.Module): + def __init__(self, shape, layers, units, act=tf.nn.elu): + self._shape = shape + self._layers = layers + self._units = units + self._act = act + + def __call__(self, features): + x = features + for index in range(self._layers): + x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x) + x = self.get(f'hout', tfkl.Dense, self._shape)(x) + return x + + +class ActorNetwork(tools.Module): + def __init__(self, shape, layers, units, act=tf.nn.elu, mean_scale=1.0): + self._shape = shape + self._layers = layers + self._units = units + self._act = act + self._mean_scale = mean_scale + + def __call__(self, features): + x = features + for index in range(self._layers): + x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x) + x = self.get(f'hout', tfkl.Dense, self._shape)(x) + x = self._mean_scale * tf.tanh(x) + return x diff --git a/baselines/lompo/run.py b/baselines/lompo/run.py new file mode 100644 index 0000000000000000000000000000000000000000..6981788804b620d3ec94b55dafb1507991820f3c --- /dev/null +++ b/baselines/lompo/run.py @@ -0,0 +1,723 @@ +import argparse +from copy import deepcopy +import functools +import os +import pathlib + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' + +import numpy as np +import random +import tensorflow as tf +from tensorflow.keras.mixed_precision import experimental as prec + 
+tf.get_logger().setLevel('ERROR') + +from tensorflow_probability import distributions as tfd + +from baselines.lompo import buffer +from baselines.lompo import models +from baselines.lompo import tools +from baselines.lompo import wrappers + + +def define_config(): + config = tools.AttrDict() + # General. + config.logdir = pathlib.Path('.logdir') + config.loaddir = pathlib.Path('.logdir') + config.datadir = pathlib.Path('.datadir/walker') + config.seed = 0 + config.log_every = 1000 + config.save_every = 5000 + config.log_scalars = True + config.log_images = True + config.gpu_growth = True + + # Environment. + config.task = 'dmc_walker_walk' + config.envs = 1 + config.parallel = 'none' + config.action_repeat = 2 + config.time_limit = 1000 + config.im_size = 64 + config.eval_noise = 0.0 + config.clip_rewards = 'none' + config.precision = 32 + + # Model. + config.deter_size = 256 + config.stoch_size = 64 + config.num_models = 7 + config.num_units = 256 + config.proprio = False + config.penalty_type = 'log_prob' + config.dense_act = 'elu' + config.cnn_act = 'relu' + config.cnn_depth = 32 + config.pcont = False + config.kl_scale = 1.0 + config.pcont_scale = 10.0 + config.weight_decay = 0.0 + config.weight_decay_pattern = r'.*' + + # Training. + config.load_model = False + config.load_agent = False + config.load_buffer = False + config.train_steps = 100000 + config.model_train_steps = 25000 + config.model_batch_size = 64 + config.model_batch_length = 50 + config.agent_batch_size = 256 + config.cql_samples = 16 + config.start_training = 50000 + config.agent_train_steps = 150000 + config.agent_iters_per_step = 200 + config.buffer_size = 2e6 + config.model_lr = 6e-4 + config.q_lr = 3e-4 + config.actor_lr = 3e-4 + config.grad_clip = 100.0 + config.tau = 5e-3 + config.target_update_interval = 1 + config.dataset_balance = False + + # Behavior. 
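+    # lmbd scales the ensemble-disagreement reward penalty; alpha scales the conservative
+    # (CQL-style) Q-value regularizer computed over cql_samples random actions.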
+ config.lmbd = 5.0 + config.alpha = 0.0 + config.sample = True + config.discount = 0.99 + config.disclam = 0.95 + config.horizon = 5 + config.done_treshold = 0.5 + config.action_dist = 'tanh_normal' + config.action_init_std = 5.0 + config.expl = 'additive_gaussian' + config.expl_amount = 0.2 + config.expl_decay = 0.0 + config.expl_min = 0.0 + return config + + +class Lompo(tools.Module): + + def __init__(self, config, datadir, actspace, writer): + self._c = config + self._actspace = actspace + self._actdim = actspace.n if hasattr(actspace, 'n') else actspace.shape[0] + episodes, steps = tools.count_episodes(datadir) + self.latent_buffer = buffer.LatentReplayBuffer(steps, + steps, + self._c.deter_size + self._c.stoch_size, + self._actdim) + self.lmbd = config.lmbd + self.alpha = config.alpha + + self._writer = writer + tf.summary.experimental.set_step(0) + self._metrics = dict() + + self._agent_step = 0 + self._model_step = 0 + + self._random = np.random.RandomState(config.seed) + self._float = prec.global_policy().compute_dtype + self._dataset = iter(load_dataset(datadir, self._c)) + self._episode_iterator = episode_iterator(datadir, self._c) + + self._build_model() + for _ in range(10): + self._model_train_step(next(self._dataset), prefix='eval') + + def __call__(self, obs, reset, state=None, training=True): + if state is not None and reset.any(): + mask = tf.cast(1 - reset, self._float)[:, None] + state = tf.nest.map_structure(lambda x: x * mask, state) + action, state = self.policy(obs, state, training) + return action, state + + def load(self, filename): + try: + self.load_model(filename) + except: + pass + + try: + self.load_agent(filename) + except: + pass + + def save(self, filename): + self.save_model(filename) + self.save_agentl(filename) + + def load_model(self, filename): + self._encode.load(filename / 'encode.pkl') + self._dynamics.load(filename / 'dynamic.pkl') + self._decode.load(filename / 'decode.pkl') + self._reward.load(filename / 'reward.pkl') + if self._c.pcont: + self._pcont.load(filename / 'pcont.pkl') + if self._c.proprio: + self._proprio.load(filename / 'proprio.pkl') + + def save_model(self, filename): + filename.mkdir(parents=True, exist_ok=True) + self._encode.save(filename / 'encode.pkl') + self._dynamics.save(filename / 'dynamic.pkl') + self._decode.save(filename / 'decode.pkl') + self._reward.save(filename / 'reward.pkl') + if self._c.pcont: + self._pcont.save(filename / 'pcont.pkl') + if self._c.proprio: + self._proprio.save(filename / 'proprio.pkl') + + def load_agent(self, filename): + self._qf1.load(filename / 'qf1.pkl') + self._qf2.load(filename / 'qf2.pkl') + self._target_qf1.load(filename / 'target_qf1.pkl') + self._target_qf2.load(filename / 'target_qf2.pkl') + self._actor.load(filename / 'actor.pkl') + + def save_agent(self, filename): + filename.mkdir(parents=True, exist_ok=True) + self._qf1.save(filename / 'qf1.pkl') + self._qf2.save(filename / 'qf2.pkl') + self._target_qf1.save(filename / 'target_qf1.pkl') + self._target_qf2.save(filename / 'target_qf2.pkl') + self._actor.save(filename / 'actor.pkl') + + def policy(self, obs, state, training): + if state is None: + latent = self._dynamics.initial(len(obs['image'])) + action = tf.zeros((len(obs['image']), self._actdim), self._float) + else: + latent, action = state + + embed = self._encode(preprocess_raw(obs, self._c)) + latent, _ = self._dynamics.obs_step(latent, action, embed) + feat = self._dynamics.get_feat(latent) + action = self._exploration(self._actor(feat), training) + state = (latent, 
action) + return action, state + + def _build_model(self): + acts = dict(elu=tf.nn.elu, relu=tf.nn.relu, + swish=tf.nn.swish, leaky_relu=tf.nn.leaky_relu) + cnn_act = acts[self._c.cnn_act] + act = acts[self._c.dense_act] + + # Create encoder based on environment observations + if self._c.proprio: + if self._c.im_size == 64: + self._encode = models.ConvEncoderProprio(self._c.cnn_depth, cnn_act) + else: + self._encode = models.ConvEncoderProprioLarge(self._c.cnn_depth, cnn_act) + else: + if self._c.im_size == 64: + self._encode = models.ConvEncoder(self._c.cnn_depth, cnn_act) + else: + self._encode = models.ConvEncoderLarge(self._c.cnn_depth, cnn_act) + # RSSM model with ensembles + self._dynamics = models.RSSME(self._c.stoch_size, self._c.deter_size, + self._c.deter_size, num_models=self._c.num_models) + # Create decoder based on image size + if self._c.im_size == 64: + self._decode = models.ConvDecoder(self._c.cnn_depth, cnn_act, + shape=(self._c.im_size, self._c.im_size, 3)) + else: + self._decode = models.ConvDecoderLarge(self._c.cnn_depth, cnn_act, + shape=(self._c.im_size, self._c.im_size, 3)) + if self._c.proprio: + self._proprio = models.DenseDecoder((self._propriodim,), 3, self._c.num_units, act=act) + if self._c.pcont: + self._pcont = models.DenseDecoder((), 3, self._c.num_units, 'binary', act=act) + self._reward = models.DenseDecoder((), 2, self._c.num_units, act=act) + + model_modules = [self._encode, self._dynamics, self._decode, self._reward] + if self._c.proprio: + model_modules.append(self._proprio) + if self._c.pcont: + model_modules.append(self._pcont) + + # Build actor-critic networks + self._qf1 = models.DenseNetwork(1, 3, self._c.num_units, act=act) + self._qf2 = models.DenseNetwork(1, 3, self._c.num_units, act=act) + self._target_qf1 = deepcopy(self._qf2) + self._target_qf2 = deepcopy(self._qf1) + self._qf_criterion = tf.keras.losses.Huber() + self._actor = models.ActorNetwork(self._actdim, 4, self._c.num_units, act=act) + + # Initialize optimizers + Optimizer = functools.partial(tools.Adam, + wd=self._c.weight_decay, + clip=self._c.grad_clip, + wdpattern=self._c.weight_decay_pattern) + + self._model_opt = Optimizer('model', model_modules, self._c.model_lr) + self._qf_opt = Optimizer('qf', [self._qf1, self._qf2], self._c.q_lr) + self._actor_opt = Optimizer('actor', [self._actor], self._c.actor_lr) + + def _exploration(self, action, training): + if training: + amount = self._c.expl_amount + if self._c.expl_decay: + amount *= 0.5 ** (tf.cast(self._agent_step, tf.float32) / self._c.expl_decay) + if self._c.expl_min: + amount = tf.maximum(self._c.expl_min, amount) + self._metrics['expl_amount'] = amount + elif self._c.eval_noise: + amount = self._c.eval_noise + else: + return action + if self._c.expl == 'additive_gaussian': + return tf.clip_by_value(tfd.Normal(action, amount).sample(), -1, 1) + if self._c.expl == 'completely_random': + return tf.random.uniform(action.shape, -1, 1) + raise NotImplementedError(self._c.expl) + + def fit_model(self, iters): + for iter in range(iters): + data = next(self._dataset) + self._model_train_step(data) + if iter % self._c.save_every == 0: + self.save_model(self._c.logdir / 'model_step_{}'.format(iter)) + self.save_model(self._c.logdir / 'final_model') + + def _model_train_step(self, data, prefix='train'): + with tf.GradientTape() as model_tape: + embed = self._encode(data) + post, prior = self._dynamics.observe(embed, data['action']) + feat = self._dynamics.get_feat(post) + image_pred = self._decode(feat) + reward_pred = 
self._reward(feat) + likes = tools.AttrDict() + likes.image = tf.reduce_mean(tf.boolean_mask(image_pred.log_prob(data['image']), + data['mask'])) + likes.reward = tf.reduce_mean(tf.boolean_mask(reward_pred.log_prob(data['reward']), + data['mask'])) + if self._c.pcont: + pcont_pred = self._pcont(feat) + pcont_target = data['terminal'] + likes.pcont = tf.reduce_mean(tf.boolean_mask(pcont_pred.log_prob(pcont_target), + data['mask'])) + likes.pcont *= self._c.pcont_scale + + for key in prior.keys(): + prior[key] = tf.boolean_mask(prior[key], data['mask']) + post[key] = tf.boolean_mask(post[key], data['mask']) + + prior_dist = self._dynamics.get_dist(prior) + post_dist = self._dynamics.get_dist(post) + div = tf.reduce_mean(tfd.kl_divergence(post_dist, prior_dist)) + model_loss = self._c.kl_scale * div - sum(likes.values()) + + if prefix == 'train': + model_norm = self._model_opt(model_tape, model_loss) + self._model_step += 1 + + if self._model_step % self._c.log_every == 0: + self._image_summaries(data, embed, image_pred, self._model_step, prefix) + model_summaries = dict() + model_summaries['model_train/KL Divergence'] = tf.reduce_mean(div) + model_summaries['model_train/image_recon'] = tf.reduce_mean(likes.image) + model_summaries['model_train/reward_recon'] = tf.reduce_mean(likes.reward) + model_summaries['model_train/model_loss'] = tf.reduce_mean(model_loss) + if prefix == 'train': + model_summaries['model_train/model_norm'] = tf.reduce_mean(model_norm) + if self._c.pcont: + model_summaries['model_train/terminal_recon'] = tf.reduce_mean(likes.pcont) + self._write_summaries(model_summaries, self._model_step) + + def train_agent(self, iters): + for iter in range(iters): + data = preprocess_latent(self.latent_buffer.sample(self._c.agent_batch_size)) + self._agent_train_step(data) + if self._agent_step % self._c.target_update_interval == 0: + self._update_target_critics() + if iter % self._c.save_every == 0: + self.save_agent(self._c.logdir) + self.save_agent(self._c.logdir / 'final_agent') + + def _agent_train_step(self, data): + obs = data['obs'] + actions = data['actions'] + next_obs = data['next_obs'] + rewards = data['rewards'] + terminals = data['terminals'] + + with tf.GradientTape() as q_tape: + q1_pred = self._qf1(tf.concat([obs, actions], axis=-1)) + q2_pred = self._qf2(tf.concat([obs, actions], axis=-1)) + # new_next_actions = self._exploration(self._actor(next_obs), True) + new_actions = self._actor(obs) + new_next_actions = self._actor(next_obs) + + target_q_values = tf.reduce_min([self._target_qf1(tf.concat([next_obs, new_next_actions], axis=-1)), + self._target_qf2(tf.concat([next_obs, new_next_actions], axis=-1))], + axis=0) + q_target = rewards + self._c.discount * (1.0 - terminals) * target_q_values + + expanded_actions = tf.expand_dims(actions, 0) + tilled_actions = tf.tile(expanded_actions, [self._c.cql_samples, 1, 1]) + tilled_actions = tf.random.uniform(tilled_actions.shape, minval=-1, maxval=1) + tilled_actions = tf.concat([tilled_actions, tf.expand_dims(new_actions, 0)], axis=0) + + expanded_obs = tf.expand_dims(obs, 0) + tilled_obs = tf.tile(expanded_obs, [self._c.cql_samples + 1, 1, 1]) + + q1_values = self._qf1(tf.concat([tilled_obs, tilled_actions], axis=-1)) + q2_values = self._qf2(tf.concat([tilled_obs, tilled_actions], axis=-1)) + q1_penalty = tf.math.reduce_logsumexp(q1_values, axis=0) + q2_penalty = tf.math.reduce_logsumexp(q2_values, axis=0) + + qf1_loss = self.alpha * (tf.reduce_mean(q1_penalty) - tf.reduce_mean(q1_pred[:self._c.agent_batch_size])) + \ + 
tf.reduce_mean((q1_pred - tf.stop_gradient(q_target)) ** 2) + qf2_loss = self.alpha * (tf.reduce_mean(q2_penalty) - tf.reduce_mean(q2_pred[:self._c.agent_batch_size])) + \ + tf.reduce_mean((q2_pred - tf.stop_gradient(q_target)) ** 2) + + q_loss = qf1_loss + qf2_loss + + with tf.GradientTape() as actor_tape: + new_obs_actions = self._actor(obs) + q_new_actions = tf.reduce_min([self._qf1(tf.concat([obs, new_obs_actions], axis=-1)), + self._qf2(tf.concat([obs, new_obs_actions], axis=-1))], axis=0) + actor_loss = -tf.reduce_mean(q_new_actions) + + q_norm = self._qf_opt(q_tape, q_loss) + actor_norm = self._actor_opt(actor_tape, actor_loss) + self._agent_step += 1 + + if self._agent_step % self._c.log_every == 0: + agent_summaries = dict() + agent_summaries['agent/Q1_value'] = tf.reduce_mean(q1_pred) + agent_summaries['agent/Q2_value'] = tf.reduce_mean(q2_pred) + agent_summaries['agent/Q_target'] = tf.reduce_mean(q_target) + agent_summaries['agent/Q_loss'] = q_loss + agent_summaries['agent/actor_loss'] = actor_loss + agent_summaries['agent/Q_grad_norm'] = q_norm + agent_summaries['agent/actor_grad_norm'] = actor_norm + self._write_summaries(agent_summaries, self._agent_step) + + def _update_target_critics(self): + tau = tf.constant(self._c.tau) + for source_weight, target_weight in zip(self._qf1.trainable_variables, + self._target_qf1.trainable_variables): + target_weight.assign(tau * source_weight + (1.0 - tau) * target_weight) + for source_weight, target_weight in zip(self._qf2.trainable_variables, + self._target_qf2.trainable_variables): + target_weight.assign(tau * source_weight + (1.0 - tau) * target_weight) + + def _generate_latent_data(self, data): + embed = self._encode(data) + post, prior = self._dynamics.observe(embed, data['action']) + if self._c.pcont: # Last step could be terminal. 
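+            # Drop the final timestep so imagined rollouts never start from a potentially terminal state.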
+ post = {k: v[:, :-1] for k, v in post.items()} + for key in post.keys(): + post[key] = tf.boolean_mask(post[key], data['mask']) + start = post + + policy = lambda state: tf.stop_gradient( + self._exploration(self._actor(self._dynamics.get_feat(state)), True)) + + obs = [[] for _ in tf.nest.flatten(start)] + next_obs = [[] for _ in tf.nest.flatten(start)] + actions = [] + full_posts = [[[] for _ in tf.nest.flatten(start)] for _ in range(self._c.num_models)] + prev = start + + for index in range(self._c.horizon): + [o.append(l) for o, l in zip(obs, tf.nest.flatten(prev))] + a = policy(prev) + actions.append(a) + for i in range(self._c.num_models): + p = self._dynamics.img_step(prev, a, k=i) + [o.append(l) for o, l in zip(full_posts[i], tf.nest.flatten(p))] + prev = self._dynamics.img_step(prev, a, k=np.random.choice(self._c.num_models, 1)[0]) + [o.append(l) for o, l in zip(next_obs, tf.nest.flatten(prev))] + + obs = self._dynamics.get_feat(tf.nest.pack_sequence_as(start, [tf.stack(x, 0) for x in obs])) + stoch = tf.nest.pack_sequence_as(start, [tf.stack(x, 0) for x in next_obs])['stoch'] + next_obs = self._dynamics.get_feat(tf.nest.pack_sequence_as(start, [tf.stack(x, 0) for x in next_obs])) + actions = tf.stack(actions, 0) + rewards = self._reward(next_obs).mode() + if self._c.pcont: + dones = 1.0 * (self._pcont(next_obs).mean().numpy() > self._c.done_treshold) + else: + dones = tf.zeros_like(rewards) + + dists = [self._dynamics.get_dist( + tf.nest.pack_sequence_as(start, [tf.stack(x, 0) for x in full_posts[i]])) + for i in range(self._c.num_models)] + + # Compute penalty based on specification + if self._c.penalty_type == 'log_prob': + log_prob_vars = tf.math.reduce_std( + tf.stack([d.log_prob(stoch) for d in dists], 0), + axis=0) + modified_rewards = rewards - self.lmbd * log_prob_vars + elif self._c.penalty_type == 'max_var': + max_std = tf.reduce_max( + tf.stack([tf.norm(d.stddev(), 2, -1) for d in dists], 0), + axis=0) + modified_rewards = rewards - self.lmbd * max_std + elif self._c.penalty_type == 'mean': + mean_prediction = tf.reduce_mean(tf.stack([d.mean() for d in dists], 0), axis=0) + mean_disagreement = tf.reduce_mean( + tf.stack([tf.norm(d.mean() - mean_prediction, 2, -1) for d in dists], 0), + axis=0) + modified_rewards = rewards - self.lmbd * mean_disagreement + else: + modified_rewards = rewards + + self.latent_buffer.add_samples(flatten(obs).numpy(), + flatten(actions).numpy(), + flatten(next_obs).numpy(), + flatten(modified_rewards).numpy(), + flatten(dones), + sample_type='latent') + + obs = [[] for _ in tf.nest.flatten(start)] + next_obs = [[] for _ in tf.nest.flatten(start)] + actions = [] + full_posts = [[[] for _ in tf.nest.flatten(start)] for _ in range(self._c.num_models)] + + for key in prev.keys(): + prev[key] = tf.boolean_mask(prev[key], flatten(1.0 - dones)) + + def _add_data(self, num_episodes=1): + self._process_data_to_latent(num_episodes=num_episodes) + self._generate_latent_data(next(self._dataset)) + + def _process_data_to_latent(self, num_episodes=None): + if num_episodes is None: + num_episodes, _ = tools.count_episodes(self._c.datadir) + + for _ in range(num_episodes): + filename = next(self._episode_iterator) + try: + with filename.open('rb') as f: + episode = np.load(f) + episode = {k: episode[k] for k in episode.keys()} + except Exception as e: + print(f'Could not load episode: {e}') + continue + + obs = preprocess_raw(episode, self._c) + if not self._c.pcont: + obs['terminal'] = tf.zeros_like(obs['reward']) + with 
tf.GradientTape(watch_accessed_variables=False) as _: + embed = self._encode(obs) + post, prior = self._dynamics.observe(tf.expand_dims(embed, 0), + tf.expand_dims(obs['action'], 0)) + feat = flatten(self._dynamics.get_feat(post)) + self.latent_buffer.add_samples(feat.numpy()[:-1], + obs['action'].numpy()[1:], + feat.numpy()[1:], + obs['reward'].numpy()[1:], + obs['terminal'].numpy()[1:], + sample_type='real') + + def _image_summaries(self, data, embed, image_pred, step=None, prefix='train'): + truth = data['image'][:6] + 0.5 + recon = image_pred.mode()[:6] + init, _ = self._dynamics.observe(embed[:6, :5], data['action'][:6, :5]) + init = {k: v[:, -1] for k, v in init.items()} + prior = self._dynamics.imagine(data['action'][:6, 5:], init) + openl = self._decode(self._dynamics.get_feat(prior)).mode() + model = tf.concat([recon[:, :5] + 0.5, openl + 0.5], 1) + error_prior = (model - truth + 1) / 2 + error_posterior = (recon + 0.5 - truth + 1) / 2 + openl = tf.concat([truth, recon + 0.5, model, error_prior, error_posterior], 2) + with self._writer.as_default(): + tools.video_summary('agent/' + prefix, openl.numpy(), step=step) + + def _write_summaries(self, metrics, step=None): + step = int(step) + metrics = [(k, float(v)) for k, v in metrics.items()] + with self._writer.as_default(): + tf.summary.experimental.set_step(step) + [tf.summary.scalar(k, m, step=step) for k, m in metrics] + print(f'[{step}]', ' / '.join(f'{k} {v:.1f}' for k, v in metrics)) + self._writer.flush() + + +def preprocess_raw(obs, config): + dtype = prec.global_policy().compute_dtype + obs = obs.copy() + + with tf.device('cpu:0'): + obs['image'] = tf.cast(obs['image'], dtype) / 255.0 - 0.5 + if 'image_128' in obs.keys(): + obs['image_128'] = tf.cast(obs['image_128'], dtype) / 255.0 - 0.5 + clip_rewards = dict(none=lambda x: x, tanh=tf.tanh)[config.clip_rewards] + obs['reward'] = clip_rewards(obs['reward']) + for k in obs.keys(): + obs[k] = tf.cast(obs[k], dtype) + return obs + + +def flatten(x): + return tf.reshape(x, [-1] + list(x.shape[2:])) + + +def preprocess_latent(batch): + dtype = prec.global_policy().compute_dtype + batch = batch.copy() + with tf.device('cpu:0'): + for key in batch.keys(): + batch[key] = tf.cast(batch[key], dtype) + return batch + + +def count_steps(datadir, config): + return tools.count_episodes(datadir)[1] * config.action_repeat + + +def load_dataset(directory, config): + episode = next(tools.load_episodes(directory, 1000, load_episodes=1)) + types = {k: v.dtype for k, v in episode.items()} + shapes = {k: (None,) + v.shape[1:] for k, v in episode.items()} + generator = lambda: tools.load_episodes(directory, config.train_steps, + config.model_batch_length, config.dataset_balance) + dataset = tf.data.Dataset.from_generator(generator, types, shapes) + dataset = dataset.batch(config.model_batch_size, drop_remainder=True) + dataset = dataset.map(functools.partial(preprocess_raw, config=config)) + dataset = dataset.prefetch(10) + return dataset + + +def episode_iterator(datadir, config): + while True: + filenames = list(datadir.glob('*.npz')) + for filename in list(filenames): + yield filename + + +def summarize_episode(episode, config, datadir, writer, prefix): + length = (len(episode['reward']) - 1) * config.action_repeat + ret = episode['reward'].sum() + print(f'{prefix.title()} episode of length {length} with return {ret:.1f}.') + metrics = [ + (f'{prefix}/return', float(episode['reward'].sum())), + (f'{prefix}/length', len(episode['reward']) - 1)] + with writer.as_default(): # Env might run in a 
different thread. + [tf.summary.scalar('sim/' + k, v) for k, v in metrics] + if prefix == 'test': + tools.video_summary(f'sim/{prefix}/video', episode['image'][None]) + + +def make_env(config, writer, prefix, datadir, store): + suite, task = config.task.split('_', 1) + if suite == 'dmc': + env = wrappers.DeepMindControl(task) + env = wrappers.ActionRepeat(env, config.action_repeat) + env = wrappers.NormalizeActions(env) + elif suite == 'gym': + env = wrappers.Gym(task, config, size=(128, 128)) + env = wrappers.ActionRepeat(env, config.action_repeat) + env = wrappers.NormalizeActions(env) + elif task == 'door': + env = wrappers.DoorOpen(config, size=(128, 128)) + env = wrappers.ActionRepeat(env, config.action_repeat) + env = wrappers.NormalizeActions(env) + elif task == 'drawer': + env = wrappers.DrawerOpen(config, size=(128, 128)) + env = wrappers.ActionRepeat(env, config.action_repeat) + env = wrappers.NormalizeActions(env) + else: + raise NotImplementedError(suite) + env = wrappers.TimeLimit(env, config.time_limit / config.action_repeat) + callbacks = [] + if store: + callbacks.append(lambda ep: tools.save_episodes(datadir, [ep])) + if prefix == 'test': + callbacks.append( + lambda ep: summarize_episode(ep, config, datadir, writer, prefix)) + env = wrappers.Collect(env, callbacks, config.precision) + env = wrappers.RewardObs(env) + return env + + +def main(config): + print(config) + + # Set random seeds + os.environ['PYTHONHASHSEED'] = str(config.seed) + os.environ['TF_CUDNN_DETERMINISTIC'] = '1' + random.seed(config.seed) + np.random.seed(config.seed) + tf.random.set_seed(config.seed) + + if config.gpu_growth: + for gpu in tf.config.experimental.list_physical_devices('GPU'): + tf.config.experimental.set_memory_growth(gpu, True) + + config.logdir = config.logdir / config.task + config.logdir = config.logdir / 'seed_{}'.format(config.seed) + config.logdir.mkdir(parents=True, exist_ok=True) + tf_dir = config.logdir / 'tensorboard' + writer = tf.summary.create_file_writer(str(tf_dir), max_queue=1000, flush_millis=20000) + writer.set_as_default() + + # Create environments. + train_envs = [wrappers.Async(lambda: make_env( + config, writer, 'train', '.', store=False), config.parallel) + for _ in range(config.envs)] + test_envs = [wrappers.Async(lambda: make_env( + config, writer, 'test', '.', store=False), config.parallel) + for _ in range(config.envs)] + actspace = train_envs[0].action_space + + # Train and regularly evaluate the agent. 
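+    # Offline pipeline: (1) fit or load the latent dynamics model, (2) encode the offline
+    # dataset into the latent replay buffer, (3) alternate evaluation, latent rollout
+    # generation, and actor-critic updates.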
+ agent = Lompo(config, config.datadir, actspace, writer) + + if agent._c.load_model: + agent.load_model(config.loaddir / 'final_model') + print('Load pretrained model') + else: + agent.fit_model(agent._c.model_train_steps) + agent.save_model(config.logdir) + + if agent._c.load_buffer: + agent.latent_buffer.load(agent._c.loaddir / 'buffer.h5py') + else: + agent._process_data_to_latent() + agent.latent_buffer.save(agent._c.logdir / 'buffer.h5py') + + if agent._c.load_agent: + agent.load_agent(config.loaddir) + print('Load pretrained actor') + + while agent.latent_buffer._latent_stored_steps < agent._c.start_training: + agent._generate_latent_data(next(agent._dataset)) + + while agent._agent_step < int(config.agent_train_steps): + print('Start evaluation.') + tools.simulate( + functools.partial(agent, training=False), test_envs, episodes=1) + writer.flush() + print('Start collection.') + agent.train_agent(agent._c.agent_iters_per_step) + + if config.sample: + agent._add_data(num_episodes=1) + else: + agent._process_data_to_latent(num_episodes=1) + + for env in train_envs + test_envs: + env.close() + + +if __name__ == '__main__': + try: + import colored_traceback + + colored_traceback.add_hook() + except ImportError: + pass + parser = argparse.ArgumentParser() + for key, value in define_config().items(): + parser.add_argument(f'--{key}', type=tools.args_type(value), default=value) + main(parser.parse_args()) diff --git a/baselines/lompo/tools.py b/baselines/lompo/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..97ed79bc2057a4852ae8970e3b991be1c85dee83 --- /dev/null +++ b/baselines/lompo/tools.py @@ -0,0 +1,243 @@ +import copy +import datetime +import io +import pathlib +import pickle +import uuid + +import numpy as np +import tensorflow as tf +import tensorflow.compat.v1 as tf1 +from tensorflow_probability import distributions as tfd + + +class AttrDict(dict): + __setattr__ = dict.__setitem__ + __getattr__ = dict.__getitem__ + + +class Module(tf.Module): + def save(self, filename): + values = tf.nest.map_structure(lambda x: x.numpy(), self.variables) + with pathlib.Path(filename).open('wb') as f: + pickle.dump(values, f) + + def load(self, filename): + with pathlib.Path(filename).open('rb') as f: + values = pickle.load(f) + tf.nest.map_structure(lambda x, y: x.assign(y), self.variables, values) + + def get(self, name, ctor, *args, **kwargs): + # Create or get layer by name to avoid mentioning it in the constructor. 
+ if not hasattr(self, '_modules'): + self._modules = {} + if name not in self._modules: + self._modules[name] = ctor(*args, **kwargs) + return self._modules[name] + + +def video_summary(name, video, step=None, fps=20): + name = name if isinstance(name, str) else name.decode('utf-8') + if np.issubdtype(video.dtype, np.floating): + video = np.clip(255 * video, 0, 255).astype(np.uint8) + B, T, H, W, C = video.shape + try: + frames = video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C)) + summary = tf1.Summary() + image = tf1.Summary.Image(height=B * H, width=T * W, colorspace=C) + image.encoded_image_string = encode_gif(frames, fps) + summary.value.add(tag=name + '/gif', image=image) + tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step) + except (IOError, OSError) as e: + print('GIF summaries require ffmpeg in $PATH.', e) + frames = video.transpose((0, 2, 1, 3, 4)).reshape((1, B * H, T * W, C)) + tf.summary.image(name + '/grid', frames, step) + + +def encode_gif(frames, fps): + from subprocess import Popen, PIPE + h, w, c = frames[0].shape + pxfmt = {1: 'gray', 3: 'rgb24'}[c] + cmd = ' '.join([ + f'ffmpeg -y -f rawvideo -vcodec rawvideo', + f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex', + f'[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse', + f'-r {fps:.02f} -f gif -']) + proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE) + for image in frames: + proc.stdin.write(image.tostring()) + out, err = proc.communicate() + if proc.returncode: + raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')])) + del proc + return out + + +def simulate(agent, envs, steps=0, episodes=0, state=None): + # Initialize or unpack simulation state. + if state is None: + step, episode = 0, 0 + done = np.ones(len(envs), np.bool) + length = np.zeros(len(envs), np.int32) + obs = [None] * len(envs) + agent_state = None + else: + step, episode, done, length, obs, agent_state = state + while (steps and step < steps) or (episodes and episode < episodes): + # Reset envs if necessary. + if done.any(): + indices = [index for index, d in enumerate(done) if d] + promises = [envs[i].reset(blocking=False) for i in indices] + for index, promise in zip(indices, promises): + obs[index] = promise() + # Step agents. + obs = {k: np.stack([o[k] for o in obs]) for k in obs[0]} + action, agent_state = agent(obs, done, agent_state) + action = np.array(action) + assert len(action) == len(envs) + # Step envs. + promises = [e.step(a, blocking=False) for e, a in zip(envs, action)] + obs, _, done = zip(*[p()[:3] for p in promises]) + obs = list(obs) + done = np.stack(done) + episode += int(done.sum()) + length += 1 + step += (done * length).sum() + length *= (1 - done) + # Return new state to allow resuming the simulation. 
+ return step - steps, episode - episodes, done, length, obs, agent_state + + +def count_episodes(directory): + filenames = directory.glob('*.npz') + lengths = [int(n.stem.rsplit('-', 1)[-1]) - 1 for n in filenames] + episodes, steps = len(lengths), sum(lengths) + return episodes, steps + + +def save_episodes(directory, episodes): + directory = pathlib.Path(directory).expanduser() + directory.mkdir(parents=True, exist_ok=True) + timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S') + for episode in episodes: + identifier = str(uuid.uuid4().hex) + length = len(episode['reward']) + filename = directory / f'{timestamp}-{identifier}-{length}.npz' + with io.BytesIO() as f1: + np.savez_compressed(f1, **episode) + f1.seek(0) + with filename.open('wb') as f2: + f2.write(f1.read()) + + +def load_episodes(directory, rescan, length=None, balance=False, seed=0, load_episodes=1000): + directory = pathlib.Path(directory).expanduser() + random = np.random.RandomState(seed) + filenames = list(directory.glob('*.npz')) + load_episodes = min(len(filenames), load_episodes) + if load_episodes is None: + load_episodes = int(count_episodes(directory)[0] / 20) + + while True: + cache = {} + for filename in random.choice(list(directory.glob('*.npz')), + load_episodes, + replace=False): + try: + with filename.open('rb') as f: + episode = np.load(f) + episode = {k: episode[k] for k in episode.keys() if k not in ['image_128']} + # episode['reward'] = copy.deepcopy(episode['success']) + if 'discount' not in episode: + episode['discount'] = np.where(episode['is_terminal'], 0., 1.) + except Exception as e: + print(f'Could not load episode: {e}') + continue + cache[filename] = episode + + keys = list(cache.keys()) + for index in random.choice(len(keys), rescan): + episode = copy.deepcopy(cache[keys[index]]) + if length: + total = len(next(iter(episode.values()))) + available = total - length + if available < 0: + for key in episode.keys(): + shape = episode[key].shape + episode[key] = np.concatenate([episode[key], + np.zeros([abs(available)] + list(shape[1:]))], + axis=0) + episode['mask'] = np.ones(length) + episode['mask'][available:] = 0.0 + elif available > 0: + if balance: + index = min(random.randint(0, total), available) + else: + index = int(random.randint(0, available)) + episode = {k: v[index: index + length] for k, v in episode.items()} + episode['mask'] = np.ones(length) + else: + episode['mask'] = np.ones_like(episode['reward']) + else: + episode['mask'] = np.ones_like(episode['reward']) + yield episode + + +class Adam(tf.Module): + def __init__(self, name, modules, lr, clip=None, wd=None, wdpattern=r'.*'): + self._name = name + self._modules = modules + self._clip = clip + self._wd = wd + self._wdpattern = wdpattern + self._opt = tf.optimizers.Adam(lr) + + @property + def variables(self): + return self._opt.variables() + + def __call__(self, tape, loss): + variables = [module.variables for module in self._modules] + self._variables = tf.nest.flatten(variables) + assert len(loss.shape) == 0, loss.shape + grads = tape.gradient(loss, self._variables) + norm = tf.linalg.global_norm(grads) + if self._clip: + grads, _ = tf.clip_by_global_norm(grads, self._clip, norm) + self._opt.apply_gradients(zip(grads, self._variables)) + return norm + + +def args_type(default): + if isinstance(default, bool): + return lambda x: bool(['False', 'True'].index(x)) + if isinstance(default, int): + return lambda x: float(x) if ('e' in x or '.' 
in x) else int(x) + if isinstance(default, pathlib.Path): + return lambda x: pathlib.Path(x).expanduser() + return type(default) + + +def static_scan(fn, inputs, start, reverse=False): + last = start + outputs = [[] for _ in tf.nest.flatten(start)] + indices = range(len(tf.nest.flatten(inputs)[0])) + if reverse: + indices = reversed(indices) + for index in indices: + inp = tf.nest.map_structure(lambda x: x[index], inputs) + last = fn(last, inp) + [o.append(l) for o, l in zip(outputs, tf.nest.flatten(last))] + if reverse: + outputs = [list(reversed(x)) for x in outputs] + outputs = [tf.stack(x, 0) for x in outputs] + return tf.nest.pack_sequence_as(start, outputs) + + +def _mnd_sample(self, sample_shape=(), seed=None, name='sample'): + return tf.random.normal( + tuple(sample_shape) + tuple(self.event_shape), + self.mean(), self.stddev(), self.dtype, seed, name) + + +tfd.MultivariateNormalDiag.sample = _mnd_sample diff --git a/baselines/lompo/wrappers.py b/baselines/lompo/wrappers.py new file mode 100644 index 0000000000000000000000000000000000000000..cbee2a304d9160fe7bdc1e388b71226e4b298948 --- /dev/null +++ b/baselines/lompo/wrappers.py @@ -0,0 +1,508 @@ +import atexit +import functools +import sys +import traceback + +import gym +import numpy as np + + +class DrawerOpen: + def __init__(self, config, size=(128, 128)): + import metaworld + import mujoco_py + self._env = metaworld.envs.mujoco.sawyer_xyz.v2.sawyer_drawer_open_v2.SawyerDrawerOpenEnvV2() + self._env._last_rand_vec = np.array([-0.1, 0.9, 0.0]) + self._env._set_task_called = True + self.size = size + + # Setup camera in environment + self.viewer = mujoco_py.MjRenderContextOffscreen(self._env.sim, -1) + self.viewer.cam.elevation = -22.5 + self.viewer.cam.azimuth = 15 + self.viewer.cam.distance = 0.75 + self.viewer.cam.lookat[0] = -0.15 + self.viewer.cam.lookat[1] = 0.7 + self.viewer.cam.lookat[2] = 0.10 + + def __getattr__(self, attr): + if attr == '_wrapped_env': + raise AttributeError() + return getattr(self._env, attr) + + def step(self, action): + state, reward, done, info = self._env.step(action) + img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + obs = {'state': state, 'image': img} + reward = 1.0 * info['success'] + return obs, reward, done, info + + def reset(self): + state = self._env.reset() + state = self._env.reset() + img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + if self.use_transform: + img = img[self.pad:-self.pad, self.pad:-self.pad, :] + obs = {'state': state, 'image': img} + return obs + + def render(self, mode, width=128, height=128): + self.viewer.render(width=width, height=width) + img = self.viewer.read_pixels(self.size[0], self.size[1], depth=False) + img = img[::-1] + return img + + +class Hammer: + def __init__(self, config, size=(128, 128)): + import metaworld + import mujoco_py + self._env = metaworld.envs.mujoco.sawyer_xyz.v2.sawyer_hammer_v2.SawyerHammerEnvV2() + self._env._last_rand_vec = np.array([-0.06, 0.4, 0.02]) + self._env._set_task_called = True + self.size = size + + # Setup camera in environment + self.viewer = mujoco_py.MjRenderContextOffscreen(self._env.sim, -1) + self.viewer.cam.elevation = -15 + self.viewer.cam.azimuth = 137.5 + self.viewer.cam.distance = 0.9 + self.viewer.cam.lookat[0] = -0. 
+ self.viewer.cam.lookat[1] = 0.6 + self.viewer.cam.lookat[2] = 0.175 + + def __getattr__(self, attr): + if attr == '_wrapped_env': + raise AttributeError() + return getattr(self._env, attr) + + def step(self, action): + state, reward, done, info = self._env.step(action) + img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + obs = {'state': state, 'image': img} + return obs, reward, done, info + + def reset(self): + state = self._env.reset() + img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + obs = {'state': state, 'image': img} + return obs + + def render(self, mode, width=128, height=128): + self.viewer.render(width=width, height=width) + img = self.viewer.read_pixels(self.size[0], self.size[1], depth=False) + img = img[::-1] + return img + + +class DoorOpen: + def __init__(self, config, size=(128, 128)): + import metaworld + import mujoco_py + self._env = metaworld.envs.mujoco.sawyer_xyz.v2.sawyer_door_v2.SawyerDoorEnvV2() + self._env._last_rand_vec = np.array([0.0, 1.0, .1525]) + self._env._set_task_called = True + self.size = size + + # Setup camera in environment + self.viewer = mujoco_py.MjRenderContextOffscreen(self._env.sim, -1) + self.viewer.cam.elevation = -12.5 + self.viewer.cam.azimuth = 115 + self.viewer.cam.distance = 1.05 + self.viewer.cam.lookat[0] = 0.075 + self.viewer.cam.lookat[1] = 0.75 + self.viewer.cam.lookat[2] = 0.15 + + def __getattr__(self, attr): + if attr == '_wrapped_env': + raise AttributeError() + return getattr(self._env, attr) + + def step(self, action): + state, reward, done, info = self._env.step(action) + img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + obs = {'state': state, 'image': img} + reward = 1.0 * info['success'] + return obs, reward, done, info + + def reset(self): + state = self._env.reset() + img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + obs = {'state': state, 'image': img} + return obs + + def render(self, mode, width=128, height=128): + self.viewer.render(width=width, height=width) + img = self.viewer.read_pixels(self.size[0], self.size[1], depth=False) + img = img[::-1] + return img + + +class Gym: + def __init__(self, name, config, size=(64, 64)): + self._env = gym.make(name) + self.size = size + self.use_transform = config.use_transform + self.pad = int(config.pad / 2) + + def __getattr__(self, attr): + if attr == '_wrapped_env': + raise AttributeError() + return getattr(self._env, attr) + + def step(self, action): + state, reward, done, info = self.env.step(action) + img = self._env.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + if self.use_transform: + img = img[self.pad:-self.pad, self.pad:-self.pad, :] + obs = {'state': state, 'image': img} + return obs, reward, done, info + + def reset(self): + state = self._env.reset() + img = self._env.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + if self.use_transform: + img = img[self.pad:-self.pad, self.pad:-self.pad, :] + obs = {'state': state, 'image': img} + return obs + + def render(self, *args, **kwargs): + if kwargs.get('mode', 'rgb_array') != 'rgb_array': + raise ValueError("Only render mode 'rgb_array' is supported.") + return self._env.render(mode='rgb_array', width=self.size[0], height=self.size[1]) + + +class DeepMindControl: + def __init__(self, name, size=(64, 64), camera=None): + domain, task = name.split('_', 1) + if domain == 'cup': # Only domain with multiple words. 
+ domain = 'ball_in_cup' + if isinstance(domain, str): + from dm_control import suite + self._env = suite.load(domain, task) + else: + assert task is None + self._env = domain() + self._size = size + if camera is None: + camera = dict(quadruped=2).get(domain, 0) + self._camera = camera + + @property + def observation_space(self): + spaces = {} + for key, value in self._env.observation_spec().items(): + spaces[key] = gym.spaces.Box(-np.inf, np.inf, value.shape, dtype=np.float32) + spaces['image'] = gym.spaces.Box(0, 255, self._size + (3,), dtype=np.uint8) + return gym.spaces.Dict(spaces) + + @property + def action_space(self): + spec = self._env.action_spec() + return gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32) + + def step(self, action): + time_step = self._env.step(action) + obs = dict(time_step.observation) + obs['image'] = self.render() + reward = time_step.reward or 0 + done = time_step.last() + info = {'discount': np.array(time_step.discount, np.float32)} + return obs, reward, done, info + + def reset(self): + time_step = self._env.reset() + obs = dict(time_step.observation) + obs['image'] = self.render() + return obs + + def render(self, *args, **kwargs): + if kwargs.get('mode', 'rgb_array') != 'rgb_array': + raise ValueError("Only render mode 'rgb_array' is supported.") + return self._env.physics.render(*self._size, camera_id=self._camera) + + +class Collect: + def __init__(self, env, callbacks=None, precision=32): + self._env = env + self._callbacks = callbacks or () + self._precision = precision + self._episode = None + + def __getattr__(self, name): + return getattr(self._env, name) + + def step(self, action): + obs, reward, done, info = self._env.step(action) + obs = {k: self._convert(v) for k, v in obs.items()} + transition = obs.copy() + transition['action'] = action + transition['reward'] = reward + transition['discount'] = info.get('discount', np.array(1 - float(done))) + self._episode.append(transition) + if done: + episode = {k: [t[k] for t in self._episode] for k in self._episode[0]} + episode = {k: self._convert(v) for k, v in episode.items()} + info['episode'] = episode + for callback in self._callbacks: + callback(episode) + return obs, reward, done, info + + def reset(self): + obs = self._env.reset() + transition = obs.copy() + transition['action'] = np.zeros(self._env.action_space.shape) + transition['reward'] = 0.0 + transition['discount'] = 1.0 + self._episode = [transition] + return obs + + def _convert(self, value): + value = np.array(value) + if np.issubdtype(value.dtype, np.floating): + dtype = {16: np.float16, 32: np.float32, 64: np.float64}[self._precision] + elif np.issubdtype(value.dtype, np.signedinteger): + dtype = {16: np.int16, 32: np.int32, 64: np.int64}[self._precision] + elif np.issubdtype(value.dtype, np.uint8): + dtype = np.uint8 + else: + raise NotImplementedError(value.dtype) + return value.astype(dtype) + + +class TimeLimit: + def __init__(self, env, duration): + self._env = env + self._duration = duration + self._step = None + + def __getattr__(self, name): + return getattr(self._env, name) + + def step(self, action): + assert self._step is not None, 'Must reset environment.' 
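+        # Count wrapped steps and flag the episode as done once self._duration is reached.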
+ obs, reward, done, info = self._env.step(action) + self._step += 1 + if self._step >= self._duration: + done = True + if 'discount' not in info: + info['discount'] = np.array(1.0).astype(np.float32) + self._step = None + return obs, reward, done, info + + def reset(self): + self._step = 0 + return self._env.reset() + + +class ActionRepeat: + def __init__(self, env, amount): + self._env = env + self._amount = amount + + def __getattr__(self, name): + return getattr(self._env, name) + + def step(self, action): + done = False + total_reward = 0 + current_step = 0 + while current_step < self._amount and not done: + obs, reward, done, info = self._env.step(action) + total_reward += reward + current_step += 1 + return obs, total_reward, done, info + + +class NormalizeActions: + def __init__(self, env): + self._env = env + self._mask = np.logical_and(np.isfinite(env.action_space.low), + np.isfinite(env.action_space.high)) + self._low = np.where(self._mask, env.action_space.low, -1) + self._high = np.where(self._mask, env.action_space.high, 1) + + def __getattr__(self, name): + return getattr(self._env, name) + + @property + def action_space(self): + low = np.where(self._mask, -np.ones_like(self._low), self._low) + high = np.where(self._mask, np.ones_like(self._low), self._high) + return gym.spaces.Box(low, high, dtype=np.float32) + + def step(self, action): + original = (action + 1) / 2 * (self._high - self._low) + self._low + original = np.where(self._mask, original, action) + return self._env.step(original) + + +class ObsDict: + def __init__(self, env, key='obs'): + self._env = env + self._key = key + + def __getattr__(self, name): + return getattr(self._env, name) + + @property + def observation_space(self): + spaces = {self._key: self._env.observation_space} + return gym.spaces.Dict(spaces) + + @property + def action_space(self): + return self._env.action_space + + def step(self, action): + obs, reward, done, info = self._env.step(action) + obs = {self._key: np.array(obs)} + return obs, reward, done, info + + def reset(self): + obs = self._env.reset() + obs = {self._key: np.array(obs)} + return obs + + +class RewardObs: + def __init__(self, env): + self._env = env + + def __getattr__(self, name): + return getattr(self._env, name) + + @property + def observation_space(self): + spaces = self._env.observation_space.spaces + assert 'reward' not in spaces + spaces['reward'] = gym.spaces.Box(-np.inf, np.inf, dtype=np.float32) + return gym.spaces.Dict(spaces) + + def step(self, action): + obs, reward, done, info = self._env.step(action) + obs['reward'] = reward + return obs, reward, done, info + + def reset(self): + obs = self._env.reset() + obs['reward'] = 0.0 + return obs + + +class Async: + _ACCESS = 1 + _CALL = 2 + _RESULT = 3 + _EXCEPTION = 4 + _CLOSE = 5 + + def __init__(self, ctor, strategy='process'): + self._strategy = strategy + if strategy == 'none': + self._env = ctor() + elif strategy == 'thread': + import multiprocessing.dummy as mp + elif strategy == 'process': + import multiprocessing as mp + else: + raise NotImplementedError(strategy) + if strategy != 'none': + self._conn, conn = mp.Pipe() + self._process = mp.Process(target=self._worker, args=(ctor, conn)) + atexit.register(self.close) + self._process.start() + self._obs_space = None + self._action_space = None + + @property + def observation_space(self): + if not self._obs_space: + self._obs_space = self.__getattr__('observation_space') + return self._obs_space + + @property + def action_space(self): + if not self._action_space: 
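+ # Cache the spec after the first request so later accesses avoid another round trip to the worker process.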
+ self._action_space = self.__getattr__('action_space') + return self._action_space + + def __getattr__(self, name): + if self._strategy == 'none': + return getattr(self._env, name) + self._conn.send((self._ACCESS, name)) + return self._receive() + + def call(self, name, *args, **kwargs): + blocking = kwargs.pop('blocking', True) + if self._strategy == 'none': + return functools.partial(getattr(self._env, name), *args, **kwargs) + payload = name, args, kwargs + self._conn.send((self._CALL, payload)) + promise = self._receive + return promise() if blocking else promise + + def close(self): + if self._strategy == 'none': + try: + self._env.close() + except AttributeError: + pass + return + try: + self._conn.send((self._CLOSE, None)) + self._conn.close() + except IOError: + # The connection was already closed. + pass + self._process.join() + + def step(self, action, blocking=True): + return self.call('step', action, blocking=blocking) + + def reset(self, blocking=True): + return self.call('reset', blocking=blocking) + + def _receive(self): + try: + message, payload = self._conn.recv() + except ConnectionResetError: + raise RuntimeError('Environment worker crashed.') + # Re-raise exceptions in the main process. + if message == self._EXCEPTION: + stacktrace = payload + raise Exception(stacktrace) + if message == self._RESULT: + return payload + raise KeyError(f'Received message of unexpected type {message}') + + def _worker(self, ctor, conn): + try: + env = ctor() + while True: + try: + # Only block for short times to have keyboard exceptions be raised. + if not conn.poll(0.1): + continue + message, payload = conn.recv() + except (EOFError, KeyboardInterrupt): + break + if message == self._ACCESS: + name = payload + result = getattr(env, name) + conn.send((self._RESULT, result)) + continue + if message == self._CALL: + name, args, kwargs = payload + result = getattr(env, name)(*args, **kwargs) + conn.send((self._RESULT, result)) + continue + if message == self._CLOSE: + assert payload is None + break + raise KeyError(f'Received message of unknown type {message}') + except Exception: + stacktrace = ''.join(traceback.format_exception(*sys.exc_info())) + print(f'Error in environment process: {stacktrace}') + conn.send((self._EXCEPTION, stacktrace)) + conn.close() diff --git a/conversion_scripts/hdf5_to_npz.py b/conversion_scripts/hdf5_to_npz.py new file mode 100644 index 0000000000000000000000000000000000000000..75cc881ddcaa7e3fa258493ad000c1454edc945f --- /dev/null +++ b/conversion_scripts/hdf5_to_npz.py @@ -0,0 +1,66 @@ +import datetime +import io +import pathlib +import uuid +import numpy as np +import h5py +import argparse + + +def save_episode(directory, episode): + timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S') + identifier = str(uuid.uuid4().hex) + length = len(episode['action']) + filename = directory / f'{timestamp}-{identifier}-{length}.npz' + with io.BytesIO() as f1: + np.savez_compressed(f1, **episode) + f1.seek(0) + with filename.open('wb') as f2: + f2.write(f1.read()) + return filename + + +def main(): + # Include argument parser + parser = argparse.ArgumentParser(description='Convert hdf5 files to npz.') + parser.add_argument('--input_dir', type=str, required=True, + help='Path to input files') + parser.add_argument('--output_dir', type=str, required=True, + help='Path to output files') + args = parser.parse_args() + + is_last = np.zeros(501, dtype=bool) + is_last[500] = True + is_first = np.zeros(501, dtype=bool) + is_first[0] = True + is_terminal = np.zeros(501, 
dtype=bool) + + out_dir = pathlib.Path(args.output_dir) + out_dir.mkdir(parents=True, exist_ok=True) + + filenames = sorted(pathlib.Path(args.input_dir).glob('*.hdf5')) + for filename in filenames: + with h5py.File(filename, "r") as f: + actions = f['action'][:] + observations = f['observation'][:] + rewards = f['reward'][:] + discounts = f['discount'][:] + while len(actions) > 0: + ep = { + 'image': observations[:501].transpose(0, 2, 3, 1), + 'action': actions[:501], + 'reward': rewards[:501], + 'discount': discounts[:501], + 'is_last': is_last, + 'is_first': is_first, + 'is_terminal': is_terminal, + } + actions = actions[501:] + observations = observations[501:] + rewards = rewards[501:] + discounts = discounts[501:] + save_episode(out_dir, ep) + + +if __name__ == '__main__': + main() diff --git a/conversion_scripts/npz_to_hdf5.py b/conversion_scripts/npz_to_hdf5.py new file mode 100644 index 0000000000000000000000000000000000000000..72b3f8f83d52726a3474cc527ec0487c62580591 --- /dev/null +++ b/conversion_scripts/npz_to_hdf5.py @@ -0,0 +1,81 @@ +import pathlib +import numpy as np +import h5py +import cv2 +import argparse + + +def load_episodes(directory, capacity=None): + # The returned directory from filenames to episodes is guaranteed to be in + # temporally sorted order. + filenames = sorted(directory.glob('*.npz')) + if capacity: + num_steps = 0 + num_episodes = 0 + for filename in reversed(filenames): + length = int(str(filename).split('-')[-1][:-4]) + num_steps += length + num_episodes += 1 + if num_steps >= capacity: + break + filenames = filenames[-num_episodes:] + episodes = {} + for filename in filenames: + try: + with filename.open('rb') as f: + episode = np.load(f) + episode = {k: episode[k] for k in episode.keys()} + # Conversion for older versions of npz files. + if 'is_terminal' not in episode: + episode['is_terminal'] = episode['discount'] == 0. 
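+ # Episodes that fail to load (e.g. corrupt or partially written files) are skipped below instead of aborting the conversion.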
+ except Exception as e: + print(f'Could not load episode {str(filename)}: {e}') + continue + episodes[str(filename)] = episode + return episodes + + +def main(): + # Include argument parser + parser = argparse.ArgumentParser(description='Convert npz files to hdf5.') + parser.add_argument('--input_dir', type=str, required=True, + help='Path to input files') + parser.add_argument('--output_dir', type=str, required=True, + help='Path to output files') + args = parser.parse_args() + + step_type = np.ones(501) + step_type[0] = 0 + step_type[500] = 2 + + output = {} + episodes = load_episodes(pathlib.Path(args.input_dir)) + episodes = list(episodes.values()) + + actions = [e['action'] for e in episodes] + discounts = [e['discount'] for e in episodes] + observations = [] + for e in episodes: + resized_images = np.empty((501, 84, 84, 3), dtype=e['image'].dtype) + for (k, i) in enumerate(e['image']): + resized_images[k] = cv2.resize(i, dsize=(84, 84), interpolation=cv2.INTER_CUBIC) + observations.append(resized_images.transpose(0, 3, 1, 2)) + rewards = [e['reward'] for e in episodes] + step_types = [step_type for _ in episodes] + + output['action'] = np.concatenate(actions) + output['discount'] = np.concatenate(discounts) + output['observation'] = np.concatenate(observations) + output['reward'] = np.concatenate(rewards) + output['step_type'] = np.concatenate(step_types) + + out_dir = pathlib.Path(args.output_dir) + out_dir.mkdir(parents=True, exist_ok=True) + + with h5py.File(out_dir / 'data.hdf5', 'w') as shard_file: + for k, v in output.items(): + shard_file.create_dataset(k, data=v, compression='gzip') + + +if __name__ == '__main__': + main() diff --git a/conversion_scripts/split_hdf5_shards.py b/conversion_scripts/split_hdf5_shards.py new file mode 100644 index 0000000000000000000000000000000000000000..04ac193073c6828cf24106bc34e0dc602ebdedbc --- /dev/null +++ b/conversion_scripts/split_hdf5_shards.py @@ -0,0 +1,40 @@ +import pathlib +import numpy as np +import h5py +import argparse + + +def main(): + # Include argument parser + parser = argparse.ArgumentParser(description='Split hdf5 shards to smaller.') + parser.add_argument('--input_dir', type=str, required=True, + help='Path to input files') + parser.add_argument('--output_dir', type=str, required=True, + help='Path to output files') + parser.add_argument('--split_size', type=int, default=5) + args = parser.parse_args() + + in_dir = pathlib.Path(args.input_dir) + out_dir = pathlib.Path(args.output_dir) + out_dir.mkdir(parents=True, exist_ok=True) + + counter = 0 + for filename in in_dir.glob('*.hdf5'): + print(filename) + with h5py.File(filename, "r") as episodes: + episodes = {k: episodes[k][:] for k in episodes.keys()} + print(episodes['action'].shape[0]) + + split_episodes = {k: np.array_split(v, args.split_size) for k, v in episodes.items()} + split_episodes = [{k: v[idx] for k, v in split_episodes.items()} for idx in range(args.split_size)] + + for eps in split_episodes: + with h5py.File(out_dir / f'{counter}.hdf5', 'w') as shard_file: + for k, v in eps.items(): + shard_file.create_dataset(k, data=v, compression='gzip') + print(counter) + counter += 1 + + +if __name__ == '__main__': + main() diff --git a/dockerfiles/Dockerfile-drqv2 b/dockerfiles/Dockerfile-drqv2 new file mode 100644 index 0000000000000000000000000000000000000000..8ac6e7f9e4b887b85766ee1fe2f888d8d055aa79 --- /dev/null +++ b/dockerfiles/Dockerfile-drqv2 @@ -0,0 +1,74 @@ +# FROM MAIN DIR: docker build -t offline-drqv2 -f dockerfiles/Dockerfile-drqv2 . 
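+# CUDA 11.3 / cuDNN 8 base image; the steps below install the system libraries, MuJoCo 2.1.0, Miniconda and PyTorch (cudatoolkit 11.3) used by the DrQ-v2 baseline.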
+ +FROM nvidia/cuda:11.3.0-cudnn8-devel-ubuntu18.04 +MAINTAINER Anonymous + +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y \ + vim wget unzip \ + libosmesa6-dev libgl1-mesa-glx libgl1-mesa-dev patchelf libglfw3 build-essential + +RUN apt-get update && apt-get install -y --no-install-recommends \ + tzdata \ + cmake \ + git \ + curl \ + ca-certificates \ + libjpeg-dev \ + libpng-dev \ + libglib2.0-0 + +# ZOO PACKAGES +RUN apt-get -y update \ + && apt-get -y install \ + ffmpeg \ + freeglut3-dev \ + swig \ + xvfb \ + libxrandr2 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# ATARI PACKAGES +RUN apt-get -y update \ + && apt-get -y install \ + tmux \ + libsm6 \ + libxext6 \ + libxrender-dev \ + unrar \ + zlib1g \ + zlib1g-dev \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# mujoco +RUN mkdir -p /.mujoco \ + && wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz -O mujoco.tar.gz \ + && tar -xf mujoco.tar.gz -C /.mujoco \ + && rm mujoco.tar.gz +ENV MUJOCO_PY_MUJOCO_PATH /.mujoco/mujoco210 +ENV LD_LIBRARY_PATH /.mujoco/mujoco210/bin:${LD_LIBRARY_PATH} +ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} +ENV MJLIB_PATH /.mujoco/mujoco210/bin/libmujoco210.so + +RUN useradd -u <> --create-home user +USER user +WORKDIR /home/user + +RUN wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + bash Miniconda3-latest-Linux-x86_64.sh -b -p miniconda3 && \ + rm Miniconda3-latest-Linux-x86_64.sh +ENV PATH /home/user/miniconda3/bin:$PATH + +RUN pip install --upgrade pip +RUN pip install mujoco_py seaborn +RUN conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch +RUN pip install absl-py +RUN pip install dm_control +RUN pip install tb-nightly termcolor +RUN pip install imageio imageio-ffmpeg hydra-core hydra-submitit-launcher pandas +RUN pip install ipdb yapf sklearn matplotlib opencv-python + +WORKDIR /vd4rl diff --git a/dockerfiles/Dockerfile-dv2 b/dockerfiles/Dockerfile-dv2 new file mode 100644 index 0000000000000000000000000000000000000000..386b7adfdd45f7a2ecd7e837f3c1bbe2d0319f3f --- /dev/null +++ b/dockerfiles/Dockerfile-dv2 @@ -0,0 +1,58 @@ +# FROM MAIN DIR: docker build -t offline-dv2 -f dockerfiles/Dockerfile-dv2 . + +FROM tensorflow/tensorflow:2.6.0-gpu +MAINTAINER Anonymous + +# System packages. +RUN apt-get update && apt-get install -y \ + ffmpeg \ + libgl1-mesa-dev \ + libosmesa6-dev \ + python3-pip \ + unrar \ + wget \ + && apt-get clean + +RUN apt-get update && apt-get install -y \ + git \ + && apt-get clean + +RUN useradd -u <> --create-home user +USER user +WORKDIR /home/user + +RUN wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + bash Miniconda3-latest-Linux-x86_64.sh -b -p miniconda3 && \ + rm Miniconda3-latest-Linux-x86_64.sh +ENV PATH /home/user/miniconda3/bin:$PATH + +# MuJoCo +ENV MUJOCO_GL egl +ENV MUJOCO_PY_MUJOCO_PATH /home/user/.mujoco/mujoco210 +RUN mkdir -p .mujoco \ + && wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz -O mujoco.tar.gz \ + && tar -xf mujoco.tar.gz -C .mujoco \ + && rm mujoco.tar.gz +ENV LD_LIBRARY_PATH /home/user/.mujoco/mujoco210/bin:${LD_LIBRARY_PATH} + +# Python packages. 
+RUN pip3 install --no-cache-dir 'gym[atari]' +RUN pip3 install --no-cache-dir atari_py +RUN pip3 install --no-cache-dir crafter +RUN pip3 install --no-cache-dir dm_control +RUN pip3 install --no-cache-dir ruamel.yaml +RUN pip3 install --no-cache-dir tensorflow==2.6.0 tensorflow_probability==0.14.1 tensorflow-estimator==2.6.0 tensorboard==2.6.0 keras==2.6.0 +RUN pip3 install --no-cache-dir gym-minigrid sklearn + +# Atari ROMS. +RUN wget -L -nv http://www.atarimania.com/roms/Roms.rar && \ + unrar x Roms.rar && \ + unzip ROMS.zip && \ + python3 -m atari_py.import_roms ROMS && \ + rm -rf Roms.rar ROMS.zip ROMS + +RUN pip3 install -U 'mujoco-py<2.2,>=2.1' +RUN wget https://www.roboti.us/file/mjkey.txt -O /home/user/.mujoco/mjkey.txt +RUN conda install patchelf + +WORKDIR /vd4rl diff --git a/drqbc/.DS_Store b/drqbc/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..1a7c0e891550248232d4f90baa4238bc581b71de Binary files /dev/null and b/drqbc/.DS_Store differ diff --git a/drqbc/LICENSE b/drqbc/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b96dcb0480a0b0be0727976e5202a1e7b23edc3f --- /dev/null +++ b/drqbc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/drqbc/bcagent.py b/drqbc/bcagent.py new file mode 100644 index 0000000000000000000000000000000000000000..776c2c0fd19ae039240bf7acd7d1fd7247a5d77a --- /dev/null +++ b/drqbc/bcagent.py @@ -0,0 +1,105 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
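+# Behavioural cloning baseline: reuses the DrQ-v2 Encoder and Actor but trains them with a pure MSE imitation loss and no critic.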
+import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from drqv2 import Actor, Encoder, RandomShiftsAug +import utils + + +class BCAgent: + def __init__(self, obs_shape, action_shape, device, lr, feature_dim, + hidden_dim, critic_target_tau, num_expl_steps, + update_every_steps, stddev_schedule, stddev_clip, use_tb, + augmentation=RandomShiftsAug(pad=4)): + self.device = device + self.critic_target_tau = critic_target_tau + self.update_every_steps = update_every_steps + self.use_tb = use_tb + self.num_expl_steps = num_expl_steps + self.stddev_schedule = stddev_schedule + self.stddev_clip = stddev_clip + + # models + self.encoder = Encoder(obs_shape).to(device) + self.actor = Actor(self.encoder.repr_dim, action_shape, feature_dim, + hidden_dim).to(device) + + # optimizers + self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=lr) + self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=lr) + + # data augmentation + self.aug = augmentation + + self.train() + + def train(self, training=True): + self.training = training + self.encoder.train(training) + self.actor.train(training) + + def act(self, obs, step, eval_mode): + obs = torch.as_tensor(obs, device=self.device) + obs = self.encoder(obs.unsqueeze(0)) + stddev = utils.schedule(self.stddev_schedule, step) + dist = self.actor(obs, stddev) + if eval_mode: + action = dist.mean + else: + action = dist.sample(clip=None) + if step < self.num_expl_steps: + action.uniform_(-1.0, 1.0) + return action.cpu().numpy()[0] + + def update_actor(self, obs, step, behavioural_action=None): + metrics = dict() + + stddev = utils.schedule(self.stddev_schedule, step) + dist = self.actor(obs, stddev) + action = dist.sample(clip=self.stddev_clip) + log_prob = dist.log_prob(action).sum(-1, keepdim=True) + + # offline BC Loss + actor_loss = F.mse_loss(action, behavioural_action) + + # optimize actor and encoder + self.encoder_opt.zero_grad(set_to_none=True) + self.actor_opt.zero_grad(set_to_none=True) + actor_loss.backward() + self.actor_opt.step() + self.encoder_opt.step() + + if self.use_tb: + metrics['actor_logprob'] = log_prob.mean().item() + metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item() + metrics['actor_bc_loss'] = actor_loss.item() + + return metrics + + def update(self, replay_buffer, step): + metrics = dict() + + if step % self.update_every_steps != 0: + return metrics + + batch = next(replay_buffer) + obs, action, reward, _, _ = utils.to_torch( + batch, self.device) + + # augment + obs = self.aug(obs.float()) + # encode + obs = self.encoder(obs) + + if self.use_tb: + metrics['batch_reward'] = reward.mean().item() + + # update actor + metrics.update(self.update_actor(obs, step, action.detach())) + + return metrics diff --git a/drqbc/cfgs/.DS_Store b/drqbc/cfgs/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..9fa281e7ac8817658e68456135bbd1259be77cbe Binary files /dev/null and b/drqbc/cfgs/.DS_Store differ diff --git a/drqbc/cfgs/algo/bc.yaml b/drqbc/cfgs/algo/bc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de8fc9af91e9ef0f689afb94f38a22cb18d0cfc7 --- /dev/null +++ b/drqbc/cfgs/algo/bc.yaml @@ -0,0 +1,20 @@ +defaults: + - _self_ + +# experiment +experiment: bc + +agent: + _target_: bcagent.BCAgent + obs_shape: ??? # to be specified later + action_shape: ??? 
# to be specified later + device: ${device} + lr: ${lr} + critic_target_tau: 0.01 + update_every_steps: 2 + use_tb: ${use_tb} + num_expl_steps: 2000 + hidden_dim: 1024 + feature_dim: ${feature_dim} + stddev_schedule: ${stddev_schedule} + stddev_clip: 0.3 \ No newline at end of file diff --git a/drqbc/cfgs/algo/cql.yaml b/drqbc/cfgs/algo/cql.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23b16da162ec0433f9e415b90f55189055b927dc --- /dev/null +++ b/drqbc/cfgs/algo/cql.yaml @@ -0,0 +1,27 @@ +defaults: + - _self_ + +# experiment +experiment: cql + +agent: + _target_: cql.CQLAgent + obs_shape: ??? # to be specified later + action_shape: ??? # to be specified later + device: ${device} + lr: ${lr} + critic_target_tau: 0.01 + update_every_steps: 2 + use_tb: ${use_tb} + num_expl_steps: 2000 + hidden_dim: 1024 + feature_dim: ${feature_dim} + stddev_schedule: ${stddev_schedule} + stddev_clip: 0.3 + offline: ${offline} + cql_importance_sample: ${cql_importance_sample} + temp: ${temp} + min_q_weight: ${min_q_weight} + num_random: ${num_random} + with_lagrange: ${with_lagrange} + lagrange_thresh: ${lagrange_thresh} diff --git a/drqbc/cfgs/algo/drqv2.yaml b/drqbc/cfgs/algo/drqv2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b70e2039d79530ad322614f31e59897e13cdc7d --- /dev/null +++ b/drqbc/cfgs/algo/drqv2.yaml @@ -0,0 +1,23 @@ +defaults: + - _self_ + +# experiment +experiment: drqv2 + +agent: + _target_: drqv2.DrQV2Agent + obs_shape: ??? # to be specified later + action_shape: ??? # to be specified later + device: ${device} + lr: ${lr} + critic_target_tau: 0.01 + update_every_steps: 2 + use_tb: ${use_tb} + num_expl_steps: 2000 + hidden_dim: 1024 + feature_dim: ${feature_dim} + stddev_schedule: ${stddev_schedule} + stddev_clip: 0.3 + offline: ${offline} + bc_weight: ${bc_weight} + use_bc: ${use_bc} \ No newline at end of file diff --git a/drqbc/cfgs/algo/noaug_bc.yaml b/drqbc/cfgs/algo/noaug_bc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..19a3153ab146f935f3618e2a639baefc4a52af1e --- /dev/null +++ b/drqbc/cfgs/algo/noaug_bc.yaml @@ -0,0 +1,10 @@ +defaults: + - bc + - _self_ + +# experiment +experiment: noaug_bc + +agent: + augmentation: + _target_: drqv2.NoShiftAug \ No newline at end of file diff --git a/drqbc/cfgs/algo/noaug_drqv2.yaml b/drqbc/cfgs/algo/noaug_drqv2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f82ba80a0bafe0b83e341fc5db66abb281517394 --- /dev/null +++ b/drqbc/cfgs/algo/noaug_drqv2.yaml @@ -0,0 +1,10 @@ +defaults: + - drqv2 + - _self_ + +# experiment +experiment: noaug_drqv2 + +agent: + augmentation: + _target_: drqv2.NoShiftAug diff --git a/drqbc/cfgs/config.yaml b/drqbc/cfgs/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..062b7e444b91a2cdfc9ae214ee3a8ba673a7c162 --- /dev/null +++ b/drqbc/cfgs/config.yaml @@ -0,0 +1,63 @@ +defaults: + - _self_ + - task@_global_: offline_dmc + - algo@_global_: drqv2 + - override hydra/launcher: submitit_local + +# task settings +frame_stack: 3 +action_repeat: 2 +discount: 0.99 +distracting_mode: null +# train settings +num_seed_frames: 4000 +# eval +eval_every_frames: 10000 +num_eval_episodes: 10 +eval_on_distracting: False +eval_on_multitask: False +eval_save_vid_every_step: 100000 +# snapshot +save_snapshot: False +# replay buffer +replay_buffer_size: 1_000_000 +replay_buffer_num_workers: 1 +nstep: 3 +batch_size: 256 +# misc +seed: 1 +device: cuda +save_video: True +save_train_video: False +use_tb: True +# 
experiment +experiment: exp +# agent +lr: 1e-4 +feature_dim: 50 +# offline +offline: False +bc_weight: 2.5 +use_bc: True +# cql +cql_importance_sample: False +temp: 1.0 +min_q_weight: 1.0 +num_random: 10 +with_lagrange: False +lagrange_thresh: 0.0 + +hydra: + run: + dir: drqv2_data/${experiment}/${task_name}/${now:%Y.%m.%d}/${now:%H%M%S}` + sweep: + dir: ./exp/${now:%Y.%m.%d}/${now:%H%M}_${agent_cfg.experiment} + subdir: ${hydra.job.num} + launcher: + timeout_min: 4300 + cpus_per_task: 10 + gpus_per_node: 1 + tasks_per_node: 1 + mem_gb: 160 + nodes: 1 + submitit_folder: ./exp/${now:%Y.%m.%d}/${now:%H%M%S}_${agent_cfg.experiment}/.slurm diff --git a/drqbc/cfgs/task/offline_dmc.yaml b/drqbc/cfgs/task/offline_dmc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5ad6f39b8e1e7018bcc1a17fb5e21a4835d6c838 --- /dev/null +++ b/drqbc/cfgs/task/offline_dmc.yaml @@ -0,0 +1,12 @@ +defaults: + - _self_ + +num_train_frames: 1100000 +stddev_schedule: 'linear(1.0,0.1,250000)' +nstep: 1 +show_train_stats_every_frames: 2000 +lr: 3e-4 +replay_buffer_size: 1_000_000 +offline: True +task_name: offline_walker_walk_random +offline_dir: ... diff --git a/drqbc/cfgs/task/online/acrobot_swingup.yaml b/drqbc/cfgs/task/online/acrobot_swingup.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7dc0a7ab5b75fd069b80c4f2e9746b274c2076ff --- /dev/null +++ b/drqbc/cfgs/task/online/acrobot_swingup.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: acrobot_swingup diff --git a/drqbc/cfgs/task/online/cartpole_balance.yaml b/drqbc/cfgs/task/online/cartpole_balance.yaml new file mode 100644 index 0000000000000000000000000000000000000000..589b7bc79035fcec8fd5fb7f5c2be357f5908fdb --- /dev/null +++ b/drqbc/cfgs/task/online/cartpole_balance.yaml @@ -0,0 +1,5 @@ +defaults: + - easy + - _self_ + +task_name: cartpole_balance diff --git a/drqbc/cfgs/task/online/cartpole_balance_sparse.yaml b/drqbc/cfgs/task/online/cartpole_balance_sparse.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd780385bac000a1b43f51c105d79722a9914e96 --- /dev/null +++ b/drqbc/cfgs/task/online/cartpole_balance_sparse.yaml @@ -0,0 +1,5 @@ +defaults: + - easy + - _self_ + +task_name: cartpole_balance_sparse diff --git a/drqbc/cfgs/task/online/cartpole_swingup.yaml b/drqbc/cfgs/task/online/cartpole_swingup.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ecef2f619fac8fa8a481e8c741119e1c74fb5a7 --- /dev/null +++ b/drqbc/cfgs/task/online/cartpole_swingup.yaml @@ -0,0 +1,5 @@ +defaults: + - easy + - _self_ + +task_name: cartpole_swingup diff --git a/drqbc/cfgs/task/online/cartpole_swingup_sparse.yaml b/drqbc/cfgs/task/online/cartpole_swingup_sparse.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec40b19b1e91d6ed3240234cd4ffd7fce44f0b3c --- /dev/null +++ b/drqbc/cfgs/task/online/cartpole_swingup_sparse.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: cartpole_swingup_sparse diff --git a/drqbc/cfgs/task/online/cheetah_run.yaml b/drqbc/cfgs/task/online/cheetah_run.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49d9d66327174baf54b77432f95453c71d5f8db3 --- /dev/null +++ b/drqbc/cfgs/task/online/cheetah_run.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: cheetah_run diff --git a/drqbc/cfgs/task/online/cup_catch.yaml b/drqbc/cfgs/task/online/cup_catch.yaml new file mode 100644 index 0000000000000000000000000000000000000000..704c2845c08e847fe4fe525fa423fe0e6c1f5f10 --- 
/dev/null +++ b/drqbc/cfgs/task/online/cup_catch.yaml @@ -0,0 +1,5 @@ +defaults: + - easy + - _self_ + +task_name: cup_catch diff --git a/drqbc/cfgs/task/online/easy.yaml b/drqbc/cfgs/task/online/easy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9fd93e5c3865cd4ef84271f14f8c228003b8aa72 --- /dev/null +++ b/drqbc/cfgs/task/online/easy.yaml @@ -0,0 +1,2 @@ +num_train_frames: 1100000 +stddev_schedule: 'linear(1.0,0.1,100000)' \ No newline at end of file diff --git a/drqbc/cfgs/task/online/finger_spin.yaml b/drqbc/cfgs/task/online/finger_spin.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e73329aafeb6477e171009df280019e1dc10ee89 --- /dev/null +++ b/drqbc/cfgs/task/online/finger_spin.yaml @@ -0,0 +1,5 @@ +defaults: + - easy + - _self_ + +task_name: finger_spin diff --git a/drqbc/cfgs/task/online/finger_turn_easy.yaml b/drqbc/cfgs/task/online/finger_turn_easy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ab82a8d0f86cdd70874f9b0c9ca1c6838ce4ca8 --- /dev/null +++ b/drqbc/cfgs/task/online/finger_turn_easy.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: finger_turn_easy \ No newline at end of file diff --git a/drqbc/cfgs/task/online/finger_turn_hard.yaml b/drqbc/cfgs/task/online/finger_turn_hard.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ce162ceaa8defd43b46fdaf39f84296c5118a47 --- /dev/null +++ b/drqbc/cfgs/task/online/finger_turn_hard.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: finger_turn_hard diff --git a/drqbc/cfgs/task/online/hard.yaml b/drqbc/cfgs/task/online/hard.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7c36e90140df748ca6994adee61e387f8004ca0 --- /dev/null +++ b/drqbc/cfgs/task/online/hard.yaml @@ -0,0 +1,2 @@ +num_train_frames: 30100000 +stddev_schedule: 'linear(1.0,0.1,2000000)' \ No newline at end of file diff --git a/drqbc/cfgs/task/online/hopper_hop.yaml b/drqbc/cfgs/task/online/hopper_hop.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c7e6cdd0df855261799f84774189a4f8b4b21220 --- /dev/null +++ b/drqbc/cfgs/task/online/hopper_hop.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: hopper_hop diff --git a/drqbc/cfgs/task/online/hopper_stand.yaml b/drqbc/cfgs/task/online/hopper_stand.yaml new file mode 100644 index 0000000000000000000000000000000000000000..94dc8b05bbf38db9f6eb3fe4701f932d29c6ce2b --- /dev/null +++ b/drqbc/cfgs/task/online/hopper_stand.yaml @@ -0,0 +1,5 @@ +defaults: + - easy + - _self_ + +task_name: hopper_stand diff --git a/drqbc/cfgs/task/online/humanoid_run.yaml b/drqbc/cfgs/task/online/humanoid_run.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e4c98980f0f4bbdcca49c1a28c995c2a92d2b85 --- /dev/null +++ b/drqbc/cfgs/task/online/humanoid_run.yaml @@ -0,0 +1,7 @@ +defaults: + - hard + - _self_ + +task_name: humanoid_run +lr: 8e-5 +feature_dim: 100 diff --git a/drqbc/cfgs/task/online/humanoid_stand.yaml b/drqbc/cfgs/task/online/humanoid_stand.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c786c83f71574e9297ea4a4aebf8ce364d5c4c04 --- /dev/null +++ b/drqbc/cfgs/task/online/humanoid_stand.yaml @@ -0,0 +1,7 @@ +defaults: + - hard + - _self_ + +task_name: humanoid_stand +lr: 8e-5 +feature_dim: 100 diff --git a/drqbc/cfgs/task/online/humanoid_walk.yaml b/drqbc/cfgs/task/online/humanoid_walk.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..1aff67c7450542f7e60c6f3da6a1e9e3a5826867 --- /dev/null +++ b/drqbc/cfgs/task/online/humanoid_walk.yaml @@ -0,0 +1,7 @@ +defaults: + - hard + - _self_ + +task_name: humanoid_walk +lr: 8e-5 +feature_dim: 100 diff --git a/drqbc/cfgs/task/online/medium.yaml b/drqbc/cfgs/task/online/medium.yaml new file mode 100644 index 0000000000000000000000000000000000000000..56e47a3b44d4036952ac481bcae3a439a4fbf4c0 --- /dev/null +++ b/drqbc/cfgs/task/online/medium.yaml @@ -0,0 +1,2 @@ +num_train_frames: 3100000 +stddev_schedule: 'linear(1.0,0.1,500000)' \ No newline at end of file diff --git a/drqbc/cfgs/task/online/pendulum_swingup.yaml b/drqbc/cfgs/task/online/pendulum_swingup.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73a14c3dab7dccb4f9902b7fb44f5c3a17327d30 --- /dev/null +++ b/drqbc/cfgs/task/online/pendulum_swingup.yaml @@ -0,0 +1,5 @@ +defaults: + - easy + - _self_ + +task_name: pendulum_swingup diff --git a/drqbc/cfgs/task/online/quadruped_run.yaml b/drqbc/cfgs/task/online/quadruped_run.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cae00bc3484c35640f58b42df75fbb4a6782a27b --- /dev/null +++ b/drqbc/cfgs/task/online/quadruped_run.yaml @@ -0,0 +1,6 @@ +defaults: + - medium + - _self_ + +task_name: quadruped_run +replay_buffer_size: 100000 \ No newline at end of file diff --git a/drqbc/cfgs/task/online/quadruped_walk.yaml b/drqbc/cfgs/task/online/quadruped_walk.yaml new file mode 100644 index 0000000000000000000000000000000000000000..86a8b24476d3e93a66e436595846ea5ac9511663 --- /dev/null +++ b/drqbc/cfgs/task/online/quadruped_walk.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: quadruped_walk diff --git a/drqbc/cfgs/task/online/reach_duplo.yaml b/drqbc/cfgs/task/online/reach_duplo.yaml new file mode 100644 index 0000000000000000000000000000000000000000..341a3c85a237b62aca807ddc640be6e5b26490fa --- /dev/null +++ b/drqbc/cfgs/task/online/reach_duplo.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: reach_duplo diff --git a/drqbc/cfgs/task/online/reacher_easy.yaml b/drqbc/cfgs/task/online/reacher_easy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5be502b21d0c94c543c9eb230c007ad8a32bf5ed --- /dev/null +++ b/drqbc/cfgs/task/online/reacher_easy.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: reacher_easy diff --git a/drqbc/cfgs/task/online/reacher_hard.yaml b/drqbc/cfgs/task/online/reacher_hard.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c604228e11185ec994f34f7a6f0b6b281bcdde58 --- /dev/null +++ b/drqbc/cfgs/task/online/reacher_hard.yaml @@ -0,0 +1,5 @@ +defaults: + - medium + - _self_ + +task_name: reacher_hard diff --git a/drqbc/cfgs/task/online/walker_run.yaml b/drqbc/cfgs/task/online/walker_run.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a62b0da7648085c4ab79516ee765e82559c11a02 --- /dev/null +++ b/drqbc/cfgs/task/online/walker_run.yaml @@ -0,0 +1,7 @@ +defaults: + - medium + - _self_ + +task_name: walker_run +nstep: 1 +batch_size: 512 \ No newline at end of file diff --git a/drqbc/cfgs/task/online/walker_stand.yaml b/drqbc/cfgs/task/online/walker_stand.yaml new file mode 100644 index 0000000000000000000000000000000000000000..72590b4e9e9622ca0fa313fd876ac68c9562d841 --- /dev/null +++ b/drqbc/cfgs/task/online/walker_stand.yaml @@ -0,0 +1,7 @@ +defaults: + - easy + - _self_ + +task_name: walker_stand +nstep: 1 +batch_size: 512 diff --git 
a/drqbc/cfgs/task/online/walker_walk.yaml b/drqbc/cfgs/task/online/walker_walk.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bb0250a06e913c9e663d102631b88d619ad932a1 --- /dev/null +++ b/drqbc/cfgs/task/online/walker_walk.yaml @@ -0,0 +1,7 @@ +defaults: + - easy + - _self_ + +task_name: walker_walk +nstep: 1 +batch_size: 512 diff --git a/drqbc/conda_env.yml b/drqbc/conda_env.yml new file mode 100644 index 0000000000000000000000000000000000000000..a8d73a836a7c7d6fdba82703f55d259e89c921bb --- /dev/null +++ b/drqbc/conda_env.yml @@ -0,0 +1,245 @@ +name: drqv2 +channels: + - pytorch + - nvidia + - defaults +dependencies: + - _libgcc_mutex=0.1 + - _openmp_mutex=4.5 + - absl-py=0.13.0 + - anyio=2.2.0 + - argon2-cffi=20.1.0 + - async_generator=1.10 + - attrs=21.2.0 + - babel=2.9.1 + - backcall=0.2.0 + - blas=1.0 + - bleach=4.1.0 + - blosc=1.21.0 + - brotli=1.0.9 + - brotlipy=0.7.0 + - brunsli=0.1 + - bzip2=1.0.8 + - c-ares=1.18.1 + - ca-certificates=2021.10.26 + - certifi=2021.10.8 + - cffi=1.14.6 + - cfitsio=3.470 + - charls=2.2.0 + - charset-normalizer=2.0.4 + - cloudpickle=2.0.0 + - cryptography=36.0.0 + - cudatoolkit=11.1.74 + - cycler=0.11.0 + - cytoolz=0.11.0 + - dask-core=2021.10.0 + - debugpy=1.5.1 + - defusedxml=0.7.1 + - entrypoints=0.3 + - ffmpeg=4.2.2 + - fonttools=4.25.0 + - freetype=2.11.0 + - fsspec=2021.10.1 + - giflib=5.2.1 + - gmp=6.2.1 + - gnutls=3.6.15 + - idna=3.3 + - imagecodecs=2021.8.26 + - imageio=2.9.0 + - importlib-metadata=4.8.2 + - importlib_metadata=4.8.2 + - intel-openmp=2021.4.0 + - ipykernel=6.4.1 + - ipython=7.29.0 + - ipython_genutils=0.2.0 + - jedi=0.18.0 + - jinja2=3.0.2 + - jpeg=9d + - json5=0.9.6 + - jsonschema=3.2.0 + - jupyter-packaging=0.7.12 + - jupyter_client=7.1.0 + - jupyter_core=4.9.1 + - jupyter_server=1.4.1 + - jupyterlab=3.0.14 + - jupyterlab_pygments=0.1.2 + - jupyterlab_server=2.10.2 + - jxrlib=1.1 + - kiwisolver=1.3.1 + - krb5=1.19.2 + - lame=3.100 + - lcms2=2.12 + - ld_impl_linux-64=2.35.1 + - lerc=3.0 + - libaec=1.0.4 + - libcurl=7.80.0 + - libdeflate=1.8 + - libedit=3.1.20210910 + - libev=4.33 + - libffi=3.3 + - libgcc-ng=9.3.0 + - libgfortran-ng=7.5.0 + - libgfortran4=7.5.0 + - libgomp=9.3.0 + - libidn2=2.3.2 + - libnghttp2=1.46.0 + - libopus=1.3.1 + - libpng=1.6.37 + - libsodium=1.0.18 + - libssh2=1.9.0 + - libstdcxx-ng=9.3.0 + - libtasn1=4.16.0 + - libtiff=4.2.0 + - libunistring=0.9.10 + - libuv=1.40.0 + - libvpx=1.7.0 + - libwebp=1.2.0 + - libwebp-base=1.2.0 + - libzopfli=1.0.3 + - locket=0.2.1 + - lz4-c=1.9.3 + - markupsafe=2.0.1 + - matplotlib-inline=0.1.2 + - mistune=0.8.4 + - mkl=2020.2 + - mkl-service=2.3.0 + - mkl_fft=1.3.0 + - mkl_random=1.1.1 + - munkres=1.1.4 + - nbclassic=0.2.6 + - nbclient=0.5.3 + - nbconvert=6.3.0 + - nbformat=5.1.3 + - ncurses=6.3 + - nest-asyncio=1.5.1 + - nettle=3.7.3 + - networkx=2.6.3 + - notebook=6.4.6 + - numpy=1.19.2 + - numpy-base=1.19.2 + - olefile=0.46 + - openh264=2.1.1 + - openjpeg=2.4.0 + - openssl=1.1.1m + - packaging=21.3 + - pandocfilters=1.4.3 + - parso=0.8.3 + - partd=1.2.0 + - pexpect=4.8.0 + - pickleshare=0.7.5 + - pillow=8.4.0 + - pip=21.1.3 + - prometheus_client=0.12.0 + - prompt-toolkit=3.0.20 + - ptyprocess=0.7.0 + - pycparser=2.21 + - pygments=2.10.0 + - pyopenssl=21.0.0 + - pyparsing=2.4.7 + - pyrsistent=0.18.0 + - pysocks=1.7.1 + - python=3.8.12 + - python-dateutil=2.8.2 + - pytorch=1.10.1 + - pytorch-mutex=1.0 + - pytz=2021.3 + - pywavelets=1.1.1 + - pyyaml=6.0 + - pyzmq=22.3.0 + - readline=8.1.2 + - requests=2.27.1 + - scikit-image=0.18.1 + - 
scipy=1.6.2 + - send2trash=1.8.0 + - setuptools=58.0.4 + - six=1.16.0 + - snappy=1.1.8 + - sniffio=1.2.0 + - sqlite=3.37.0 + - terminado=0.9.4 + - testpath=0.5.0 + - tifffile=2021.7.2 + - tk=8.6.11 + - toolz=0.11.2 + - torchaudio=0.10.1 + - torchvision=0.11.2 + - tornado=6.1 + - traitlets=5.1.1 + - typing_extensions=3.10.0.2 + - urllib3=1.26.7 + - wcwidth=0.2.5 + - webencodings=0.5.1 + - wheel=0.37.1 + - x264=1!157.20191217 + - xz=5.2.5 + - yaml=0.2.5 + - zeromq=4.3.4 + - zfp=0.5.5 + - zipp=3.7.0 + - zlib=1.2.11 + - zstd=1.4.9 + - pip: + - antlr4-python3-runtime==4.8 + - astunparse==1.6.3 + - cachetools==4.2.4 + - cython==0.29.26 + - decorator==4.4.2 + - dm-control==0.0.403778684 + - dm-env==1.5 + - dm-tree==0.1.6 + - dmc2gym==1.0.0 + - fasteners==0.16.3 + - flatbuffers==2.0 + - future==0.18.2 + - gast==0.4.0 + - glfw==2.5.0 + - google-auth==2.3.3 + - google-auth-oauthlib==0.4.6 + - google-pasta==0.2.0 + - grpcio==1.43.0 + - gym==0.21.0 + - h5py==3.6.0 + - hydra-core==1.1.0 + - hydra-submitit-launcher==1.1.5 + - imageio-ffmpeg==0.4.4 + - importlib-resources==5.4.0 + - ipdb==0.13.9 + - joblib==1.1.0 + - keras==2.7.0 + - keras-preprocessing==1.1.2 + - labmaze==1.0.5 + - libclang==12.0.0 + - lxml==4.7.1 + - markdown==3.3.6 + - matplotlib==3.4.2 + - moviepy==1.0.3 + - mujoco-py==2.1.2.14 + - oauthlib==3.1.1 + - omegaconf==2.1.1 + - opencv-python==4.5.3.56 + - opt-einsum==3.3.0 + - pandas==1.3.0 + - proglog==0.1.9 + - protobuf==3.19.3 + - pyasn1==0.4.8 + - pyasn1-modules==0.2.8 + - pyopengl==3.1.5 + - requests-oauthlib==1.3.0 + - rsa==4.8 + - scikit-learn==1.0.2 + - sklearn==0.0 + - submitit==1.4.1 + - tb-nightly==2.8.0a20220114 + - tensorboard==2.7.0 + - tensorboard-data-server==0.6.1 + - tensorboard-plugin-wit==1.8.1 + - tensorflow==2.7.0 + - tensorflow-estimator==2.7.0 + - tensorflow-io-gcs-filesystem==0.23.1 + - termcolor==1.1.0 + - threadpoolctl==3.0.0 + - toml==0.10.2 + - tqdm==4.62.3 + - werkzeug==2.0.2 + - wrapt==1.13.3 + - yapf==0.31.0 diff --git a/drqbc/cql.py b/drqbc/cql.py new file mode 100644 index 0000000000000000000000000000000000000000..db937affbd52700d9006580721f5c285ac6e2d76 --- /dev/null +++ b/drqbc/cql.py @@ -0,0 +1,249 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
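+# CQL on top of the DrQ-v2 networks: the critic update adds a conservative penalty (logsumexp of Q over random, current-policy and next-state actions, minus the dataset Q-values) to the standard TD loss.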
+import hydra +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +import utils +from drqv2 import Actor, Critic, Encoder, RandomShiftsAug, NoShiftAug + + +class CQLAgent: + def __init__( + self, + obs_shape, + action_shape, + device, + lr, + feature_dim, + hidden_dim, + critic_target_tau, + num_expl_steps, + update_every_steps, + stddev_schedule, + stddev_clip, + use_tb, + offline=False, + augmentation=RandomShiftsAug(pad=4), + # CQL + cql_importance_sample=False, + temp=1.0, + min_q_weight=1.0, + # sort of backup + num_random=10, + with_lagrange=False, + lagrange_thresh=0.0, + ): + self.device = device + self.critic_target_tau = critic_target_tau + self.update_every_steps = update_every_steps + self.use_tb = use_tb + self.num_expl_steps = num_expl_steps + self.stddev_schedule = stddev_schedule + self.stddev_clip = stddev_clip + self.offline = offline + + # models + self.encoder = Encoder(obs_shape).to(device) + self.actor = Actor(self.encoder.repr_dim, action_shape, feature_dim, hidden_dim).to(device) + + self.critic = Critic(self.encoder.repr_dim, action_shape, feature_dim, hidden_dim).to(device) + self.critic_target = Critic(self.encoder.repr_dim, action_shape, feature_dim, hidden_dim).to(device) + self.critic_target.load_state_dict(self.critic.state_dict()) + + # optimizers + self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=lr) + self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=lr) + self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=lr) + + # data augmentation + self.aug = augmentation + + # CQL + self.with_lagrange = with_lagrange + if self.with_lagrange: + self.target_action_gap = lagrange_thresh + self.log_alpha_prime = torch.zeros(1, requires_grad=True, device=device) + self.alpha_prime_optimizer = torch.optim.Adam([self.log_alpha_prime], lr=lr) + + ## min Q + self.temp = temp + self.cql_importance_sample = cql_importance_sample + self.min_q_weight = min_q_weight + self.num_random = num_random + + self.train() + self.critic_target.train() + + def train(self, training=True): + self.training = training + self.encoder.train(training) + self.actor.train(training) + self.critic.train(training) + + def act(self, obs, step, eval_mode): + obs = torch.as_tensor(obs, device=self.device) + obs = self.encoder(obs.unsqueeze(0)) + stddev = utils.schedule(self.stddev_schedule, step) + dist = self.actor(obs, stddev) + if eval_mode: + action = dist.mean + else: + action = dist.sample(clip=None) + if step < self.num_expl_steps: + action.uniform_(-1.0, 1.0) + return action.cpu().numpy()[0] + + def update_critic(self, obs, action, reward, discount, next_obs, step): + metrics = dict() + + with torch.no_grad(): + stddev = utils.schedule(self.stddev_schedule, step) + dist = self.actor(next_obs, stddev) + next_action = dist.sample(clip=self.stddev_clip) + target_Q1, target_Q2 = self.critic_target(next_obs, next_action) + target_V = torch.min(target_Q1, target_Q2) + target_Q = reward.float() + (discount * target_V) + + Q1, Q2 = self.critic(obs, action) + qf1_loss = F.mse_loss(Q1, target_Q) + qf2_loss = F.mse_loss(Q2, target_Q) + + # add CQL + if self.offline: + obs = obs.unsqueeze(1).repeat(1, self.num_random, 1) + next_obs = next_obs.unsqueeze(1).repeat(1, self.num_random, 1) + + random_actions_tensor = torch.FloatTensor(Q1.shape[0], self.num_random, action.shape[-1]) \ + .uniform_(-1, 1).to(self.device) + + with torch.no_grad(): + curr_dist = self.actor(obs, stddev) + curr_actions_tensor = 
curr_dist.sample(clip=self.stddev_clip) + curr_log_pis = curr_dist.log_prob(curr_actions_tensor).sum(dim=-1, keepdim=True) + + new_curr_dist = self.actor(next_obs, stddev) + new_curr_actions_tensor = new_curr_dist.sample(clip=self.stddev_clip) + new_log_pis = new_curr_dist.log_prob(new_curr_actions_tensor).sum(dim=-1, keepdim=True) + + q1_rand, q2_rand = self.critic(obs, random_actions_tensor) + q1_curr_actions, q2_curr_actions = self.critic(obs, curr_actions_tensor) + q1_next_actions, q2_next_actions = self.critic(obs, new_curr_actions_tensor) + + if self.cql_importance_sample: + random_density = np.log(0.5 ** curr_actions_tensor.shape[-1]) + cat_q1 = torch.cat( + [q1_rand - random_density, q1_next_actions - new_log_pis, q1_curr_actions - curr_log_pis], 1 + ) + cat_q2 = torch.cat( + [q2_rand - random_density, q2_next_actions - new_log_pis, q2_curr_actions - curr_log_pis], 1 + ) + else: + cat_q1 = torch.cat([q1_rand, Q1.unsqueeze(1), q1_next_actions, q1_curr_actions], 1) + cat_q2 = torch.cat([q2_rand, Q2.unsqueeze(1), q2_next_actions, q2_curr_actions], 1) + + min_qf1_loss = torch.logsumexp(cat_q1 / self.temp, dim=1, ).mean() * self.min_q_weight * self.temp + min_qf2_loss = torch.logsumexp(cat_q2 / self.temp, dim=1, ).mean() * self.min_q_weight * self.temp + + """Subtract the log likelihood of data""" + min_qf1_loss = min_qf1_loss - Q1.mean() * self.min_q_weight + min_qf2_loss = min_qf2_loss - Q2.mean() * self.min_q_weight + + if self.with_lagrange: + alpha_prime = torch.clamp(self.log_alpha_prime.exp(), min=0.0, max=1000000.0) + min_qf1_loss = alpha_prime * (min_qf1_loss - self.target_action_gap) + min_qf2_loss = alpha_prime * (min_qf2_loss - self.target_action_gap) + + self.alpha_prime_optimizer.zero_grad() + alpha_prime_loss = (-min_qf1_loss - min_qf2_loss) * 0.5 + alpha_prime_loss.backward(retain_graph=True) + self.alpha_prime_optimizer.step() + + qf1_loss = qf1_loss + min_qf1_loss + qf2_loss = qf2_loss + min_qf2_loss + + critic_loss = qf1_loss + qf2_loss + + if self.use_tb: + metrics['critic_target_q'] = target_Q.mean().item() + metrics['critic_q1'] = Q1.mean().item() + metrics['critic_q2'] = Q2.mean().item() + metrics['critic_loss'] = critic_loss.item() + if self.offline: + metrics['cql_critic_q1_rand'] = q1_rand.mean().item() + metrics['cql_critic_q2_rand'] = q2_rand.mean().item() + metrics['cql_critic_q1_curr_actions'] = q1_curr_actions.mean().item() + metrics['cql_critic_q2_curr_actions'] = q2_curr_actions.mean().item() + metrics['cql_critic_q1_next_actions'] = q1_next_actions.mean().item() + metrics['cql_critic_q2_next_actions'] = q2_next_actions.mean().item() + metrics['cql_critic_q1_loss'] = min_qf1_loss.item() + metrics['cql_critic_q2_loss'] = min_qf2_loss.item() + + # optimize encoder and critic + self.encoder_opt.zero_grad(set_to_none=True) + self.critic_opt.zero_grad(set_to_none=True) + critic_loss.backward() + self.critic_opt.step() + self.encoder_opt.step() + + return metrics + + def update_actor(self, obs, step): + metrics = dict() + + stddev = utils.schedule(self.stddev_schedule, step) + dist = self.actor(obs, stddev) + action = dist.sample(clip=self.stddev_clip) + log_prob = dist.log_prob(action).sum(-1, keepdim=True) + Q1, Q2 = self.critic(obs, action) + Q = torch.min(Q1, Q2) + + actor_policy_improvement_loss = -Q.mean() + actor_loss = actor_policy_improvement_loss + + # optimize actor + self.actor_opt.zero_grad(set_to_none=True) + actor_loss.backward() + self.actor_opt.step() + + if self.use_tb: + metrics['actor_loss'] = actor_policy_improvement_loss.item() + 
metrics['actor_logprob'] = log_prob.mean().item() + metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item() + + return metrics + + def update(self, replay_buffer, step): + metrics = dict() + + if step % self.update_every_steps != 0: + return metrics + + batch = next(replay_buffer) + obs, action, reward, discount, next_obs = utils.to_torch(batch, self.device) + + # augment + obs = self.aug(obs.float()) + next_obs = self.aug(next_obs.float()) + # encode + obs = self.encoder(obs) + with torch.no_grad(): + next_obs = self.encoder(next_obs) + + if self.use_tb: + metrics['batch_reward'] = reward.mean().item() + + # update critic + metrics.update(self.update_critic(obs, action, reward, discount, next_obs, step)) + + # update actor + metrics.update(self.update_actor(obs.detach(), step)) + + # update critic target + utils.soft_update_params(self.critic, self.critic_target, self.critic_target_tau) + + return metrics diff --git a/drqbc/dmc.py b/drqbc/dmc.py new file mode 100644 index 0000000000000000000000000000000000000000..dd6e413b171ea9b3144f190bef877b660ee467ed --- /dev/null +++ b/drqbc/dmc.py @@ -0,0 +1,268 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +from collections import deque +from typing import Any, NamedTuple + +import dm_env +import numpy as np +from dm_control import manipulation, suite +from dm_control.suite.wrappers import action_scale, pixels +from dm_env import StepType, specs + +from envs.distracting_control.suite import distracting_wrapper +import envs.fb_mtenv_dmc as fb_mtenv_dmc + + +def get_unique_int(difficulty: str) -> int: + return int.from_bytes(f'{difficulty}_0'.encode(), 'little') % (2 ** 31) + + +distracting_kwargs_lookup = { + 'easy': {'difficulty': 'easy', 'fixed_distraction': False}, + 'medium': {'difficulty': 'medium', 'fixed_distraction': False}, + 'hard': {'difficulty': 'hard', 'fixed_distraction': False}, + 'fixed_easy': {'difficulty': 'easy', 'fixed_distraction': True, 'color_seed': get_unique_int('easy'), + 'background_seed': get_unique_int('easy'), 'camera_seed': get_unique_int('easy')}, + 'fixed_medium': {'difficulty': 'medium', 'fixed_distraction': True, 'color_seed': get_unique_int('medium'), + 'background_seed': get_unique_int('medium'), 'camera_seed': get_unique_int('medium')}, + 'fixed_hard': {'difficulty': 'hard', 'fixed_distraction': True, 'color_seed': get_unique_int('hard'), + 'background_seed': get_unique_int('hard'), 'camera_seed': get_unique_int('hard')}, +} + +multitask_modes = [f'len_{i}' for i in range(1, 11, 1)] + + +class ExtendedTimeStep(NamedTuple): + step_type: Any + reward: Any + discount: Any + observation: Any + action: Any + + def first(self): + return self.step_type == StepType.FIRST + + def mid(self): + return self.step_type == StepType.MID + + def last(self): + return self.step_type == StepType.LAST + + def __getitem__(self, attr): + return getattr(self, attr) + + +class ActionRepeatWrapper(dm_env.Environment): + def __init__(self, env, num_repeats): + self._env = env + self._num_repeats = num_repeats + + def step(self, action): + reward = 0.0 + discount = 1.0 + for i in range(self._num_repeats): + time_step = self._env.step(action) + reward += (time_step.reward or 0.0) * discount + discount *= time_step.discount + if time_step.last(): + break + + return time_step._replace(reward=reward, discount=discount) + + def observation_spec(self): + return 
self._env.observation_spec() + + def action_spec(self): + return self._env.action_spec() + + def reset(self): + return self._env.reset() + + def __getattr__(self, name): + return getattr(self._env, name) + + +class FrameStackWrapper(dm_env.Environment): + def __init__(self, env, num_frames, pixels_key='pixels'): + self._env = env + self._num_frames = num_frames + self._frames = deque([], maxlen=num_frames) + self._pixels_key = pixels_key + + wrapped_obs_spec = env.observation_spec() + assert pixels_key in wrapped_obs_spec + + pixels_shape = wrapped_obs_spec[pixels_key].shape + # remove batch dim + if len(pixels_shape) == 4: + pixels_shape = pixels_shape[1:] + self._obs_spec = specs.BoundedArray(shape=np.concatenate( + [[pixels_shape[2] * num_frames], pixels_shape[:2]], axis=0), + dtype=np.uint8, + minimum=0, + maximum=255, + name='observation') + + def _transform_observation(self, time_step): + assert len(self._frames) == self._num_frames + obs = np.concatenate(list(self._frames), axis=0) + return time_step._replace(observation=obs) + + def _extract_pixels(self, time_step): + pixels = time_step.observation[self._pixels_key] + # remove batch dim + if len(pixels.shape) == 4: + pixels = pixels[0] + return pixels.transpose(2, 0, 1).copy() + + def reset(self): + time_step = self._env.reset() + pixels = self._extract_pixels(time_step) + for _ in range(self._num_frames): + self._frames.append(pixels) + return self._transform_observation(time_step) + + def step(self, action): + time_step = self._env.step(action) + pixels = self._extract_pixels(time_step) + self._frames.append(pixels) + return self._transform_observation(time_step) + + def observation_spec(self): + return self._obs_spec + + def action_spec(self): + return self._env.action_spec() + + def __getattr__(self, name): + return getattr(self._env, name) + + +class ActionDTypeWrapper(dm_env.Environment): + def __init__(self, env, dtype): + self._env = env + wrapped_action_spec = env.action_spec() + self._action_spec = specs.BoundedArray(wrapped_action_spec.shape, + dtype, + wrapped_action_spec.minimum, + wrapped_action_spec.maximum, + 'action') + + def step(self, action): + action = action.astype(self._env.action_spec().dtype) + return self._env.step(action) + + def observation_spec(self): + return self._env.observation_spec() + + def action_spec(self): + return self._action_spec + + def reset(self): + return self._env.reset() + + def __getattr__(self, name): + return getattr(self._env, name) + + +class ExtendedTimeStepWrapper(dm_env.Environment): + def __init__(self, env): + self._env = env + + def reset(self): + time_step = self._env.reset() + return self._augment_time_step(time_step) + + def step(self, action): + time_step = self._env.step(action) + return self._augment_time_step(time_step, action) + + def _augment_time_step(self, time_step, action=None): + if action is None: + action_spec = self.action_spec() + action = np.zeros(action_spec.shape, dtype=action_spec.dtype) + return ExtendedTimeStep(observation=time_step.observation, + step_type=time_step.step_type, + action=action, + reward=time_step.reward or 0.0, + discount=time_step.discount or 1.0) + + def observation_spec(self): + return self._env.observation_spec() + + def action_spec(self): + return self._env.action_spec() + + def __getattr__(self, name): + return getattr(self._env, name) + + +def make(name, frame_stack, action_repeat, seed, distracting_mode: str = None, multitask_mode: str = None): + pixel_hw = 84 + if 'offline' in name: + name = '_'.join(name.split('_')[1:3]) + 
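+ # Offline task names follow the pattern offline_<domain>_<task>_<dataset>, so only the two middle tokens are kept here.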
domain, task = name.split('_', 1) + # overwrite cup to ball_in_cup + domain = dict(cup='ball_in_cup').get(domain, domain) + + # make sure reward is not visualized + if multitask_mode is None: + if (domain, task) in suite.ALL_TASKS: + env = suite.load(domain, + task, + task_kwargs={'random': seed}, + visualize_reward=False) + pixels_key = 'pixels' + else: + name = f'{domain}_{task}_vision' + env = manipulation.load(name, seed=seed) + pixels_key = 'front_close' + else: + assert multitask_mode in multitask_modes, 'Unrecognised length setting' + idx = multitask_mode.split('_', 1)[1] + + if domain == 'walker' and task == 'walk': + xml = f'len_{idx}' + elif domain == 'cheetah' and task == 'run': + xml = f'torso_length_{idx}' + else: + raise Exception + + env = fb_mtenv_dmc.load( + domain_name=domain, + task_name=task, + task_kwargs={'xml_file_id': xml, 'random': seed}, + visualize_reward=False, + ) + pixels_key = 'pixels' + + # add wrappers + env = ActionDTypeWrapper(env, np.float32) + env = ActionRepeatWrapper(env, action_repeat) + env = action_scale.Wrapper(env, minimum=-1.0, maximum=+1.0) + # add renderings for clasical tasks + if (domain, task) in suite.ALL_TASKS: + # zoom in camera for quadruped + camera_id = dict(quadruped=2).get(domain, 0) + render_kwargs = dict(height=pixel_hw, width=pixel_hw, camera_id=camera_id) + if distracting_mode is not None: + assert distracting_mode in distracting_kwargs_lookup, 'Unrecognised distraction' + kwargs = distracting_kwargs_lookup[distracting_mode] + kwargs['pixels_only'] = True + kwargs['render_kwargs'] = render_kwargs + kwargs['background_dataset_path'] = "DAVIS/JPEGImages/480p/" + env = distracting_wrapper( + env, + domain, + **kwargs + ) + else: + env = pixels.Wrapper(env, + pixels_only=True, + render_kwargs=render_kwargs) + # stack several frames + env = FrameStackWrapper(env, frame_stack, pixels_key) + env = ExtendedTimeStepWrapper(env) + return env diff --git a/drqbc/drqv2.py b/drqbc/drqv2.py new file mode 100644 index 0000000000000000000000000000000000000000..ac62fb37271431bac70e76633cf5d54e206d2edb --- /dev/null +++ b/drqbc/drqv2.py @@ -0,0 +1,292 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
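+# DrQ-v2 agent with random-shift image augmentation; when offline=True the actor loss adds a behavioural-cloning MSE term on top of the -Q policy-improvement term.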
+import hydra +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +import utils + + +class RandomShiftsAug(nn.Module): + def __init__(self, pad=4): + super().__init__() + self.pad = pad + + def forward(self, x): + n, c, h, w = x.size() + assert h == w + padding = tuple([self.pad] * 4) + x = F.pad(x, padding, 'replicate') + eps = 1.0 / (h + 2 * self.pad) + arange = torch.linspace(-1.0 + eps, + 1.0 - eps, + h + 2 * self.pad, + device=x.device, + dtype=x.dtype)[:h] + arange = arange.unsqueeze(0).repeat(h, 1).unsqueeze(2) + base_grid = torch.cat([arange, arange.transpose(1, 0)], dim=2) + base_grid = base_grid.unsqueeze(0).repeat(n, 1, 1, 1) + + shift = torch.randint(0, + 2 * self.pad + 1, + size=(n, 1, 1, 2), + device=x.device, + dtype=x.dtype) + shift *= 2.0 / (h + 2 * self.pad) + + grid = base_grid + shift + return F.grid_sample(x, + grid, + padding_mode='zeros', + align_corners=False) + + +class NoShiftAug(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x + + +class Encoder(nn.Module): + def __init__(self, obs_shape): + super().__init__() + + assert len(obs_shape) == 3 + self.repr_dim = 32 * 35 * 35 + + self.convnet = nn.Sequential(nn.Conv2d(obs_shape[0], 32, 3, stride=2), + nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1), + nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1), + nn.ReLU(), nn.Conv2d(32, 32, 3, stride=1), + nn.ReLU()) + + self.apply(utils.weight_init) + + def forward(self, obs): + obs = obs / 255.0 - 0.5 + h = self.convnet(obs) + h = h.view(h.shape[0], -1) + return h + + +class Actor(nn.Module): + def __init__(self, repr_dim, action_shape, feature_dim, hidden_dim): + super().__init__() + + self.trunk = nn.Sequential(nn.Linear(repr_dim, feature_dim), + nn.LayerNorm(feature_dim), nn.Tanh()) + + self.policy = nn.Sequential(nn.Linear(feature_dim, hidden_dim), + nn.ReLU(inplace=True), + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(inplace=True), + nn.Linear(hidden_dim, action_shape[0])) + + self.apply(utils.weight_init) + + def forward(self, obs, std): + h = self.trunk(obs) + + mu = self.policy(h) + mu = torch.tanh(mu) + std = torch.ones_like(mu) * std + + dist = utils.TruncatedNormal(mu, std) + return dist + + +class Critic(nn.Module): + def __init__(self, repr_dim, action_shape, feature_dim, hidden_dim): + super().__init__() + + self.trunk = nn.Sequential(nn.Linear(repr_dim, feature_dim), + nn.LayerNorm(feature_dim), nn.Tanh()) + + self.Q1 = nn.Sequential( + nn.Linear(feature_dim + action_shape[0], hidden_dim), + nn.ReLU(inplace=True), nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(inplace=True), nn.Linear(hidden_dim, 1)) + + self.Q2 = nn.Sequential( + nn.Linear(feature_dim + action_shape[0], hidden_dim), + nn.ReLU(inplace=True), nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(inplace=True), nn.Linear(hidden_dim, 1)) + + self.apply(utils.weight_init) + + def forward(self, obs, action): + h = self.trunk(obs) + h_action = torch.cat([h, action], dim=-1) + q1 = self.Q1(h_action) + q2 = self.Q2(h_action) + + return q1, q2 + + +class DrQV2Agent: + def __init__(self, obs_shape, action_shape, device, lr, feature_dim, + hidden_dim, critic_target_tau, num_expl_steps, + update_every_steps, stddev_schedule, stddev_clip, use_tb, + offline=False, bc_weight=2.5, augmentation=RandomShiftsAug(pad=4), + use_bc=True): + self.device = device + self.critic_target_tau = critic_target_tau + self.update_every_steps = update_every_steps + self.use_tb = use_tb + self.num_expl_steps = num_expl_steps + self.stddev_schedule = stddev_schedule + 
self.stddev_clip = stddev_clip + self.offline = offline + self.bc_weight = bc_weight + self.use_bc = use_bc + + # models + self.encoder = Encoder(obs_shape).to(device) + self.actor = Actor(self.encoder.repr_dim, action_shape, feature_dim, + hidden_dim).to(device) + + self.critic = Critic(self.encoder.repr_dim, action_shape, feature_dim, + hidden_dim).to(device) + self.critic_target = Critic(self.encoder.repr_dim, action_shape, + feature_dim, hidden_dim).to(device) + self.critic_target.load_state_dict(self.critic.state_dict()) + + # optimizers + self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=lr) + self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=lr) + self.critic_opt = torch.optim.Adam(self.critic.parameters(), lr=lr) + + # data augmentation + self.aug = augmentation + + self.train() + self.critic_target.train() + + def train(self, training=True): + self.training = training + self.encoder.train(training) + self.actor.train(training) + self.critic.train(training) + + def act(self, obs, step, eval_mode): + obs = torch.as_tensor(obs, device=self.device) + obs = self.encoder(obs.unsqueeze(0)) + stddev = utils.schedule(self.stddev_schedule, step) + dist = self.actor(obs, stddev) + if eval_mode: + action = dist.mean + else: + action = dist.sample(clip=None) + if step < self.num_expl_steps: + action.uniform_(-1.0, 1.0) + return action.cpu().numpy()[0] + + def update_critic(self, obs, action, reward, discount, next_obs, step): + metrics = dict() + + with torch.no_grad(): + stddev = utils.schedule(self.stddev_schedule, step) + dist = self.actor(next_obs, stddev) + next_action = dist.sample(clip=self.stddev_clip) + target_Q1, target_Q2 = self.critic_target(next_obs, next_action) + target_V = torch.min(target_Q1, target_Q2) + target_Q = reward.float() + (discount * target_V) + + Q1, Q2 = self.critic(obs, action) + critic_loss = F.mse_loss(Q1, target_Q) + F.mse_loss(Q2, target_Q) + + if self.use_tb: + metrics['critic_target_q'] = target_Q.mean().item() + metrics['critic_q1'] = Q1.mean().item() + metrics['critic_q2'] = Q2.mean().item() + metrics['critic_loss'] = critic_loss.item() + + # optimize encoder and critic + self.encoder_opt.zero_grad(set_to_none=True) + self.critic_opt.zero_grad(set_to_none=True) + critic_loss.backward() + self.critic_opt.step() + self.encoder_opt.step() + + return metrics + + def update_actor(self, obs, step, behavioural_action=None): + metrics = dict() + + stddev = utils.schedule(self.stddev_schedule, step) + dist = self.actor(obs, stddev) + action = dist.sample(clip=self.stddev_clip) + log_prob = dist.log_prob(action).sum(-1, keepdim=True) + Q1, Q2 = self.critic(obs, action) + Q = torch.min(Q1, Q2) + + actor_policy_improvement_loss = -Q.mean() + + actor_loss = actor_policy_improvement_loss + + # offline BC Loss + if self.offline: + actor_bc_loss = F.mse_loss(action, behavioural_action) + # Eq. 
5 of arXiv:2106.06860 + lam = self.bc_weight / Q.detach().abs().mean() + if self.use_bc: + actor_loss = actor_policy_improvement_loss * lam + actor_bc_loss + else: + actor_loss = actor_policy_improvement_loss * lam + + # optimize actor + self.actor_opt.zero_grad(set_to_none=True) + actor_loss.backward() + self.actor_opt.step() + + if self.use_tb: + metrics['actor_loss'] = actor_policy_improvement_loss.item() + metrics['actor_logprob'] = log_prob.mean().item() + metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item() + if self.offline: + metrics['actor_bc_loss'] = actor_bc_loss.item() + + return metrics + + def update(self, replay_buffer, step): + metrics = dict() + + if step % self.update_every_steps != 0: + return metrics + + batch = next(replay_buffer) + obs, action, reward, discount, next_obs = utils.to_torch( + batch, self.device) + + # augment + obs = self.aug(obs.float()) + next_obs = self.aug(next_obs.float()) + # encode + obs = self.encoder(obs) + with torch.no_grad(): + next_obs = self.encoder(next_obs) + + if self.use_tb: + metrics['batch_reward'] = reward.mean().item() + + # update critic + metrics.update( + self.update_critic(obs, action, reward, discount, next_obs, step)) + + # update actor + if self.offline: + metrics.update(self.update_actor(obs.detach(), step, action.detach())) + else: + metrics.update(self.update_actor(obs.detach(), step)) + + # update critic target + utils.soft_update_params(self.critic, self.critic_target, + self.critic_target_tau) + + return metrics diff --git a/drqbc/logger.py b/drqbc/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..dd7060a8f5e5ccd3ce05fdd4c76dde98b902a7ff --- /dev/null +++ b/drqbc/logger.py @@ -0,0 +1,213 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
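The actor update above balances policy improvement against behavioural cloning following TD3+BC (Eq. 5 of arXiv:2106.06860). A self-contained sketch of that loss; the function name and toy tensors are illustrative only, not part of the repository API:

```
import torch
import torch.nn.functional as F


def bc_regularised_actor_loss(q_values, policy_actions, dataset_actions, bc_weight=2.5):
    # lam rescales the Q term so its magnitude is comparable to the BC term.
    lam = bc_weight / q_values.detach().abs().mean()
    policy_improvement = -q_values.mean()
    behavioural_cloning = F.mse_loss(policy_actions, dataset_actions)
    return lam * policy_improvement + behavioural_cloning


# Toy tensors standing in for critic outputs, policy actions and dataset actions.
q = torch.randn(256, 1)
pi_actions = torch.tanh(torch.randn(256, 6))
data_actions = torch.empty(256, 6).uniform_(-1.0, 1.0)
loss = bc_regularised_actor_loss(q, pi_actions, data_actions)
```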
+import csv +import datetime +from collections import defaultdict + +import numpy as np +import torch +# import torchvision +from termcolor import colored +from torch.utils.tensorboard import SummaryWriter + +COMMON_TRAIN_FORMAT = [('frame', 'F', 'int'), ('step', 'S', 'int'), + ('episode', 'E', 'int'), ('episode_length', 'L', 'int'), + ('episode_reward', 'R', 'float'), + ('buffer_size', 'BS', 'int'), ('fps', 'FPS', 'float'), + ('total_time', 'T', 'time')] + +OFFLINE_TRAIN_FORMAT = [('step', 'S', 'int'), ('buffer_size', 'BS', 'int'), + ('fps', 'FPS', 'float'), ('total_time', 'T', 'time')] + +COMMON_EVAL_FORMAT = [('frame', 'F', 'int'), ('step', 'S', 'int'), + ('episode', 'E', 'int'), ('episode_length', 'L', 'int'), + ('episode_reward', 'R', 'float'), + ('total_time', 'T', 'time')] + +DISTRACTING_EVAL_FORMAT = [('frame', 'F', 'int'), ('step', 'S', 'int'), + ('episode', 'E', 'int'), ('episode_length', 'L', 'int'), + ('episode_reward', 'R', 'float'), + ('easy_episode_reward', 'EER', 'float'), + ('medium_episode_reward', 'MER', 'float'), + ('hard_episode_reward', 'HER', 'float'), + ('fixed_easy_episode_reward', 'FEER', 'float'), + ('fixed_medium_episode_reward', 'FMER', 'float'), + ('fixed_hard_episode_reward', 'FHER', 'float'), + ('total_time', 'T', 'time')] + +MULTITASK_EVAL_FORMAT = [('frame', 'F', 'int'), ('step', 'S', 'int'), + ('episode', 'E', 'int'), ('episode_length', 'L', 'int'), + ('episode_reward', 'R', 'float'), + ('len1_episode_reward', 'R1', 'float'), + ('len2_episode_reward', 'R2', 'float'), + ('len3_episode_reward', 'R3', 'float'), + ('len4_episode_reward', 'R4', 'float'), + ('len5_episode_reward', 'R5', 'float'), + ('len6_episode_reward', 'R6', 'float'), + ('len7_episode_reward', 'R7', 'float'), + ('len8_episode_reward', 'R8', 'float'), + ('len9_episode_reward', 'R9', 'float'), + ('len10_episode_reward', 'R10', 'float'), + ('total_time', 'T', 'time')] + + +class AverageMeter(object): + def __init__(self): + self._sum = 0 + self._count = 0 + + def update(self, value, n=1): + self._sum += value + self._count += n + + def value(self): + return self._sum / max(1, self._count) + + +class MetersGroup(object): + def __init__(self, csv_file_name, formating): + self._csv_file_name = csv_file_name + self._formating = formating + self._meters = defaultdict(AverageMeter) + self._csv_file = None + self._csv_writer = None + + def log(self, key, value, n=1): + self._meters[key].update(value, n) + + def _prime_meters(self): + data = dict() + for key, meter in self._meters.items(): + if key.startswith('train'): + key = key[len('train') + 1:] + else: + key = key[len('eval') + 1:] + key = key.replace('/', '_') + data[key] = meter.value() + return data + + def _remove_old_entries(self, data): + rows = [] + with self._csv_file_name.open('r') as f: + reader = csv.DictReader(f) + for row in reader: + if float(row['step']) >= data['step']: + break + rows.append(row) + with self._csv_file_name.open('w') as f: + writer = csv.DictWriter(f, + fieldnames=sorted(data.keys()), + restval=0.0) + writer.writeheader() + for row in rows: + writer.writerow(row) + + def _dump_to_csv(self, data): + if self._csv_writer is None: + should_write_header = True + if self._csv_file_name.exists(): + self._remove_old_entries(data) + should_write_header = False + + self._csv_file = self._csv_file_name.open('a') + self._csv_writer = csv.DictWriter(self._csv_file, + fieldnames=sorted(data.keys()), + restval=0.0) + if should_write_header: + self._csv_writer.writeheader() + + self._csv_writer.writerow(data) + 
self._csv_file.flush() + + def _format(self, key, value, ty): + if ty == 'int': + value = int(value) + return f'{key}: {value}' + elif ty == 'float': + return f'{key}: {value:.04f}' + elif ty == 'time': + value = str(datetime.timedelta(seconds=int(value))) + return f'{key}: {value}' + else: + raise f'invalid format type: {ty}' + + def _dump_to_console(self, data, prefix): + prefix = colored(prefix, 'yellow' if prefix == 'train' else 'green') + pieces = [f'| {prefix: <14}'] + for key, disp_key, ty in self._formating: + value = data.get(key, 0) + pieces.append(self._format(disp_key, value, ty)) + print(' | '.join(pieces)) + + def dump(self, step, prefix): + if len(self._meters) == 0: + return + data = self._prime_meters() + if 'frame' in data: + data['frame'] = step + self._dump_to_csv(data) + self._dump_to_console(data, prefix) + self._meters.clear() + + +class Logger(object): + def __init__(self, log_dir, use_tb, offline=False, distracting_eval=False, multitask_eval=False): + self._log_dir = log_dir + train_formatting = OFFLINE_TRAIN_FORMAT if offline else COMMON_TRAIN_FORMAT + if distracting_eval: + eval_formatting = DISTRACTING_EVAL_FORMAT + elif multitask_eval: + eval_formatting = MULTITASK_EVAL_FORMAT + else: + eval_formatting = COMMON_EVAL_FORMAT + self._train_mg = MetersGroup(log_dir / 'train.csv', + formating=train_formatting) + self._eval_mg = MetersGroup(log_dir / 'eval.csv', + formating=eval_formatting) + if use_tb: + self._sw = SummaryWriter(str(log_dir / 'tb')) + else: + self._sw = None + + def _try_sw_log(self, key, value, step): + if self._sw is not None: + self._sw.add_scalar(key, value, step) + + def log(self, key, value, step): + assert key.startswith('train') or key.startswith('eval') + if type(value) == torch.Tensor: + value = value.item() + self._try_sw_log(key, value, step) + mg = self._train_mg if key.startswith('train') else self._eval_mg + mg.log(key, value) + + def log_metrics(self, metrics, step, ty): + for key, value in metrics.items(): + self.log(f'{ty}/{key}', value, step) + + def dump(self, step, ty=None): + if ty is None or ty == 'eval': + self._eval_mg.dump(step, 'eval') + if ty is None or ty == 'train': + self._train_mg.dump(step, 'train') + + def log_and_dump_ctx(self, step, ty): + return LogAndDumpCtx(self, step, ty) + + +class LogAndDumpCtx: + def __init__(self, logger, step, ty): + self._logger = logger + self._step = step + self._ty = ty + + def __enter__(self): + return self + + def __call__(self, key, value): + self._logger.log(f'{self._ty}/{key}', value, self._step) + + def __exit__(self, *args): + self._logger.dump(self._step, self._ty) diff --git a/drqbc/numpy_replay_buffer.py b/drqbc/numpy_replay_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..98be22e8d2ad5c8fdb3386c07faaeb849703d6af --- /dev/null +++ b/drqbc/numpy_replay_buffer.py @@ -0,0 +1,141 @@ +import numpy as np +import abc + + +class AbstractReplayBuffer(abc.ABC): + @abc.abstractmethod + def add(self, time_step): + pass + + @abc.abstractmethod + def __next__(self, ): + pass + + @abc.abstractmethod + def __len__(self, ): + pass + + +class EfficientReplayBuffer(AbstractReplayBuffer): + '''Fast + efficient replay buffer implementation in numpy.''' + + def __init__(self, buffer_size, batch_size, nstep, discount, frame_stack, + data_specs=None, sarsa=False): + self.buffer_size = buffer_size + self.data_dict = {} + self.index = -1 + self.traj_index = 0 + self.frame_stack = frame_stack + self._recorded_frames = frame_stack + 1 + self.batch_size = batch_size + 
self.nstep = nstep + self.discount = discount + self.full = False + self.discount_vec = np.power(discount, np.arange(nstep)) # n_step - first dim should broadcast + self.next_dis = discount ** nstep + self.sarsa = sarsa + + def _initial_setup(self, time_step): + self.index = 0 + self.obs_shape = list(time_step.observation.shape) + self.ims_channels = self.obs_shape[0] // self.frame_stack + self.act_shape = time_step.action.shape + + self.obs = np.zeros([self.buffer_size, self.ims_channels, *self.obs_shape[1:]], dtype=np.uint8) + self.act = np.zeros([self.buffer_size, *self.act_shape], dtype=np.float32) + self.rew = np.zeros([self.buffer_size], dtype=np.float32) + self.dis = np.zeros([self.buffer_size], dtype=np.float32) + self.valid = np.zeros([self.buffer_size], dtype=np.bool_) + + def add_data_point(self, time_step): + first = time_step.first() + latest_obs = time_step.observation[-self.ims_channels:] + if first: + end_index = self.index + self.frame_stack + end_invalid = end_index + self.frame_stack + 1 + if end_invalid > self.buffer_size: + if end_index > self.buffer_size: + end_index = end_index % self.buffer_size + self.obs[self.index:self.buffer_size] = latest_obs + self.obs[0:end_index] = latest_obs + self.full = True + else: + self.obs[self.index:end_index] = latest_obs + end_invalid = end_invalid % self.buffer_size + self.valid[self.index:self.buffer_size] = False + self.valid[0:end_invalid] = False + else: + self.obs[self.index:end_index] = latest_obs + self.valid[self.index:end_invalid] = False + self.index = end_index + self.traj_index = 1 + else: + np.copyto(self.obs[self.index], latest_obs) # Check most recent image + np.copyto(self.act[self.index], time_step.action) + self.rew[self.index] = time_step.reward + self.dis[self.index] = time_step.discount + self.valid[(self.index + self.frame_stack) % self.buffer_size] = False + if self.traj_index >= self.nstep: + self.valid[(self.index - self.nstep + 1) % self.buffer_size] = True + self.index += 1 + self.traj_index += 1 + if self.index == self.buffer_size: + self.index = 0 + self.full = True + + def add(self, time_step): + if self.index == -1: + self._initial_setup(time_step) + self.add_data_point(time_step) + + def __next__(self, ): + indices = np.random.choice(self.valid.nonzero()[0], size=self.batch_size) + return self.gather_nstep_indices(indices) + + def gather_nstep_indices(self, indices): + n_samples = indices.shape[0] + all_gather_ranges = np.stack([np.arange(indices[i] - self.frame_stack, indices[i] + self.nstep) + for i in range(n_samples)], axis=0) % self.buffer_size + gather_ranges = all_gather_ranges[:, self.frame_stack:] # bs x nstep + obs_gather_ranges = all_gather_ranges[:, :self.frame_stack] + nobs_gather_ranges = all_gather_ranges[:, -self.frame_stack:] + + all_rewards = self.rew[gather_ranges] + + # Could implement below operation as a matmul in pytorch for marginal additional speed improvement + rew = np.sum(all_rewards * self.discount_vec, axis=1, keepdims=True) + + obs = np.reshape(self.obs[obs_gather_ranges], [n_samples, *self.obs_shape]) + nobs = np.reshape(self.obs[nobs_gather_ranges], [n_samples, *self.obs_shape]) + + act = self.act[indices] + dis = np.expand_dims(self.next_dis * self.dis[nobs_gather_ranges[:, -1]], axis=-1) + + if self.sarsa: + nact = self.act[indices + self.nstep] + return (obs, act, rew, dis, nobs, nact) + + return (obs, act, rew, dis, nobs) + + def __len__(self): + if self.full: + return self.buffer_size + else: + return self.index + + def get_train_and_val_indices(self, 
validation_percentage): + all_indices = self.valid.nonzero()[0] + num_indices = all_indices.shape[0] + num_val = int(num_indices * validation_percentage) + np.random.shuffle(all_indices) + val_indices, train_indices = np.split(all_indices, + [num_val]) + return train_indices, val_indices + + def get_obs_act_batch(self, indices): + n_samples = indices.shape[0] + obs_gather_ranges = np.stack([np.arange(indices[i] - self.frame_stack, indices[i]) + for i in range(n_samples)], axis=0) % self.buffer_size + obs = np.reshape(self.obs[obs_gather_ranges], [n_samples, *self.obs_shape]) + act = self.act[indices] + return obs, act diff --git a/drqbc/replay_buffer.py b/drqbc/replay_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..5e199a9b14c405dd75e1805d0ab5841c63e4a099 --- /dev/null +++ b/drqbc/replay_buffer.py @@ -0,0 +1,190 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import datetime +import io +import random +import traceback +from collections import defaultdict + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import IterableDataset + + +def episode_len(episode): + # subtract -1 because the dummy first transition + return next(iter(episode.values())).shape[0] - 1 + + +def save_episode(episode, fn): + with io.BytesIO() as bs: + np.savez_compressed(bs, **episode) + bs.seek(0) + with fn.open('wb') as f: + f.write(bs.read()) + + +def load_episode(fn): + with fn.open('rb') as f: + episode = np.load(f) + episode = {k: episode[k] for k in episode.keys()} + return episode + + +class ReplayBufferStorage: + def __init__(self, data_specs, replay_dir): + self._data_specs = data_specs + self._replay_dir = replay_dir + replay_dir.mkdir(exist_ok=True) + self._current_episode = defaultdict(list) + self._preload() + + def __len__(self): + return self._num_transitions + + def add(self, time_step): + for spec in self._data_specs: + value = time_step[spec.name] + if np.isscalar(value): + value = np.full(spec.shape, value, spec.dtype) + assert spec.shape == value.shape and spec.dtype == value.dtype + self._current_episode[spec.name].append(value) + if time_step.last(): + episode = dict() + for spec in self._data_specs: + value = self._current_episode[spec.name] + episode[spec.name] = np.array(value, spec.dtype) + self._current_episode = defaultdict(list) + self._store_episode(episode) + + def _preload(self): + self._num_episodes = 0 + self._num_transitions = 0 + for fn in self._replay_dir.glob('*.npz'): + _, _, eps_len = fn.stem.split('_') + self._num_episodes += 1 + self._num_transitions += int(eps_len) + + def _store_episode(self, episode): + eps_idx = self._num_episodes + eps_len = episode_len(episode) + self._num_episodes += 1 + self._num_transitions += eps_len + ts = datetime.datetime.now().strftime('%Y%m%dT%H%M%S') + eps_fn = f'{ts}_{eps_idx}_{eps_len}.npz' + save_episode(episode, self._replay_dir / eps_fn) + + +class ReplayBuffer(IterableDataset): + def __init__(self, replay_dir, max_size, num_workers, nstep, discount, + fetch_every, save_snapshot): + self._replay_dir = replay_dir + self._size = 0 + self._max_size = max_size + self._num_workers = max(1, num_workers) + self._episode_fns = [] + self._episodes = dict() + self._nstep = nstep + self._discount = discount + self._fetch_every = fetch_every + self._samples_since_last_fetch = fetch_every + self._save_snapshot = save_snapshot + + def 
_sample_episode(self): + eps_fn = random.choice(self._episode_fns) + return self._episodes[eps_fn] + + def _store_episode(self, eps_fn): + try: + episode = load_episode(eps_fn) + except: + return False + eps_len = episode_len(episode) + while eps_len + self._size > self._max_size: + early_eps_fn = self._episode_fns.pop(0) + early_eps = self._episodes.pop(early_eps_fn) + self._size -= episode_len(early_eps) + early_eps_fn.unlink(missing_ok=True) + self._episode_fns.append(eps_fn) + self._episode_fns.sort() + self._episodes[eps_fn] = episode + self._size += eps_len + + if not self._save_snapshot: + eps_fn.unlink(missing_ok=True) + return True + + def _try_fetch(self): + if self._samples_since_last_fetch < self._fetch_every: + return + self._samples_since_last_fetch = 0 + try: + worker_id = torch.utils.data.get_worker_info().id + except: + worker_id = 0 + eps_fns = sorted(self._replay_dir.glob('*.npz'), reverse=True) + fetched_size = 0 + for eps_fn in eps_fns: + eps_idx, eps_len = [int(x) for x in eps_fn.stem.split('_')[1:]] + if eps_idx % self._num_workers != worker_id: + continue + if eps_fn in self._episodes.keys(): + break + if fetched_size + eps_len > self._max_size: + break + fetched_size += eps_len + if not self._store_episode(eps_fn): + break + + def _sample(self): + try: + self._try_fetch() + except: + traceback.print_exc() + self._samples_since_last_fetch += 1 + episode = self._sample_episode() + # add +1 for the first dummy transition + idx = np.random.randint(0, episode_len(episode) - self._nstep + 1) + 1 + obs = episode['observation'][idx - 1] + action = episode['action'][idx] + next_obs = episode['observation'][idx + self._nstep - 1] + reward = np.zeros_like(episode['reward'][idx]) + discount = np.ones_like(episode['discount'][idx]) + for i in range(self._nstep): + step_reward = episode['reward'][idx + i] + reward += discount * step_reward + discount *= episode['discount'][idx + i] * self._discount + return (obs, action, reward, discount, next_obs) + + def __iter__(self): + while True: + yield self._sample() + + +def _worker_init_fn(worker_id): + seed = np.random.get_state()[1][0] + worker_id + np.random.seed(seed) + random.seed(seed) + + +def make_replay_loader(replay_dir, max_size, batch_size, num_workers, + save_snapshot, nstep, discount): + max_size_per_worker = max_size // max(1, num_workers) + + iterable = ReplayBuffer(replay_dir, + max_size_per_worker, + num_workers, + nstep, + discount, + fetch_every=1000, + save_snapshot=save_snapshot) + + loader = torch.utils.data.DataLoader(iterable, + batch_size=batch_size, + num_workers=num_workers, + pin_memory=True, + worker_init_fn=_worker_init_fn) + return loader diff --git a/drqbc/train.py b/drqbc/train.py new file mode 100644 index 0000000000000000000000000000000000000000..c22d2ba79e9ad7255eea92022d9a445d7c6b28de --- /dev/null +++ b/drqbc/train.py @@ -0,0 +1,324 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
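Both replay buffers compute n-step targets: `ReplayBuffer._sample` accumulates them iteratively, while `EfficientReplayBuffer.gather_nstep_indices` uses a precomputed discount vector. A small NumPy check of the equivalence (values are illustrative):

```
import numpy as np

nstep, gamma = 3, 0.99
rewards = np.array([1.0, 0.5, 0.25], dtype=np.float32)
env_discounts = np.ones(nstep, dtype=np.float32)  # 0.0 at terminal steps

# Iterative accumulation, as in ReplayBuffer._sample.
ret, disc = 0.0, 1.0
for i in range(nstep):
    ret += disc * rewards[i]
    disc *= env_discounts[i] * gamma

# Vectorised form, as in EfficientReplayBuffer.gather_nstep_indices.
discount_vec = np.power(gamma, np.arange(nstep))
ret_vec = np.sum(rewards * discount_vec)

assert np.isclose(ret, ret_vec)  # identical when no terminal falls inside the window
```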
+import warnings + +warnings.filterwarnings('ignore', category=DeprecationWarning) + +import os + +os.environ['MKL_SERVICE_FORCE_INTEL'] = '1' +os.environ['MUJOCO_GL'] = 'egl' + +from pathlib import Path +import hydra +import numpy as np +import torch +from dm_env import specs + +import dmc +import utils +from logger import Logger +from numpy_replay_buffer import EfficientReplayBuffer +from video import TrainVideoRecorder, VideoRecorder +from utils import load_offline_dataset_into_buffer + +torch.backends.cudnn.benchmark = True + + +def make_agent(obs_spec, action_spec, cfg): + cfg.obs_shape = obs_spec.shape + cfg.action_shape = action_spec.shape + return hydra.utils.instantiate(cfg) + + +class Workspace: + def __init__(self, cfg): + self.work_dir = Path.cwd() + print(f'workspace: {self.work_dir}') + + self.cfg = cfg + utils.set_seed_everywhere(cfg.seed) + self.device = torch.device(cfg.device) + self.setup() + + self.agent = make_agent(self.train_env.observation_spec(), + self.train_env.action_spec(), + self.cfg.agent) + self.timer = utils.Timer() + self._global_step = 0 + self._global_episode = 0 + + def setup(self): + # create logger + self.logger = Logger(self.work_dir, use_tb=self.cfg.use_tb, offline=self.cfg.offline, + distracting_eval=self.cfg.eval_on_distracting, multitask_eval=self.cfg.eval_on_multitask) + # create envs + self.train_env = dmc.make(self.cfg.task_name, self.cfg.frame_stack, + self.cfg.action_repeat, self.cfg.seed, self.cfg.distracting_mode) + self.eval_env = dmc.make(self.cfg.task_name, self.cfg.frame_stack, + self.cfg.action_repeat, self.cfg.seed, self.cfg.distracting_mode) + # create replay buffer + data_specs = (self.train_env.observation_spec(), + self.train_env.action_spec(), + specs.Array((1,), np.float32, 'reward'), + specs.Array((1,), np.float32, 'discount')) + + self.replay_buffer = EfficientReplayBuffer(self.cfg.replay_buffer_size, + self.cfg.batch_size, + self.cfg.nstep, + self.cfg.discount, + self.cfg.frame_stack, + data_specs) + + self.video_recorder = VideoRecorder( + self.work_dir if self.cfg.save_video else None) + self.train_video_recorder = TrainVideoRecorder( + self.work_dir if self.cfg.save_train_video else None) + + self.eval_on_distracting = self.cfg.eval_on_distracting + self.eval_on_multitask = self.cfg.eval_on_multitask + + @property + def global_step(self): + return self._global_step + + @property + def global_episode(self): + return self._global_episode + + @property + def global_frame(self): + return self.global_step * self.cfg.action_repeat + + def eval(self): + step, episode, total_reward = 0, 0, 0 + eval_until_episode = utils.Until(self.cfg.num_eval_episodes) + + while eval_until_episode(episode): + time_step = self.eval_env.reset() + self.video_recorder.init(self.eval_env, enabled=(episode == 0)) + while not time_step.last(): + with torch.no_grad(), utils.eval_mode(self.agent): + action = self.agent.act(time_step.observation, + self.global_step, + eval_mode=True) + time_step = self.eval_env.step(action) + self.video_recorder.record(self.eval_env) + total_reward += time_step.reward + step += 1 + + episode += 1 + self.video_recorder.save(f'{self.global_frame}.mp4') + + with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log: + log('episode_reward', total_reward / episode) + log('episode_length', step * self.cfg.action_repeat / episode) + log('episode', self.global_episode) + log('step', self.global_step) + + def eval_distracting(self, record_video): + distraction_modes = ['easy', 'medium', 'hard', 'fixed_easy', 
'fixed_medium', 'fixed_hard'] + if not hasattr(self, 'distracting_envs'): + self.distracting_envs = [] + for distraction_mode in distraction_modes: + env = dmc.make(self.cfg.task_name, self.cfg.frame_stack, + self.cfg.action_repeat, self.cfg.seed, distracting_mode=distraction_mode) + self.distracting_envs.append(env) + for env, env_name in zip(self.distracting_envs, distraction_modes): + self.eval_single_env(env, env_name, record_video) + + def eval_multitask(self, record_video): + multitask_modes = [f'len_{i}' for i in range(1, 11, 1)] + if not hasattr(self, 'multitask_envs'): + self.multitask_envs = [] + for multitask_mode in multitask_modes: + env = dmc.make(self.cfg.task_name, self.cfg.frame_stack, + self.cfg.action_repeat, self.cfg.seed, multitask_mode=multitask_mode) + self.multitask_envs.append(env) + for env, env_name in zip(self.multitask_envs, multitask_modes): + self.eval_single_env(env, env_name, record_video) + + def eval_single_env(self, env, env_name, save_video): + step, episode, total_reward = 0, 0, 0 + eval_until_episode = utils.Until(self.cfg.num_eval_episodes) + + while eval_until_episode(episode): + time_step = env.reset() + self.video_recorder.init(env, enabled=((episode == 0) and save_video)) + while not time_step.last(): + with torch.no_grad(), utils.eval_mode(self.agent): + action = self.agent.act(time_step.observation, + self.global_step, + eval_mode=True) + time_step = env.step(action) + self.video_recorder.record(env) + total_reward += time_step.reward + step += 1 + + episode += 1 + self.video_recorder.save(f'{env_name}_{self.global_frame}.mp4') + + self.logger.log(f'eval/{env_name}_episode_reward', total_reward / episode, self.global_frame) + + def train(self): + # predicates + train_until_step = utils.Until(self.cfg.num_train_frames, + self.cfg.action_repeat) + seed_until_step = utils.Until(self.cfg.num_seed_frames, + self.cfg.action_repeat) + eval_every_step = utils.Every(self.cfg.eval_every_frames, + self.cfg.action_repeat) + # only in distracting evaluation mode + eval_save_vid_every_step = utils.Every(self.cfg.eval_save_vid_every_step, + self.cfg.action_repeat) + + episode_step, episode_reward = 0, 0 + time_step = self.train_env.reset() + self.replay_storage.add(time_step) + self.train_video_recorder.init(time_step.observation) + metrics = None + while train_until_step(self.global_step): + if time_step.last(): + self._global_episode += 1 + self.train_video_recorder.save(f'{self.global_frame}.mp4') + # wait until all the metrics schema is populated + if metrics is not None: + # log stats + elapsed_time, total_time = self.timer.reset() + episode_frame = episode_step * self.cfg.action_repeat + with self.logger.log_and_dump_ctx(self.global_frame, + ty='train') as log: + log('fps', episode_frame / elapsed_time) + log('total_time', total_time) + log('episode_reward', episode_reward) + log('episode_length', episode_frame) + log('episode', self.global_episode) + log('buffer_size', len(self.replay_storage)) + log('step', self.global_step) + + # reset env + time_step = self.train_env.reset() + self.replay_storage.add(time_step) + self.train_video_recorder.init(time_step.observation) + # try to save snapshot + if self.cfg.save_snapshot: + self.save_snapshot() + episode_step = 0 + episode_reward = 0 + + # try to evaluate + if eval_every_step(self.global_step): + self.logger.log('eval_total_time', self.timer.total_time(), + self.global_frame) + if self.eval_on_distracting: + self.eval_distracting(eval_save_vid_every_step(self.global_step)) + if self.eval_on_multitask: 
+ self.eval_multitask(eval_save_vid_every_step(self.global_step)) + self.eval() + + # sample action + with torch.no_grad(), utils.eval_mode(self.agent): + action = self.agent.act(time_step.observation, + self.global_step, + eval_mode=False) + + # try to update the agent + if not seed_until_step(self.global_step): + metrics = self.agent.update(self.replay_iter, self.global_step) + self.logger.log_metrics(metrics, self.global_frame, ty='train') + + # take env step + time_step = self.train_env.step(action) + episode_reward += time_step.reward + self.replay_storage.add(time_step) + self.train_video_recorder.record(time_step.observation) + episode_step += 1 + self._global_step += 1 + + def train_offline(self, offline_dir): + # Open dataset, load as memory buffer + load_offline_dataset_into_buffer(Path(offline_dir), self.replay_buffer, self.cfg.frame_stack, + self.cfg.replay_buffer_size) + + if self.replay_buffer.index == -1: + raise ValueError('No offline data loaded, check directory.') + + # predicates + train_until_step = utils.Until(self.cfg.num_train_frames, 1) + eval_every_step = utils.Every(self.cfg.eval_every_frames, 1) + show_train_stats_every_step = utils.Every(self.cfg.show_train_stats_every_frames, 1) + # only in distracting evaluation mode + eval_save_vid_every_step = utils.Every(self.cfg.eval_save_vid_every_step, + self.cfg.action_repeat) + + metrics = None + step = 0 + while train_until_step(self.global_step): + if show_train_stats_every_step(self.global_step): + # wait until all the metrics schema is populated + if metrics is not None: + # log stats + elapsed_time, total_time = self.timer.reset() + with self.logger.log_and_dump_ctx(self.global_frame, + ty='train') as log: + log('fps', step / elapsed_time) + log('total_time', total_time) + log('buffer_size', len(self.replay_buffer)) + log('step', self.global_step) + step = 0 + # try to save snapshot + if self.cfg.save_snapshot: + self.save_snapshot() + step += 1 + # try to evaluate + if eval_every_step(self.global_step): + self.logger.log('eval_total_time', self.timer.total_time(), + self.global_frame) + if self.eval_on_distracting: + self.eval_distracting(eval_save_vid_every_step(self.global_step)) + if self.eval_on_multitask: + self.eval_multitask(eval_save_vid_every_step(self.global_step)) + self.eval() + + # try to update the agent + metrics = self.agent.update(self.replay_buffer, self.global_step) + if show_train_stats_every_step(self.global_step): + self.logger.log_metrics(metrics, self.global_frame, ty='train') + + self._global_step += 1 + + def save_snapshot(self): + snapshot = self.work_dir / 'snapshot.pt' + keys_to_save = ['agent', 'timer', '_global_step', '_global_episode'] + payload = {k: self.__dict__[k] for k in keys_to_save} + with snapshot.open('wb') as f: + torch.save(payload, f) + + def load_snapshot(self): + snapshot = self.work_dir / 'snapshot.pt' + with snapshot.open('rb') as f: + payload = torch.load(f) + for k, v in payload.items(): + self.__dict__[k] = v + + +@hydra.main(config_path='cfgs', config_name='config') +def main(cfg): + from train import Workspace as W + root_dir = Path.cwd() + workspace = W(cfg) + print(cfg) + snapshot = root_dir / 'snapshot.pt' + if snapshot.exists(): + print(f'resuming: {snapshot}') + workspace.load_snapshot() + if cfg.offline: + workspace.train_offline(cfg.offline_dir) + else: + workspace.train() + + +if __name__ == '__main__': + main() diff --git a/drqbc/utils.py b/drqbc/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..12faafb9dd6e805459de8ebc77bb151e748c120e --- /dev/null +++ b/drqbc/utils.py @@ -0,0 +1,207 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import random +import re +import time + +import numpy as np +import h5py +from collections import deque +import dmc +from dm_env import StepType +from drqbc.numpy_replay_buffer import EfficientReplayBuffer + +import torch +import torch.nn as nn +from torch import distributions as pyd +from torch.distributions.utils import _standard_normal + + +class eval_mode: + def __init__(self, *models): + self.models = models + + def __enter__(self): + self.prev_states = [] + for model in self.models: + self.prev_states.append(model.training) + model.train(False) + + def __exit__(self, *args): + for model, state in zip(self.models, self.prev_states): + model.train(state) + return False + + +def set_seed_everywhere(seed): + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + np.random.seed(seed) + random.seed(seed) + + +def soft_update_params(net, target_net, tau): + for param, target_param in zip(net.parameters(), target_net.parameters()): + target_param.data.copy_(tau * param.data + + (1 - tau) * target_param.data) + + +def to_torch(xs, device): + return tuple(torch.as_tensor(x, device=device) for x in xs) + + +def weight_init(m): + if isinstance(m, nn.Linear): + nn.init.orthogonal_(m.weight.data) + if hasattr(m.bias, 'data'): + m.bias.data.fill_(0.0) + elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): + gain = nn.init.calculate_gain('relu') + nn.init.orthogonal_(m.weight.data, gain) + if hasattr(m.bias, 'data'): + m.bias.data.fill_(0.0) + + +class Until: + def __init__(self, until, action_repeat=1): + self._until = until + self._action_repeat = action_repeat + + def __call__(self, step): + if self._until is None: + return True + until = self._until // self._action_repeat + return step < until + + +class Every: + def __init__(self, every, action_repeat=1): + self._every = every + self._action_repeat = action_repeat + + def __call__(self, step): + if self._every is None: + return False + every = self._every // self._action_repeat + if step % every == 0: + return True + return False + + +class Timer: + def __init__(self): + self._start_time = time.time() + self._last_time = time.time() + + def reset(self): + elapsed_time = time.time() - self._last_time + self._last_time = time.time() + total_time = time.time() - self._start_time + return elapsed_time, total_time + + def total_time(self): + return time.time() - self._start_time + + +class TruncatedNormal(pyd.Normal): + def __init__(self, loc, scale, low=-1.0, high=1.0, eps=1e-6): + super().__init__(loc, scale, validate_args=False) + self.low = low + self.high = high + self.eps = eps + + def _clamp(self, x): + clamped_x = torch.clamp(x, self.low + self.eps, self.high - self.eps) + x = x - x.detach() + clamped_x.detach() + return x + + def sample(self, clip=None, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + eps = _standard_normal(shape, + dtype=self.loc.dtype, + device=self.loc.device) + eps *= self.scale + if clip is not None: + eps = torch.clamp(eps, -clip, clip) + x = self.loc + eps + return self._clamp(x) + + +def schedule(schdl, step): + try: + return float(schdl) + except ValueError: + match = re.match(r'linear\((.+),(.+),(.+)\)', 
schdl) + if match: + init, final, duration = [float(g) for g in match.groups()] + mix = np.clip(step / duration, 0.0, 1.0) + return (1.0 - mix) * init + mix * final + match = re.match(r'step_linear\((.+),(.+),(.+),(.+),(.+)\)', schdl) + if match: + init, final1, duration1, final2, duration2 = [ + float(g) for g in match.groups() + ] + if step <= duration1: + mix = np.clip(step / duration1, 0.0, 1.0) + return (1.0 - mix) * init + mix * final1 + else: + mix = np.clip((step - duration1) / duration2, 0.0, 1.0) + return (1.0 - mix) * final1 + mix * final2 + raise NotImplementedError(schdl) + + +step_type_lookup = { + 0: StepType.FIRST, + 1: StepType.MID, + 2: StepType.LAST +} + + +def load_offline_dataset_into_buffer(offline_dir, replay_buffer, frame_stack, replay_buffer_size): + filenames = sorted(offline_dir.glob('*.hdf5')) + num_steps = 0 + for filename in filenames: + try: + episodes = h5py.File(filename, 'r') + episodes = {k: episodes[k][:] for k in episodes.keys()} + add_offline_data_to_buffer(episodes, replay_buffer, framestack=frame_stack) + length = episodes['reward'].shape[0] + num_steps += length + except Exception as e: + print(f'Could not load episode {str(filename)}: {e}') + continue + print("Loaded {} offline timesteps so far...".format(int(num_steps))) + if num_steps >= replay_buffer_size: + break + print("Finished, loaded {} timesteps.".format(int(num_steps))) + + +def add_offline_data_to_buffer(offline_data: dict, replay_buffer: EfficientReplayBuffer, framestack: int = 3): + offline_data_length = offline_data['reward'].shape[0] + for v in offline_data.values(): + assert v.shape[0] == offline_data_length + for idx in range(offline_data_length): + time_step = get_timestep_from_idx(offline_data, idx) + if not time_step.first(): + stacked_frames.append(time_step.observation) + time_step_stack = time_step._replace(observation=np.concatenate(stacked_frames, axis=0)) + replay_buffer.add(time_step_stack) + else: + stacked_frames = deque(maxlen=framestack) + while len(stacked_frames) < framestack: + stacked_frames.append(time_step.observation) + time_step_stack = time_step._replace(observation=np.concatenate(stacked_frames, axis=0)) + replay_buffer.add(time_step_stack) + + +def get_timestep_from_idx(offline_data: dict, idx: int): + return dmc.ExtendedTimeStep( + step_type=step_type_lookup[offline_data['step_type'][idx]], + reward=offline_data['reward'][idx], + observation=offline_data['observation'][idx], + discount=offline_data['discount'][idx], + action=offline_data['action'][idx] + ) diff --git a/drqbc/video.py b/drqbc/video.py new file mode 100644 index 0000000000000000000000000000000000000000..b4e118c9b87bdc149f7d5e4832c0cb34b6c26d85 --- /dev/null +++ b/drqbc/video.py @@ -0,0 +1,70 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
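`utils.schedule` above parses the exploration-stddev specification used by the agent, either a constant or a `linear(init,final,duration)` / `step_linear(...)` string. A few illustrative evaluations, assuming `drqbc/` and its dependencies are on the import path; the concrete numbers are examples, not repository defaults:

```
from utils import schedule  # drqbc/utils.py above

schedule('0.2', 0)                          # constant schedule       -> 0.2
schedule('linear(1.0,0.1,100000)', 0)       # start of the decay      -> 1.0
schedule('linear(1.0,0.1,100000)', 50000)   # halfway through         -> 0.55
schedule('linear(1.0,0.1,100000)', 200000)  # clipped at final value  -> 0.1
```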
+import cv2 +import imageio +import numpy as np + + +class VideoRecorder: + def __init__(self, root_dir, render_size=256, fps=20): + if root_dir is not None: + self.save_dir = root_dir / 'eval_video' + self.save_dir.mkdir(exist_ok=True) + else: + self.save_dir = None + + self.render_size = render_size + self.fps = fps + self.frames = [] + + def init(self, env, enabled=True): + self.frames = [] + self.enabled = self.save_dir is not None and enabled + self.record(env) + + def record(self, env): + if self.enabled: + if hasattr(env, 'physics'): + frame = env.physics.render(height=self.render_size, + width=self.render_size, + camera_id=0) + else: + frame = env.render() + self.frames.append(frame) + + def save(self, file_name): + if self.enabled: + path = self.save_dir / file_name + imageio.mimsave(str(path), self.frames, fps=self.fps) + + +class TrainVideoRecorder: + def __init__(self, root_dir, render_size=256, fps=20): + if root_dir is not None: + self.save_dir = root_dir / 'train_video' + self.save_dir.mkdir(exist_ok=True) + else: + self.save_dir = None + + self.render_size = render_size + self.fps = fps + self.frames = [] + + def init(self, obs, enabled=True): + self.frames = [] + self.enabled = self.save_dir is not None and enabled + self.record(obs) + + def record(self, obs): + if self.enabled: + frame = cv2.resize(obs[-3:].transpose(1, 2, 0), + dsize=(self.render_size, self.render_size), + interpolation=cv2.INTER_CUBIC) + self.frames.append(frame) + + def save(self, file_name): + if self.enabled: + path = self.save_dir / file_name + imageio.mimsave(str(path), self.frames, fps=self.fps) diff --git a/envs/.DS_Store b/envs/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..3573fed6f79961594f93d2bec1a8096b7f55279c Binary files /dev/null and b/envs/.DS_Store differ diff --git a/envs/distracting_control/LICENSE b/envs/distracting_control/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..7a4a3ea2424c09fbe48d455aed1eaa94d9124835 --- /dev/null +++ b/envs/distracting_control/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/envs/distracting_control/README.md b/envs/distracting_control/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f6581467f7105dbb40e3cedbce4aea523a4add8a --- /dev/null +++ b/envs/distracting_control/README.md @@ -0,0 +1,48 @@ +# The Distracting Control Suite + +`distracting_control` extends `dm_control` with static or dynamic visual +distractions in the form of changing colors, backgrounds, and camera poses. +Details and experimental results can be found in our +[paper](https://arxiv.org/pdf/2101.02722.pdf). + +## Requirements and Installation + +* Clone this repository +* `sh run.sh` +* Follow the instructions and install +[dm_control](https://github.com/deepmind/dm_control#requirements-and-installation). Make sure you setup your MuJoCo keys correctly. +* Download the [DAVIS 2017 + dataset](https://davischallenge.org/davis2017/code.html). Make sure to select the 2017 TrainVal - Images and Annotations (480p). 
The training images will be used as distracting backgrounds. + +## Instructions + +* You can run the `distracting_control_demo` to generate sample images of the + different tasks at different difficulties: + + ``` + python distracting_control_demo --davis_path=$HOME/DAVIS/JPEGImages/480p/ + --output_dir=/tmp/distracting_control_demo + ``` +* As seen in the demo, to generate an instance of the environment you simply + need to import the suite and use `suite.load`, specifying the + `dm_control` domain and task, choosing a difficulty, and providing the + dataset_path. + +* Note that the environment follows the `dm_control` environment API. + +## Paper + +If you use this code, please cite the accompanying [paper](https://arxiv.org/pdf/2101.02722.pdf) as: + +``` +@article{stone2021distracting, + title={The Distracting Control Suite -- A Challenging Benchmark for Reinforcement Learning from Pixels}, + author={Austin Stone and Oscar Ramirez and Kurt Konolige and Rico Jonschkowski}, + year={2021}, + journal={arXiv preprint arXiv:2101.02722}, +} +``` + +## Disclaimer + +This is not an official Google product. diff --git a/envs/distracting_control/background.py b/envs/distracting_control/background.py new file mode 100644 index 0000000000000000000000000000000000000000..832d71bee04252c8298d5024681783c4c1ec5a91 --- /dev/null +++ b/envs/distracting_control/background.py @@ -0,0 +1,260 @@ +# coding=utf-8 +# Copyright 2021 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
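A minimal sketch of applying the background wrapper defined in this file directly to a `dm_control` task, following the README above; the DAVIS path is a placeholder, the import path assumes `envs/` is used as a package, and the repository's own `dmc.make` instead goes through its `distracting_wrapper` helper:

```
from dm_control import suite
from dm_control.suite.wrappers import pixels

from envs.distracting_control.background import DistractingBackgroundEnv

env = suite.load('walker', 'walk', task_kwargs={'random': 0}, visualize_reward=False)
# Apply the background distraction before the pixel wrapper, as noted in the class below.
env = DistractingBackgroundEnv(env,
                               dataset_path='DAVIS/JPEGImages/480p/',  # placeholder
                               dataset_videos='train',
                               dynamic=True,
                               seed=0)
env = pixels.Wrapper(env, pixels_only=True,
                     render_kwargs=dict(height=84, width=84, camera_id=0))
time_step = env.reset()
```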
+ +# Lint as: python3 +"""A wrapper for dm_control environments which applies color distractions.""" +import os + +from PIL import Image +import collections +from dm_control.rl import control +import numpy as np + +import tensorflow as tf +from dm_control.mujoco.wrapper import mjbindings + +DAVIS17_TRAINING_VIDEOS = [ + 'bear', 'bmx-bumps', 'boat', 'boxing-fisheye', 'breakdance-flare', 'bus', + 'car-turn', 'cat-girl', 'classic-car', 'color-run', 'crossing', + 'dance-jump', 'dancing', 'disc-jockey', 'dog-agility', 'dog-gooses', + 'dogs-scale', 'drift-turn', 'drone', 'elephant', 'flamingo', 'hike', + 'hockey', 'horsejump-low', 'kid-football', 'kite-walk', 'koala', + 'lady-running', 'lindy-hop', 'longboard', 'lucia', 'mallard-fly', + 'mallard-water', 'miami-surf', 'motocross-bumps', 'motorbike', 'night-race', + 'paragliding', 'planes-water', 'rallye', 'rhino', 'rollerblade', + 'schoolgirls', 'scooter-board', 'scooter-gray', 'sheep', 'skate-park', + 'snowboard', 'soccerball', 'stroller', 'stunt', 'surf', 'swing', 'tennis', + 'tractor-sand', 'train', 'tuk-tuk', 'upside-down', 'varanus-cage', 'walking' +] +DAVIS17_VALIDATION_VIDEOS = [ + 'bike-packing', 'blackswan', 'bmx-trees', 'breakdance', 'camel', + 'car-roundabout', 'car-shadow', 'cows', 'dance-twirl', 'dog', 'dogs-jump', + 'drift-chicane', 'drift-straight', 'goat', 'gold-fish', 'horsejump-high', + 'india', 'judo', 'kite-surf', 'lab-coat', 'libby', 'loading', 'mbike-trick', + 'motocross-jump', 'paragliding-launch', 'parkour', 'pigs', 'scooter-black', + 'shooting', 'soapbox' +] +SKY_TEXTURE_INDEX = 0 +Texture = collections.namedtuple('Texture', ('size', 'address', 'textures')) + + +def imread(filename): + img = Image.open(filename) + img_np = np.asarray(img) + return img_np + + +def size_and_flatten(image, ref_height, ref_width): + # Resize image if necessary and flatten the result. + image_height, image_width = image.shape[:2] + + if image_height != ref_height or image_width != ref_width: + image = tf.cast(tf.image.resize(image, [ref_height, ref_width]), tf.uint8) + return tf.reshape(image, [-1]).numpy() + + +def blend_to_background(alpha, image, background): + if alpha == 1.0: + return image + elif alpha == 0.0: + return background + else: + return (alpha * image.astype(np.float32) + + (1. - alpha) * background.astype(np.float32)).astype(np.uint8) + + +class DistractingBackgroundEnv(control.Environment): + """Environment wrapper for background visual distraction. + + **NOTE**: This wrapper should be applied BEFORE the pixel wrapper to make sure + the background image changes are applied before rendering occurs. + """ + + def __init__(self, + env, + dataset_path=None, + dataset_videos=None, + video_alpha=1.0, + ground_plane_alpha=1.0, + num_videos=None, + dynamic=False, + seed=None, + shuffle_buffer_size=None, + fixed=False, + ): + + if not 0 <= video_alpha <= 1: + raise ValueError('`video_alpha` must be in the range [0, 1]') + + self._env = env + self._video_alpha = video_alpha + self._ground_plane_alpha = ground_plane_alpha + self._random_state = np.random.RandomState(seed=seed) + self._dynamic = dynamic + self._shuffle_buffer_size = shuffle_buffer_size + self._background = None + self._current_img_index = 0 + + if not dataset_path or num_videos == 0: + # Allow running the wrapper without backgrounds to still set the ground + # plane alpha value. + self._video_paths = [] + else: + # Use all videos if no specific ones were passed. 
+ if not dataset_videos: + dataset_videos = sorted(tf.io.gfile.listdir(dataset_path)) + # Replace video placeholders 'train'/'val' with the list of videos. + elif dataset_videos in ['train', 'training']: + dataset_videos = DAVIS17_TRAINING_VIDEOS + elif dataset_videos in ['val', 'validation']: + dataset_videos = DAVIS17_VALIDATION_VIDEOS + # Get complete paths for all videos. + video_paths = [ + os.path.join(dataset_path, subdir) for subdir in dataset_videos + ] + + # Optionally use only the first `num_videos` paths. + if num_videos is not None: + if num_videos > len(video_paths) or num_videos < 0: + raise ValueError(f'`num_videos` is {num_videos} but must be ' + 'between 0 and the number of available ' + f'background paths ({len(video_paths)}).') + video_paths = video_paths[:num_videos] + + self._video_paths = video_paths + + self.fixed = fixed + self.first_reset = False + + def reset(self): + """Reset the background state.""" + time_step = self._env.reset() + if not self.fixed or not self.first_reset: + self._reset_background() + self.first_reset = True + return time_step + + def _reset_background(self): + # Make grid semi-transparent. + if self._ground_plane_alpha is not None: + self._env.physics.named.model.mat_rgba['grid', + 'a'] = self._ground_plane_alpha + + # For some reason the height of the skybox is set to 4800 by default, + # which does not work with new textures. + self._env.physics.model.tex_height[SKY_TEXTURE_INDEX] = 800 + + # Set the sky texture reference. + sky_height = self._env.physics.model.tex_height[SKY_TEXTURE_INDEX] + sky_width = self._env.physics.model.tex_width[SKY_TEXTURE_INDEX] + sky_size = sky_height * sky_width * 3 + sky_address = self._env.physics.model.tex_adr[SKY_TEXTURE_INDEX] + + sky_texture = self._env.physics.model.tex_rgb[sky_address:sky_address + + sky_size].astype(np.float32) + + if self._video_paths: + if self._shuffle_buffer_size: + # Shuffle images from all videos together to get background frames. + file_names = [ + os.path.join(path, fn) + for path in self._video_paths + for fn in tf.io.gfile.listdir(path) + ] + self._random_state.shuffle(file_names) + # Load only the first n images for performance reasons. + file_names = file_names[:self._shuffle_buffer_size] + images = [imread(fn) for fn in file_names] + else: + # Randomly pick a video and load all images. + video_path = self._random_state.choice(self._video_paths) + file_names = tf.io.gfile.listdir(video_path) + if not self._dynamic: + # Pick a single static frame: the first frame after sorting + # (the random frame choice below is commented out). + # file_names = [self._random_state.choice(file_names)] + file_names.sort() + file_names = [file_names[0]] + images = [imread(os.path.join(video_path, fn)) for fn in file_names] + + # Pick a random starting point and stepping direction. + self._current_img_index = self._random_state.choice(len(images)) + self._step_direction = self._random_state.choice([-1, 1]) + + # Prepare images in the texture format by resizing and flattening. + + # Generate image textures.
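+      # Each frame is resized to the skybox dimensions, flattened, and + # alpha-blended with the original sky texture using `video_alpha`.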
+ texturized_images = [] + for image in images: + image_flattened = size_and_flatten(image, sky_height, sky_width) + new_texture = blend_to_background(self._video_alpha, image_flattened, + sky_texture) + texturized_images.append(new_texture) + + else: + self._current_img_index = 0 + texturized_images = [sky_texture] + + self._background = Texture(sky_size, sky_address, texturized_images) + self._apply() + + def step(self, action): + time_step = self._env.step(action) + + if time_step.first(): + self._reset_background() + return time_step + + if self._dynamic and self._video_paths: + # Move forward / backward in the image sequence by updating the index. + self._current_img_index += self._step_direction + + # Start moving forward if we are past the start of the images. + if self._current_img_index <= 0: + self._current_img_index = 0 + self._step_direction = abs(self._step_direction) + # Start moving backwards if we are past the end of the images. + if self._current_img_index >= len(self._background.textures): + self._current_img_index = len(self._background.textures) - 1 + self._step_direction = -abs(self._step_direction) + + self._apply() + return time_step + + def _apply(self): + """Apply the background texture to the physics.""" + + if self._background: + start = self._background.address + end = self._background.address + self._background.size + texture = self._background.textures[self._current_img_index] + + self._env.physics.model.tex_rgb[start:end] = texture + # Upload the new texture to the GPU. Note: we need to make sure that the + # OpenGL context belonging to this Physics instance is the current one. + with self._env.physics.contexts.gl.make_current() as ctx: + ctx.call( + mjbindings.mjlib.mjr_uploadTexture, + self._env.physics.model.ptr, + self._env.physics.contexts.mujoco.ptr, + SKY_TEXTURE_INDEX, + ) + + # Forward property and method calls to self._env. + def __getattr__(self, attr): + if hasattr(self._env, attr): + return getattr(self._env, attr) + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) diff --git a/envs/distracting_control/camera.py b/envs/distracting_control/camera.py new file mode 100644 index 0000000000000000000000000000000000000000..57cb95c12b092fe9aaa1e4baa35d54bc7e8e30cd --- /dev/null +++ b/envs/distracting_control/camera.py @@ -0,0 +1,362 @@ +# coding=utf-8 +# Copyright 2021 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Lint as: python3 +"""A wrapper for dm_control environments which applies camera distractions.""" + +import copy +from dm_control.rl import control +import numpy as np + +CAMERA_MODES = ['fixed', 'track', 'trackcom', 'targetbody', 'targetbodycom'] + + +def eul2mat(theta): + """Converts euler angles (x, y, z) to a rotation matrix.""" + + return np.array([ + [ + np.cos(theta[1]) * np.cos(theta[2]), + np.sin(theta[0]) * np.sin(theta[1]) * np.cos(theta[2]) - + np.sin(theta[2]) * np.cos(theta[0]), + np.sin(theta[1]) * np.cos(theta[0]) * np.cos(theta[2]) + + np.sin(theta[0]) * np.sin(theta[2]) + ], + [ + np.sin(theta[2]) * np.cos(theta[1]), + np.sin(theta[0]) * np.sin(theta[1]) * np.sin(theta[2]) + + np.cos(theta[0]) * np.cos(theta[2]), + np.sin(theta[1]) * np.sin(theta[2]) * np.cos(theta[0]) - + np.sin(theta[0]) * np.cos(theta[2]) + ], + [ + -np.sin(theta[1]), + np.sin(theta[0]) * np.cos(theta[1]), + np.cos(theta[0]) * np.cos(theta[1]) + ], + ]) + + +def _mat_from_theta(cos_theta, sin_theta, a): + """Builds a rotation matrix from theta and an orientation vector.""" + + row1 = [ + cos_theta + a[0] ** 2. * (1. - cos_theta), + a[0] * a[1] * (1 - cos_theta) - a[2] * sin_theta, + a[0] * a[2] * (1 - cos_theta) + a[1] * sin_theta + ] + row2 = [ + a[1] * a[0] * (1 - cos_theta) + a[2] * sin_theta, + cos_theta + a[1] ** 2. * (1 - cos_theta), + a[1] * a[2] * (1. - cos_theta) - a[0] * sin_theta + ] + row3 = [ + a[2] * a[0] * (1. - cos_theta) - a[1] * sin_theta, + a[2] * a[1] * (1. - cos_theta) + a[0] * sin_theta, + cos_theta + (a[2] ** 2.) * (1. - cos_theta) + ] + return np.stack([row1, row2, row3]) + + +def rotvec2mat(theta, vec): + """Converts a rotation around a vector to a rotation matrix.""" + + a = vec / np.sqrt(np.sum(vec ** 2.)) + sin_theta = np.sin(theta) + cos_theta = np.cos(theta) + + return _mat_from_theta(cos_theta, sin_theta, a) + + +def get_lookat_xmat_no_roll(agent_pos, camera_pos): + """Solves for the cam rotation centering the agent with 0 roll.""" + + # NOTE(austinstone): This method leads to wild oscillations around the north + # and south poles. + # For example, if agent is at (0., 0., 0.) and the camera is at (.01, 0., 1.), + # this will produce a yaw of 90 degrees whereas if the camera is slightly + # offset at (-.01, 0., 1.) this will produce a yaw of -90 degrees. I'm + # not sure what the fix is, as this seems like the behavior we want in all + # environments except for reacher. + delta_vec = agent_pos - camera_pos + delta_vec /= np.sqrt(np.sum(delta_vec ** 2.)) + yaw = np.arctan2(delta_vec[0], delta_vec[1]) + pitch = np.arctan2(delta_vec[2], np.sqrt(np.sum(delta_vec[:2] ** 2.))) + pitch += np.pi / 2. # Camera starts out looking at [0, 0, -1.] + return eul2mat([pitch, 0., -yaw]).flatten() + + +def get_lookat_xmat(agent_pos, camera_pos): + """Solves for the cam rotation centering the agent, allowing roll.""" + + # Solve for the rotation which centers the agent in the scene. + delta_vec = agent_pos - camera_pos + delta_vec /= np.sqrt(np.sum(delta_vec ** 2.)) + y_vec = np.array([0., 0., -1.]) # This is where the cam starts from.
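+  # The rotation axis is the cross product of the camera's default viewing + # direction (y_vec) and the normalized camera-to-agent vector; its norm gives + # sin(theta), the dot product of the two unit vectors gives cos(theta), and + # _mat_from_theta applies Rodrigues' formula to build the rotation matrix.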
+ a = np.cross(y_vec, delta_vec) + sin_theta = np.sqrt(np.sum(a ** 2.)) + cos_theta = np.dot(delta_vec, y_vec) + a /= (np.sqrt(np.sum(a ** 2.)) + .0001) + return _mat_from_theta(cos_theta, sin_theta, a) + + +def cart2sphere(cart): + r = np.sqrt(np.sum(cart ** 2.)) + h_angle = np.arctan2(cart[1], cart[0]) + v_angle = np.arctan2(np.sqrt(np.sum(cart[:2] ** 2.)), cart[2]) + return np.array([r, h_angle, v_angle]) + + +def sphere2cart(sphere): + r, h_angle, v_angle = sphere + x = r * np.sin(v_angle) * np.cos(h_angle) + y = r * np.sin(v_angle) * np.sin(h_angle) + z = r * np.cos(v_angle) + return np.array([x, y, z]) + + +def clip_cam_position(position, min_radius, max_radius, min_h_angle, + max_h_angle, min_v_angle, max_v_angle): + new_position = [-1., -1., -1.] + new_position[0] = np.clip(position[0], min_radius, max_radius) + new_position[1] = np.clip(position[1], min_h_angle, max_h_angle) + new_position[2] = np.clip(position[2], min_v_angle, max_v_angle) + return new_position + + +def get_lookat_point(physics, camera_id): + """Get the point that the camera is looking at. + + The "point" is assumed to lie one camera-to-agent distance away from the + camera, projected along the camera viewing matrix. + + Args: + physics: MuJoCo physics object + camera_id: int + + Returns: + position: float32 np.array of length 3 + """ + dist_to_agent = physics.named.data.cam_xpos[ + camera_id] - physics.named.data.subtree_com[1] + dist_to_agent = np.sqrt(np.sum(dist_to_agent ** 2.)) + initial_viewing_mat = copy.deepcopy(physics.named.data.cam_xmat[camera_id]) + initial_viewing_mat = np.reshape(initial_viewing_mat, (3, 3)) + z_vec = np.array([0., 0., -dist_to_agent]) + rotated_vec = np.dot(initial_viewing_mat, z_vec) + return rotated_vec + physics.named.data.cam_xpos[camera_id] + + +class DistractingCameraEnv(control.Environment): + """Environment wrapper for camera pose visual distraction. + + **NOTE**: This wrapper should be applied BEFORE the pixel wrapper to make sure + the camera pose changes are applied before rendering occurs. + """ + + def __init__(self, + env, + camera_id, + horizontal_delta, + vertical_delta, + max_vel, + vel_std, + roll_delta, + max_roll_vel, + roll_std, + max_zoom_in_percent, + max_zoom_out_percent, + limit_to_upper_quadrant=False, + seed=None, + fixed=False, + ): + self._env = env + self._camera_id = camera_id + self._horizontal_delta = horizontal_delta + self._vertical_delta = vertical_delta + self._max_vel = max_vel + self._vel_std = vel_std + self._roll_delta = roll_delta + self._max_roll_vel = max_roll_vel + self._roll_vel_std = roll_std + self._max_zoom_in_percent = max_zoom_in_percent + self._max_zoom_out_percent = max_zoom_out_percent + self._limit_to_upper_quadrant = limit_to_upper_quadrant + + self._random_state = np.random.RandomState(seed=seed) + + # These camera state parameters will be set on the first reset call. + self._camera_type = None + self._cam_initial_lookat_point = None + + self._camera_vel = None + self._max_h_angle = None + self._max_v_angle = None + self._min_h_angle = None + self._min_v_angle = None + self._radius = None + self._roll_vel = None + self._vel_scaling = None + + self.fixed = fixed + self.first_reset = False + + def setup_camera(self): + """Set up camera motion ranges and state.""" + # Define boundaries on the range of the camera motion.
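+    # cam_mode[0] is an integer index into CAMERA_MODES; only 'fixed' and + # 'trackcom' cameras are supported by this wrapper (see the assert below).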
+ mode = self._env._physics.model.cam_mode[0] + + camera_type = CAMERA_MODES[mode] + assert camera_type in ['fixed', 'trackcom'] + + self._camera_type = camera_type + self._cam_initial_lookat_point = get_lookat_point(self._env.physics, + self._camera_id) + + start_pos = copy.deepcopy( + self._env.physics.named.data.cam_xpos[self._camera_id]) + + if self._camera_type != 'fixed': + # Center the camera relative to the agent's center of mass. + start_pos -= self._env.physics.named.data.subtree_com[1] + + start_r, start_h_angle, start_v_angle = cart2sphere(start_pos) + # Scale the velocity by the starting radius. Most environments have radius 4, + # but this downscales the velocity for the envs with radius < 4. + self._vel_scaling = start_r / 4. + self._max_h_angle = start_h_angle + self._horizontal_delta + self._min_h_angle = start_h_angle - self._horizontal_delta + self._max_v_angle = start_v_angle + self._vertical_delta + self._min_v_angle = start_v_angle - self._vertical_delta + + if self._limit_to_upper_quadrant: + # A centered cam is at np.pi / 2. + self._max_v_angle = min(self._max_v_angle, np.pi / 2.) + self._min_v_angle = max(self._min_v_angle, 0.) + # A centered cam is at -np.pi / 2. + self._max_h_angle = min(self._max_h_angle, 0.) + self._min_h_angle = max(self._min_h_angle, -np.pi) + + self._max_roll = self._roll_delta + self._min_roll = -self._roll_delta + self._min_radius = max(start_r - start_r * self._max_zoom_in_percent, 0.) + self._max_radius = start_r + start_r * self._max_zoom_out_percent + + # Decide the starting position for the camera. + self._h_angle = self._random_state.uniform(self._min_h_angle, + self._max_h_angle) + + self._v_angle = self._random_state.uniform(self._min_v_angle, + self._max_v_angle) + + self._radius = self._random_state.uniform(self._min_radius, + self._max_radius) + + self._roll = self._random_state.uniform(self._min_roll, self._max_roll) + + # Decide the starting velocity for the camera. + vel = self._random_state.randn(3) + vel /= np.sqrt(np.sum(vel ** 2.)) + vel *= self._random_state.uniform(0., self._max_vel) + self._camera_vel = vel + self._roll_vel = self._random_state.uniform(-self._max_roll_vel, + self._max_roll_vel) + + def reset(self): + """Reset the camera state. """ + time_step = self._env.reset() + if not self.fixed or not self.first_reset: + self.setup_camera() + self.first_reset = True + self._apply() + return time_step + + def step(self, action): + time_step = self._env.step(action) + + if time_step.first(): + self.setup_camera() + + self._apply() + return time_step + + def _apply(self): + if not self._camera_type: + self.setup_camera() + + # Random walk the velocity. + vel_delta = self._random_state.randn(3) + self._camera_vel += vel_delta * self._vel_std * self._vel_scaling + self._roll_vel += self._random_state.randn() * self._roll_vel_std + + # Clip velocity if it gets too big. 
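+    # The cap is max_vel scaled by _vel_scaling (start radius / 4), so scenes + # with a smaller camera radius get proportionally slower camera motion.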
+ vel_norm = np.sqrt(np.sum(self._camera_vel ** 2.)) + if vel_norm > self._max_vel * self._vel_scaling: + self._camera_vel *= (self._max_vel * self._vel_scaling) / vel_norm + + self._roll_vel = np.clip(self._roll_vel, -self._max_roll_vel, + self._max_roll_vel) + + cart_cam_pos = sphere2cart([self._radius, self._h_angle, self._v_angle]) + # Apply velocity vector to camera + sphere_cam_pos2 = cart2sphere(cart_cam_pos + self._camera_vel) + sphere_cam_pos2 = clip_cam_position(sphere_cam_pos2, self._min_radius, + self._max_radius, self._min_h_angle, + self._max_h_angle, self._min_v_angle, + self._max_v_angle) + + self._camera_vel = sphere2cart(sphere_cam_pos2) - cart_cam_pos + + self._radius, self._h_angle, self._v_angle = sphere_cam_pos2 + + roll2 = self._roll + self._roll_vel + roll2 = np.clip(roll2, self._min_roll, self._max_roll) + + self._roll_vel = roll2 - self._roll + self._roll = roll2 + + cart_cam_pos = sphere2cart(sphere_cam_pos2) + + if self._limit_to_upper_quadrant: + lookat_method = get_lookat_xmat_no_roll + else: + # This method avoids jitteriness at the pole but allows some roll + # in the camera matrix. This is important for reacher. + lookat_method = get_lookat_xmat + + if self._camera_type == 'fixed': + lookat_mat = lookat_method(self._cam_initial_lookat_point, + cart_cam_pos) + else: + # Go from agent centric to world coords + cart_cam_pos += self._env.physics.named.data.subtree_com[1] + lookat_mat = lookat_method( + get_lookat_point(self._env.physics, self._camera_id), cart_cam_pos) + + lookat_mat = np.reshape(lookat_mat, (3, 3)) + roll_mat = rotvec2mat(self._roll, np.array([0., 0., 1.])) + xmat = np.dot(lookat_mat, roll_mat) + self._env.physics.named.data.cam_xpos[self._camera_id] = cart_cam_pos + self._env.physics.named.data.cam_xmat[self._camera_id] = xmat.flatten() + + # Forward property and method calls to self._env. + def __getattr__(self, attr): + if hasattr(self._env, attr): + return getattr(self._env, attr) + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) diff --git a/envs/distracting_control/color.py b/envs/distracting_control/color.py new file mode 100644 index 0000000000000000000000000000000000000000..1b6e7b815bcf63ac6f814bcc3d12c0a5f4f33e84 --- /dev/null +++ b/envs/distracting_control/color.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# Copyright 2021 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +"""A wrapper for dm_control environments which applies color distractions.""" + +from dm_control.rl import control +import numpy as np + + +class DistractingColorEnv(control.Environment): + """Environment wrapper for color visual distraction. + + **NOTE**: This wrapper should be applied BEFORE the pixel wrapper to make sure + the color changes are applied before rendering occurs. + """ + + def __init__(self, + env, + step_std, + max_delta, + seed=None, + fixed=False, + ): + """Initialize the environment wrapper. 
+ + Args: + env: instance of dm_control Environment to wrap with augmentations. + """ + if step_std < 0: + raise ValueError("`step_std` must be greater than or equal to 0.") + if max_delta < 0: + raise ValueError("`max_delta` must be greater than or equal to 0.") + + self._env = env + self._step_std = step_std + self._max_delta = max_delta + self._random_state = np.random.RandomState(seed=seed) + + self._cam_type = None + self._current_rgb = None + self._max_rgb = None + self._min_rgb = None + self._original_rgb = None + + self.fixed = fixed + self.first_reset = False + + def reset(self): + """Reset the distractions state.""" + time_step = self._env.reset() + if not self.fixed or not self.first_reset: + self._reset_color() + self.first_reset = True + return time_step + + def _reset_color(self): + # Save all original colors. + if self._original_rgb is None: + self._original_rgb = np.copy(self._env.physics.model.mat_rgba)[:, :3] + # Determine minimum and maximum rgb values. + self._max_rgb = np.clip(self._original_rgb + self._max_delta, 0.0, 1.0) + self._min_rgb = np.clip(self._original_rgb - self._max_delta, 0.0, 1.0) + + # Pick random colors in the allowed ranges. + r = self._random_state.uniform(size=self._min_rgb.shape) + self._current_rgb = self._min_rgb + r * (self._max_rgb - self._min_rgb) + + # Apply the color changes. + self._env.physics.model.mat_rgba[:, :3] = self._current_rgb + + def step(self, action): + time_step = self._env.step(action) + + if time_step.first(): + self._reset_color() + return time_step + + color_change = self._random_state.randn(*self._current_rgb.shape) + color_change = color_change * self._step_std + + new_color = self._current_rgb + color_change + + self._current_rgb = np.clip( + new_color, + a_min=self._min_rgb, + a_max=self._max_rgb, + ) + + # Apply the color changes. + self._env.physics.model.mat_rgba[:, :3] = self._current_rgb + return time_step + + # Forward property and method calls to self._env. + def __getattr__(self, attr): + if hasattr(self._env, attr): + return getattr(self._env, attr) + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) diff --git a/envs/distracting_control/suite.py b/envs/distracting_control/suite.py new file mode 100644 index 0000000000000000000000000000000000000000..5598df2f8acefb1f60b2c7a99b3ba8a9a4675354 --- /dev/null +++ b/envs/distracting_control/suite.py @@ -0,0 +1,219 @@ +# coding=utf-8 +# Copyright 2021 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A collection of MuJoCo-based Reinforcement Learning environments. + +The suite provides a similar API to the original dm_control suite. +Users can configure the distractions on top of the original tasks. The suite is +targeted for loading environments directly with similar configurations as those +used in the original paper. Each distraction wrapper can be used independently +though. 
+""" +try: + from dm_control import suite # pylint: disable=g-import-not-at-top + from dm_control.suite.wrappers import pixels # pylint: disable=g-import-not-at-top +except ImportError: + suite = None + +from envs.distracting_control import background +from envs.distracting_control import camera +from envs.distracting_control import color +from envs.distracting_control import suite_utils + + +def is_available(): + return suite is not None + + +def load(domain_name, + task_name, + difficulty=None, + dynamic=False, + background_dataset_path=None, + background_dataset_videos="train", + background_kwargs=None, + camera_kwargs=None, + color_kwargs=None, + task_kwargs=None, + environment_kwargs=None, + visualize_reward=False, + render_kwargs=None, + pixels_only=True, + pixels_observation_key="pixels", + env_state_wrappers=None, + # + fixed_distraction=False, + background_seed=None, + color_seed=None, + camera_seed=None, + ): + """Returns an environment from a domain name, task name and optional settings. + + ```python + env = suite.load('cartpole', 'balance') + ``` + + Adding a difficulty will configure distractions matching the reference paper + for easy, medium, hard. + + Users can also toggle dynamic properties for distractions. + + Args: + domain_name: A string containing the name of a domain. + task_name: A string containing the name of a task. + difficulty: Difficulty for the suite. One of 'easy', 'medium', 'hard'. + dynamic: Boolean controlling whether distractions are dynamic or static. + background_dataset_path: String to the davis directory that contains the + video directories. + background_dataset_videos: String ('train'/'val') or list of strings of the + DAVIS videos to be used for backgrounds. + background_kwargs: Dict, overwrites settings for background distractions. + camera_kwargs: Dict, overwrites settings for camera distractions. + color_kwargs: Dict, overwrites settings for color disdifficultytractions. + task_kwargs: Dict, dm control task kwargs. + environment_kwargs: Optional `dict` specifying keyword arguments for the + environment. + visualize_reward: Optional `bool`. If `True`, object colours in rendered + frames are set to indicate the reward at each step. Default `False`. + render_kwargs: Dict, render kwargs for pixel wrapper. + pixels_only: Boolean controlling the exclusion of states in the observation. + pixels_observation_key: Key in the observation used for the rendered image. + env_state_wrappers: Env state wrappers to be called before the PixelWrapper. + + Returns: + The requested environment. + """ + if not is_available(): + raise ImportError("dm_control module is not available. 
Make sure you " + "follow the installation instructions from the " + "dm_control package.") + + if difficulty not in [None, "easy", "medium", "hard"]: + raise ValueError("Difficulty should be one of: 'easy', 'medium', 'hard'.") + + render_kwargs = render_kwargs or {} + if "camera_id" not in render_kwargs: + render_kwargs["camera_id"] = 2 if domain_name == "quadruped" else 0 + + env = suite.load( + domain_name, + task_name, + task_kwargs=task_kwargs, + environment_kwargs=environment_kwargs, + visualize_reward=visualize_reward) + + env = distracting_wrapper( + env, + domain_name, + difficulty, + dynamic, + background_dataset_path, + background_dataset_videos, + background_kwargs, + camera_kwargs, + color_kwargs, + render_kwargs, + pixels_only, + pixels_observation_key, + env_state_wrappers, + # + fixed_distraction, + background_seed, + color_seed, + camera_seed + ) + + return env + + +def distracting_wrapper( + env, + domain_name, + difficulty=None, + dynamic=False, + background_dataset_path=None, + background_dataset_videos="train", + background_kwargs=None, + camera_kwargs=None, + color_kwargs=None, + render_kwargs=None, + pixels_only=True, + pixels_observation_key="pixels", + env_state_wrappers=None, + # + fixed_distraction=False, + background_seed=None, + color_seed=None, + camera_seed=None): + # Apply background distractions. + if difficulty or background_kwargs: + background_dataset_path = ( + background_dataset_path or suite_utils.DEFAULT_BACKGROUND_PATH) + final_background_kwargs = dict(fixed=fixed_distraction, seed=background_seed) + if difficulty: + # Get kwargs for the given difficulty. + num_videos = suite_utils.DIFFICULTY_NUM_VIDEOS[difficulty] + final_background_kwargs.update( + suite_utils.get_background_kwargs(domain_name, num_videos, dynamic, + background_dataset_path, + background_dataset_videos)) + else: + # Set the dataset path and the videos. + final_background_kwargs.update( + dict(dataset_path=background_dataset_path, + dataset_videos=background_dataset_videos)) + if background_kwargs: + # Overwrite kwargs with those passed here. + final_background_kwargs.update(background_kwargs) + env = background.DistractingBackgroundEnv(env, **final_background_kwargs) + + # Apply camera distractions. + if difficulty or camera_kwargs: + final_camera_kwargs = dict(camera_id=render_kwargs["camera_id"], fixed=fixed_distraction, seed=camera_seed) + if difficulty: + # Get kwargs for the given difficulty. + scale = suite_utils.DIFFICULTY_SCALE[difficulty] + final_camera_kwargs.update( + suite_utils.get_camera_kwargs(domain_name, scale, dynamic)) + if camera_kwargs: + # Overwrite kwargs with those passed here. + final_camera_kwargs.update(camera_kwargs) + env = camera.DistractingCameraEnv(env, **final_camera_kwargs) + + # Apply color distractions. + if difficulty or color_kwargs: + final_color_kwargs = dict(fixed=fixed_distraction, seed=color_seed) + if difficulty: + # Get kwargs for the given difficulty. + scale = suite_utils.DIFFICULTY_SCALE[difficulty] + final_color_kwargs.update(suite_utils.get_color_kwargs(scale, dynamic)) + if color_kwargs: + # Overwrite kwargs with those passed here. + final_color_kwargs.update(color_kwargs) + env = color.DistractingColorEnv(env, **final_color_kwargs) + + if env_state_wrappers is not None: + for wrapper in env_state_wrappers: + env = wrapper(env) + # Apply Pixel wrapper after distractions. This is needed to ensure the + # changes from the distraction wrapper are applied to the MuJoCo environment + # before the rendering occurs. 
+ env = pixels.Wrapper( + env, + pixels_only=pixels_only, + render_kwargs=render_kwargs, + observation_key=pixels_observation_key) + + return env diff --git a/envs/distracting_control/suite_utils.py b/envs/distracting_control/suite_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a1f40621eb89d3b40c6da9f1c9480fefcdcd59f5 --- /dev/null +++ b/envs/distracting_control/suite_utils.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# Copyright 2021 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A collection of MuJoCo-based Reinforcement Learning environments. + +The suite provides a similar API to the original dm_control suite. +Users can configure the distractions on top of the original tasks. The suite is +targeted for loading environments directly with similar configurations as those +used in the original paper. Each distraction wrapper can be used independently +though. +""" +import numpy as np + +DIFFICULTY_SCALE = dict(easy=0.1, medium=0.2, hard=0.3) +DIFFICULTY_NUM_VIDEOS = dict(easy=4, medium=8, hard=None) +DEFAULT_BACKGROUND_PATH = "$HOME/davis/" + + +def get_color_kwargs(scale, dynamic): + max_delta = scale + step_std = 0.03 * scale if dynamic else 0.0 + return dict(max_delta=max_delta, step_std=step_std) + + +def get_camera_kwargs(domain_name, scale, dynamic): + assert domain_name in ['reacher', 'cartpole', 'finger', 'cheetah', + 'ball_in_cup', 'walker'] + assert scale >= 0.0 + assert scale <= 1.0 + return dict( + vertical_delta=np.pi / 2 * scale, + horizontal_delta=np.pi / 2 * scale, + # Limit camera to -90 / 90 degree rolls. + roll_delta=np.pi / 2. 
* scale, + vel_std=.1 * scale if dynamic else 0., + max_vel=.4 * scale if dynamic else 0., + roll_std=np.pi / 300 * scale if dynamic else 0., + max_roll_vel=np.pi / 50 * scale if dynamic else 0., + max_zoom_in_percent=.5 * scale, + max_zoom_out_percent=1.5 * scale, + limit_to_upper_quadrant='reacher' not in domain_name, + ) + + +def get_background_kwargs(domain_name, + num_videos, + dynamic, + dataset_path, + dataset_videos=None, + shuffle=False, + video_alpha=1.0): + assert domain_name in ['reacher', 'cartpole', 'finger', 'cheetah', + 'ball_in_cup', 'walker'] + if domain_name == 'reacher': + ground_plane_alpha = 0.0 + elif domain_name in ['walker', 'cheetah']: + ground_plane_alpha = 1.0 + else: + ground_plane_alpha = 0.3 + + return dict( + num_videos=num_videos, + video_alpha=video_alpha, + ground_plane_alpha=ground_plane_alpha, + dynamic=dynamic, + dataset_path=dataset_path, + dataset_videos=dataset_videos, + shuffle_buffer_size=100 if shuffle else None, + ) diff --git a/envs/fb_mtenv_dmc/.DS_Store b/envs/fb_mtenv_dmc/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5bc5cfdd900eb1a3ca697b423ade62e2b51de149 Binary files /dev/null and b/envs/fb_mtenv_dmc/.DS_Store differ diff --git a/envs/fb_mtenv_dmc/LICENSE b/envs/fb_mtenv_dmc/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..87cbf536c6c48a5f7b46e7b47338aa35af36dd78 --- /dev/null +++ b/envs/fb_mtenv_dmc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/README.md b/envs/fb_mtenv_dmc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..135ab4240e6e8252b1ca8d0af151811b8447f90e --- /dev/null +++ b/envs/fb_mtenv_dmc/README.md @@ -0,0 +1,56 @@ +# DeepMind Control Suite. + +This submodule contains the domains and tasks described in the +[DeepMind Control Suite tech report](https://arxiv.org/abs/1801.00690). + +## Quickstart + +```python +from dm_control import suite +import numpy as np + +# Load one task: +env = suite.load(domain_name="cartpole", task_name="swingup") + +# Iterate over a task set: +for domain_name, task_name in suite.BENCHMARKING: + env = suite.load(domain_name, task_name) + +# Step through an episode and print out reward, discount and observation. 
+action_spec = env.action_spec() +time_step = env.reset() +while not time_step.last(): + action = np.random.uniform(action_spec.minimum, + action_spec.maximum, + size=action_spec.shape) + time_step = env.step(action) + print(time_step.reward, time_step.discount, time_step.observation) +``` + +## Illustration video + +Below is a video montage of solved Control Suite tasks, with reward +visualisation enabled. + +[![Video montage](https://img.youtube.com/vi/rAai4QzcYbs/0.jpg)](https://www.youtube.com/watch?v=rAai4QzcYbs) + + +### Quadruped domain [April 2019] + +Roughly based on the 'ant' model introduced by [Schulman et al. 2015](https://arxiv.org/abs/1506.02438). Main modifications to the body are: + +- 4 DoFs per leg, 1 constraining tendon. +- 3 actuators per leg: 'yaw', 'lift', 'extend'. +- Filtered position actuators with timescale of 100ms. +- Sensors include an IMU, force/torque sensors, and rangefinders. + +Four tasks: + +- `walk` and `run`: self-right the body then move forward at a desired speed. +- `escape`: escape a bowl-shaped random terrain (uses rangefinders). +- `fetch`, go to a moving ball and bring it to a target. + +All behaviors in the video below were trained with [Abdolmaleki et al's +MPO](https://arxiv.org/abs/1806.06920). + +[![Video montage](https://img.youtube.com/vi/RhRLjbb7pBE/0.jpg)](https://www.youtube.com/watch?v=RhRLjbb7pBE) diff --git a/envs/fb_mtenv_dmc/__init__.py b/envs/fb_mtenv_dmc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e45dc2a347be33693bcc3514c9834009289e42f --- /dev/null +++ b/envs/fb_mtenv_dmc/__init__.py @@ -0,0 +1,167 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""A collection of MuJoCo-based Reinforcement Learning environments.""" + +from __future__ import absolute_import, division, print_function + +import collections +import inspect +import itertools +import sys + +from dm_control.rl import control + +from . import ( + acrobot, + ball_in_cup, + cartpole, + cheetah, + finger, + fish, + hopper, + humanoid, + humanoid_CMU, + lqr, + manipulator, + pendulum, + point_mass, + quadruped, + reacher, + stacker, + swimmer, + walker, +) + +# Find all domains imported. 
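+# A module in this package counts as a domain if it exposes a `SUITE` + # TaggedTasks container.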
+_DOMAINS = { + name: module + for name, module in locals().items() + if inspect.ismodule(module) and hasattr(module, "SUITE") +} + + +def _get_tasks(tag): + """Returns a sequence of (domain name, task name) pairs for the given tag.""" + result = [] + + for domain_name in sorted(_DOMAINS.keys()): + domain = _DOMAINS[domain_name] + + if tag is None: + tasks_in_domain = domain.SUITE + else: + tasks_in_domain = domain.SUITE.tagged(tag) + + for task_name in tasks_in_domain.keys(): + result.append((domain_name, task_name)) + + return tuple(result) + + +def _get_tasks_by_domain(tasks): + """Returns a dict mapping from task name to a tuple of domain names.""" + result = collections.defaultdict(list) + + for domain_name, task_name in tasks: + result[domain_name].append(task_name) + + return {k: tuple(v) for k, v in result.items()} + + +# A sequence containing all (domain name, task name) pairs. +ALL_TASKS = _get_tasks(tag=None) + +# Subsets of ALL_TASKS, generated via the tag mechanism. +BENCHMARKING = _get_tasks("benchmarking") +EASY = _get_tasks("easy") +HARD = _get_tasks("hard") +EXTRA = tuple(sorted(set(ALL_TASKS) - set(BENCHMARKING))) + +# A mapping from each domain name to a sequence of its task names. +TASKS_BY_DOMAIN = _get_tasks_by_domain(ALL_TASKS) + + +def load( + domain_name, + task_name, + task_kwargs=None, + environment_kwargs=None, + visualize_reward=False, +): + """Returns an environment from a domain name, task name and optional settings. + + ```python + env = suite.load('cartpole', 'balance') + ``` + + Args: + domain_name: A string containing the name of a domain. + task_name: A string containing the name of a task. + task_kwargs: Optional `dict` of keyword arguments for the task. + environment_kwargs: Optional `dict` specifying keyword arguments for the + environment. + visualize_reward: Optional `bool`. If `True`, object colours in rendered + frames are set to indicate the reward at each step. Default `False`. + + Returns: + The requested environment. + """ + return build_environment( + domain_name, task_name, task_kwargs, environment_kwargs, visualize_reward, + ) + + +def build_environment( + domain_name, + task_name, + task_kwargs=None, + environment_kwargs=None, + visualize_reward=False, +): + """Returns an environment from the suite given a domain name and a task name. + + Args: + domain_name: A string containing the name of a domain. + task_name: A string containing the name of a task. + task_kwargs: Optional `dict` specifying keyword arguments for the task. + environment_kwargs: Optional `dict` specifying keyword arguments for the + environment. + visualize_reward: Optional `bool`. If `True`, object colours in rendered + frames are set to indicate the reward at each step. Default `False`. + + Raises: + ValueError: If the domain or task doesn't exist. + + Returns: + An instance of the requested environment. 
+ """ + if domain_name not in _DOMAINS: + raise ValueError("Domain {!r} does not exist.".format(domain_name)) + + domain = _DOMAINS[domain_name] + + if task_name not in domain.SUITE: + raise ValueError( + "Level {!r} does not exist in domain {!r}.".format(task_name, domain_name) + ) + + task_kwargs = task_kwargs or {} + if environment_kwargs is not None: + task_kwargs = task_kwargs.copy() + task_kwargs["environment_kwargs"] = environment_kwargs + env = domain.SUITE[task_name](**task_kwargs) + env.task.visualize_reward = visualize_reward + return env diff --git a/envs/fb_mtenv_dmc/acrobot.xml b/envs/fb_mtenv_dmc/acrobot.xml new file mode 100644 index 0000000000000000000000000000000000000000..79b76d9c6c609b8168b193025e0e2a15279282d7 --- /dev/null +++ b/envs/fb_mtenv_dmc/acrobot.xml @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/ball_in_cup.py b/envs/fb_mtenv_dmc/ball_in_cup.py new file mode 100644 index 0000000000000000000000000000000000000000..daf479c2ec49d384dc41e8d111325d6e730e5fc8 --- /dev/null +++ b/envs/fb_mtenv_dmc/ball_in_cup.py @@ -0,0 +1,104 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Ball-in-Cup Domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . import common +from dm_control.utils import containers + +_DEFAULT_TIME_LIMIT = 20 # (seconds) +_CONTROL_TIMESTEP = 0.02 # (seconds) + + +SUITE = containers.TaggedTasks() + + +def get_model_and_assets(): + """Returns a tuple containing the model XML string and a dict of assets.""" + return common.read_model("ball_in_cup.xml"), common.ASSETS + + +@SUITE.add("benchmarking", "easy") +def catch(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Ball-in-Cup task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = BallInCup(random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics with additional features for the Ball-in-Cup domain.""" + + def ball_to_target(self): + """Returns the vector from the ball to the target.""" + target = self.named.data.site_xpos["target", ["x", "z"]] + ball = self.named.data.xpos["ball", ["x", "z"]] + return target - ball + + def in_target(self): + """Returns 1 if the ball is in the target, 0 otherwise.""" + ball_to_target = abs(self.ball_to_target()) + target_size = self.named.model.site_size["target", [0, 2]] + ball_size = self.named.model.geom_size["ball", 0] + return float(all(ball_to_target < target_size - ball_size)) + + +class BallInCup(base.Task): + """The Ball-in-Cup task. 
Put the ball in the cup.""" + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode. + + Args: + physics: An instance of `Physics`. + + """ + # Find a collision-free random initial position of the ball. + penetrating = True + while penetrating: + # Assign a random ball position. + physics.named.data.qpos["ball_x"] = self.random.uniform(-0.2, 0.2) + physics.named.data.qpos["ball_z"] = self.random.uniform(0.2, 0.5) + # Check for collisions. + physics.after_reset() + penetrating = physics.data.ncon > 0 + super(BallInCup, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation of the state.""" + obs = collections.OrderedDict() + obs["position"] = physics.position() + obs["velocity"] = physics.velocity() + return obs + + def get_reward(self, physics): + """Returns a sparse reward.""" + return physics.in_target() diff --git a/envs/fb_mtenv_dmc/ball_in_cup.xml b/envs/fb_mtenv_dmc/ball_in_cup.xml new file mode 100644 index 0000000000000000000000000000000000000000..792073f02ce9c71c5d6231f20b5e8c8934959bbb --- /dev/null +++ b/envs/fb_mtenv_dmc/ball_in_cup.xml @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole.py b/envs/fb_mtenv_dmc/cartpole.py new file mode 100644 index 0000000000000000000000000000000000000000..778bbd7f772bff62baca010fa014645ba0ab3408 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole.py @@ -0,0 +1,252 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Cartpole domain.""" + +from __future__ import absolute_import, division, print_function + +import collections + +import numpy as np +from dm_control import mujoco +from dm_control.rl import control +from dm_control.utils import containers, rewards +from . 
import base, common + from lxml import etree + from six.moves import range + +_DEFAULT_TIME_LIMIT = 10 +SUITE = containers.TaggedTasks() + + +def get_model_and_assets(num_poles=1, xml_file_id=None): + """Returns a tuple containing the model XML string and a dict of assets.""" + return _make_model(num_poles, xml_file_id), common.ASSETS + + +@SUITE.add("benchmarking") +def balance( + time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None, +): + """Returns the Cartpole Balance task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Balance(swing_up=False, sparse=False, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +@SUITE.add("benchmarking") +def balance_sparse( + time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns the sparse reward variant of the Cartpole Balance task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Balance(swing_up=False, sparse=True, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +@SUITE.add("benchmarking") +def swingup( + time_limit=_DEFAULT_TIME_LIMIT, + xml_file_id=None, + random=None, + environment_kwargs=None, +): + """Returns the Cartpole Swing-Up task.""" + physics = Physics.from_xml_string(*get_model_and_assets(xml_file_id=xml_file_id)) + task = Balance(swing_up=True, sparse=False, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +@SUITE.add("benchmarking") +def swingup_sparse( + time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns the sparse reward variant of the Cartpole Swing-Up task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Balance(swing_up=True, sparse=True, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +@SUITE.add() +def two_poles(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Cartpole Balance task with two poles.""" + physics = Physics.from_xml_string(*get_model_and_assets(num_poles=2)) + task = Balance(swing_up=True, sparse=False, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +@SUITE.add() +def three_poles( + time_limit=_DEFAULT_TIME_LIMIT, + random=None, + num_poles=3, + sparse=False, + environment_kwargs=None, +): + """Returns the Cartpole Balance task with three or more poles.""" + physics = Physics.from_xml_string(*get_model_and_assets(num_poles=num_poles)) + task = Balance(swing_up=True, sparse=sparse, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +def _make_model(n_poles, xml_file_id=None): + """Generates an xml string defining a cart with `n_poles` bodies.""" + if xml_file_id is not None: + filename = f"cartpole_{xml_file_id}.xml" + print(filename) + else: + filename = "cartpole.xml" + xml_string = common.read_model(filename) + if n_poles == 1: + return xml_string + mjcf = etree.fromstring(xml_string) + parent = mjcf.find("./worldbody/body/body") # Find first pole.
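+    # The loop below appends each additional pole as a child of the previous + # one, so the hinges form a serial chain hanging from the first pole.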
+ # Make chain of poles. + for pole_index in range(2, n_poles + 1): + child = etree.Element( + "body", name="pole_{}".format(pole_index), pos="0 0 1", childclass="pole" + ) + etree.SubElement(child, "joint", name="hinge_{}".format(pole_index)) + etree.SubElement(child, "geom", name="pole_{}".format(pole_index)) + parent.append(child) + parent = child + # Move plane down. + floor = mjcf.find("./worldbody/geom") + floor.set("pos", "0 0 {}".format(1 - n_poles - 0.05)) + # Move cameras back. + cameras = mjcf.findall("./worldbody/camera") + cameras[0].set("pos", "0 {} 1".format(-1 - 2 * n_poles)) + cameras[1].set("pos", "0 {} 2".format(-2 * n_poles)) + return etree.tostring(mjcf, pretty_print=True) + + +class Physics(mujoco.Physics): + """Physics simulation with additional features for the Cartpole domain.""" + + def cart_position(self): + """Returns the position of the cart.""" + return self.named.data.qpos["slider"][0] + + def angular_vel(self): + """Returns the angular velocity of the pole.""" + return self.data.qvel[1:] + + def pole_angle_cosine(self): + """Returns the cosine of the pole angle.""" + return self.named.data.xmat[2:, "zz"] + + def bounded_position(self): + """Returns the state, with pole angle split into sin/cos.""" + return np.hstack( + (self.cart_position(), self.named.data.xmat[2:, ["zz", "xz"]].ravel()) + ) + + +class Balance(base.Task): + """A Cartpole `Task` to balance the pole. + + State is initialized either close to the target configuration or at a random + configuration. + """ + + _CART_RANGE = (-0.25, 0.25) + _ANGLE_COSINE_RANGE = (0.995, 1) + + def __init__(self, swing_up, sparse, random=None): + """Initializes an instance of `Balance`. + + Args: + swing_up: A `bool`, which if `True` sets the cart to the middle of the + slider and the pole pointing towards the ground. Otherwise, sets the + cart to a random position on the slider and the pole to a random + near-vertical position. + sparse: A `bool`, whether to return a sparse or a smooth reward. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). + """ + self._sparse = sparse + self._swing_up = swing_up + super(Balance, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode. + + Initializes the cart and pole according to `swing_up`, and in both cases + adds a small random initial velocity to break symmetry. + + Args: + physics: An instance of `Physics`. 
+ """ + nv = physics.model.nv + if self._swing_up: + physics.named.data.qpos["slider"] = 0.01 * self.random.randn() + physics.named.data.qpos["hinge_1"] = np.pi + 0.01 * self.random.randn() + physics.named.data.qpos[2:] = 0.1 * self.random.randn(nv - 2) + else: + physics.named.data.qpos["slider"] = self.random.uniform(-0.1, 0.1) + physics.named.data.qpos[1:] = self.random.uniform(-0.034, 0.034, nv - 1) + physics.named.data.qvel[:] = 0.01 * self.random.randn(physics.model.nv) + super(Balance, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation of the (bounded) physics state.""" + obs = collections.OrderedDict() + obs["position"] = physics.bounded_position() + obs["velocity"] = physics.velocity() + return obs + + def _get_reward(self, physics, sparse): + if sparse: + cart_in_bounds = rewards.tolerance( + physics.cart_position(), self._CART_RANGE + ) + angle_in_bounds = rewards.tolerance( + physics.pole_angle_cosine(), self._ANGLE_COSINE_RANGE + ).prod() + return cart_in_bounds * angle_in_bounds + else: + upright = (physics.pole_angle_cosine() + 1) / 2 + centered = rewards.tolerance(physics.cart_position(), margin=2) + centered = (1 + centered) / 2 + small_control = rewards.tolerance( + physics.control(), margin=1, value_at_margin=0, sigmoid="quadratic" + )[0] + small_control = (4 + small_control) / 5 + small_velocity = rewards.tolerance(physics.angular_vel(), margin=5).min() + small_velocity = (1 + small_velocity) / 2 + return upright.mean() * small_control * small_velocity * centered + + def get_reward(self, physics): + """Returns a sparse or a smooth reward, as specified in the constructor.""" + return self._get_reward(physics, sparse=self._sparse) diff --git a/envs/fb_mtenv_dmc/cartpole.xml b/envs/fb_mtenv_dmc/cartpole.xml new file mode 100644 index 0000000000000000000000000000000000000000..e01869dd239a09a3e370f863bb62ffa1ab968757 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_cart_mass_10.xml b/envs/fb_mtenv_dmc/cartpole_cart_mass_10.xml new file mode 100644 index 0000000000000000000000000000000000000000..1ffa77227719907f0bd0a10dd627fcfa10f8a63d --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_cart_mass_10.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_cart_mass_8.xml b/envs/fb_mtenv_dmc/cartpole_cart_mass_8.xml new file mode 100644 index 0000000000000000000000000000000000000000..001546a782f28fcee1e5629522a357bed3bfdb9e --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_cart_mass_8.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_cart_mass_9.xml b/envs/fb_mtenv_dmc/cartpole_cart_mass_9.xml new file mode 100644 index 0000000000000000000000000000000000000000..0e01ff0211df73690a2ff4a17796e6d530d842c4 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_cart_mass_9.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_1.xml b/envs/fb_mtenv_dmc/cartpole_pole_mass_1.xml new file mode 100644 index 0000000000000000000000000000000000000000..e01869dd239a09a3e370f863bb62ffa1ab968757 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_1.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_2.xml 
b/envs/fb_mtenv_dmc/cartpole_pole_mass_2.xml new file mode 100644 index 0000000000000000000000000000000000000000..156f090bd430c4105b73d9c8dabe38c2590c1ce8 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_2.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_3.xml b/envs/fb_mtenv_dmc/cartpole_pole_mass_3.xml new file mode 100644 index 0000000000000000000000000000000000000000..3f2128c316b211e95f9fe3cba2dd75c8bdb355fc --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_3.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_4.xml b/envs/fb_mtenv_dmc/cartpole_pole_mass_4.xml new file mode 100644 index 0000000000000000000000000000000000000000..8968b0bf29010da20b1be6c03070732601170e74 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_4.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_5.xml b/envs/fb_mtenv_dmc/cartpole_pole_mass_5.xml new file mode 100644 index 0000000000000000000000000000000000000000..d448817f53203ec73c095b65e86852247d9757d6 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_5.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_6.xml b/envs/fb_mtenv_dmc/cartpole_pole_mass_6.xml new file mode 100644 index 0000000000000000000000000000000000000000..0b8e8b1900f962dcd67b734575212704ddbcd496 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_6.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_7.xml b/envs/fb_mtenv_dmc/cartpole_pole_mass_7.xml new file mode 100644 index 0000000000000000000000000000000000000000..cfb0f852b898528904953c0b5a76c6b2cb6a4ca0 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_7.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_8.xml b/envs/fb_mtenv_dmc/cartpole_pole_mass_8.xml new file mode 100644 index 0000000000000000000000000000000000000000..c92cb62d0c38b152b7529364261ab5080e7eea38 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_8.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cartpole_pole_mass_9.xml b/envs/fb_mtenv_dmc/cartpole_pole_mass_9.xml new file mode 100644 index 0000000000000000000000000000000000000000..86b4414ed3e69595f873ee0520eddce255d4c353 --- /dev/null +++ b/envs/fb_mtenv_dmc/cartpole_pole_mass_9.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/cheetah.py b/envs/fb_mtenv_dmc/cheetah.py new file mode 100644 index 0000000000000000000000000000000000000000..78bd562b4c6f8fbd6c7c41cdfa761dabf771417a --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah.py @@ -0,0 +1,105 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Cheetah Domain.""" + +from __future__ import absolute_import, division, print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from dm_control.utils import containers, rewards +from . import base, common + +# How long the simulation will run, in seconds. +_DEFAULT_TIME_LIMIT = 10 + +# Running speed above which reward is 1. +_RUN_SPEED = 10 + +SUITE = containers.TaggedTasks() + + +def get_model_and_assets(xml_file_id): + """Returns a tuple containing the model XML string and a dict of assets.""" + if xml_file_id is not None: + filename = f"cheetah_{xml_file_id}.xml" + print(filename) + else: + filename = f"cheetah.xml" + return common.read_model(filename), common.ASSETS + + +@SUITE.add("benchmarking") +def run( + time_limit=_DEFAULT_TIME_LIMIT, + xml_file_id=None, + random=None, + environment_kwargs=None, +): + """Returns the run task.""" + physics = Physics.from_xml_string(*get_model_and_assets(xml_file_id)) + task = Cheetah(random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics simulation with additional features for the Cheetah domain.""" + + def speed(self): + """Returns the horizontal speed of the Cheetah.""" + return self.named.data.sensordata["torso_subtreelinvel"][0] + + +class Cheetah(base.Task): + """A `Task` to train a running Cheetah.""" + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + # The indexing below assumes that all joints have a single DOF. + assert physics.model.nq == physics.model.njnt + is_limited = physics.model.jnt_limited == 1 + lower, upper = physics.model.jnt_range[is_limited].T + physics.data.qpos[is_limited] = self.random.uniform(lower, upper) + + # Stabilize the model before the actual simulation. + for _ in range(200): + physics.step() + + physics.data.time = 0 + self._timeout_progress = 0 + super(Cheetah, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation of the state, ignoring horizontal position.""" + obs = collections.OrderedDict() + # Ignores horizontal position to maintain translational invariance. 
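+ # In this model the first generalized coordinate is the root's horizontal
+ # position, so slicing it off makes the observation independent of how far
+ # the cheetah has already travelled along the track.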
+ obs["position"] = physics.data.qpos[1:].copy() + obs["velocity"] = physics.velocity() + return obs + + def get_reward(self, physics): + """Returns a reward to the agent.""" + return rewards.tolerance( + physics.speed(), + bounds=(_RUN_SPEED, float("inf")), + margin=_RUN_SPEED, + value_at_margin=0, + sigmoid="linear", + ) diff --git a/envs/fb_mtenv_dmc/cheetah.xml b/envs/fb_mtenv_dmc/cheetah.xml new file mode 100644 index 0000000000000000000000000000000000000000..26076afa01d98544746cb02cfce1ff6132a63952 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_bfoot_len_1.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_1.xml new file mode 100644 index 0000000000000000000000000000000000000000..26076afa01d98544746cb02cfce1ff6132a63952 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_1.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_bfoot_len_2.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_2.xml new file mode 100644 index 0000000000000000000000000000000000000000..f67ea670033779d4b7d1f2ac66cad275a2889ccf --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_2.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_bfoot_len_3.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_3.xml new file mode 100644 index 0000000000000000000000000000000000000000..5842e28ad7510e2fc2c8aabf661706aea30a2b5f --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_3.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_bfoot_len_4.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_4.xml new file mode 100644 index 0000000000000000000000000000000000000000..bbb280aee77876bac24a3fc54b394a703dd9321d --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_4.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_bfoot_len_5.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_5.xml new file mode 100644 index 0000000000000000000000000000000000000000..89c2bfa7a5254ce41f73c405092b7a1ebad9a49d --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_5.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_bfoot_len_6.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_6.xml new file mode 100644 index 0000000000000000000000000000000000000000..c3b66832f1067c85b7bd8d9cef9b3d352ceabdaf --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_6.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_bfoot_len_7.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_7.xml new file mode 100644 index 0000000000000000000000000000000000000000..cc2c0b028b4100af8b8804d94ee81925385fedb8 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_7.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_bfoot_len_8.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_8.xml new file mode 100644 index 0000000000000000000000000000000000000000..02d684b161327c7120c5fbfdedb79b4fbf264b08 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_8.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/envs/fb_mtenv_dmc/cheetah_bfoot_len_9.xml b/envs/fb_mtenv_dmc/cheetah_bfoot_len_9.xml new file mode 100644 index 0000000000000000000000000000000000000000..2470117d97f9f0c757f12cf9d8d71efc89d088a4 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_bfoot_len_9.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_pos_1.xml b/envs/fb_mtenv_dmc/cheetah_foot_pos_1.xml new file mode 100644 index 0000000000000000000000000000000000000000..26076afa01d98544746cb02cfce1ff6132a63952 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_pos_1.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_pos_10.xml b/envs/fb_mtenv_dmc/cheetah_foot_pos_10.xml new file mode 100644 index 0000000000000000000000000000000000000000..30dc05728f166f1fd22fdf75bb5235a8a6781193 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_pos_10.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_pos_2.xml b/envs/fb_mtenv_dmc/cheetah_foot_pos_2.xml new file mode 100644 index 0000000000000000000000000000000000000000..0a52c9e2f231984c38e6fbd0ea0e2aca7bcaa477 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_pos_2.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_pos_3.xml b/envs/fb_mtenv_dmc/cheetah_foot_pos_3.xml new file mode 100644 index 0000000000000000000000000000000000000000..42145bb1827f388c9420e06085fd49f50177d50a --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_pos_3.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_pos_4.xml b/envs/fb_mtenv_dmc/cheetah_foot_pos_4.xml new file mode 100644 index 0000000000000000000000000000000000000000..907d91a33021c415610ee26aaf9dea0caf3f93c6 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_pos_4.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_pos_5.xml b/envs/fb_mtenv_dmc/cheetah_foot_pos_5.xml new file mode 100644 index 0000000000000000000000000000000000000000..25ca76dba6f609556392ec0ef4a77b53460c8e02 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_pos_5.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_pos_6.xml b/envs/fb_mtenv_dmc/cheetah_foot_pos_6.xml new file mode 100644 index 0000000000000000000000000000000000000000..fabf5a103e518559c003065decbf20468888a092 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_pos_6.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_pos_7.xml b/envs/fb_mtenv_dmc/cheetah_foot_pos_7.xml new file mode 100644 index 0000000000000000000000000000000000000000..85e27fcbe4f6a5ec50698f33e10acd8017ce542e --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_pos_7.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_1.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_1.xml new file mode 100644 index 0000000000000000000000000000000000000000..26076afa01d98544746cb02cfce1ff6132a63952 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_1.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff 
--git a/envs/fb_mtenv_dmc/cheetah_foot_size_10.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_10.xml new file mode 100644 index 0000000000000000000000000000000000000000..5eb0309f063b4a18d639fd249fa49622dea44863 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_10.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_2.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_2.xml new file mode 100644 index 0000000000000000000000000000000000000000..32b6c30879be8965a54de69aa37a0564f45504e9 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_2.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_3.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_3.xml new file mode 100644 index 0000000000000000000000000000000000000000..debeae036fdb41f32c8277f7b921b8b85ca2aa6e --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_3.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_4.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_4.xml new file mode 100644 index 0000000000000000000000000000000000000000..0d0571a03eb1feb7dd5eb218bdea87609756dd03 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_4.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_5.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_5.xml new file mode 100644 index 0000000000000000000000000000000000000000..8e90ad48c4a0ab636270ec03a767bcaf7861b9ea --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_5.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_6.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_6.xml new file mode 100644 index 0000000000000000000000000000000000000000..8cf2c4881f49e38c9dbfee5e318ddc3f2764797b --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_6.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_7.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_7.xml new file mode 100644 index 0000000000000000000000000000000000000000..e408b004830cd3802a119af3407fdc4675ab0165 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_7.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_8.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_8.xml new file mode 100644 index 0000000000000000000000000000000000000000..d419233cceb039789ae3c449be302663522372da --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_8.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_foot_size_9.xml b/envs/fb_mtenv_dmc/cheetah_foot_size_9.xml new file mode 100644 index 0000000000000000000000000000000000000000..d383a94276ca4ab6ea574a1fc28859bc8be21f5a --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_foot_size_9.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_1.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_1.xml new file mode 100644 index 0000000000000000000000000000000000000000..26076afa01d98544746cb02cfce1ff6132a63952 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_1.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + 
+ + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_10.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_10.xml new file mode 100644 index 0000000000000000000000000000000000000000..19cd5e9030b0dfa22711caa199cf75c3534dd4f6 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_10.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_2.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_2.xml new file mode 100644 index 0000000000000000000000000000000000000000..524aeb85023a513985e4d3c675eb4ef828761f1f --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_2.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_3.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_3.xml new file mode 100644 index 0000000000000000000000000000000000000000..0030d46197101d34bf1408de416c284ff617d68e --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_3.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_4.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_4.xml new file mode 100644 index 0000000000000000000000000000000000000000..d1acb5e13387358537400abf505fbb5b2fd5829c --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_4.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_5.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_5.xml new file mode 100644 index 0000000000000000000000000000000000000000..b9a569c915c1ffaaccf99cef33b2622709a1b281 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_5.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_6.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_6.xml new file mode 100644 index 0000000000000000000000000000000000000000..ad1c29a1a9926dd865a454d70967a815597de2df --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_6.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_7.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_7.xml new file mode 100644 index 0000000000000000000000000000000000000000..c43cbb633aa8dde5abf01f46422b27fe9cf8d838 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_7.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_8.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_8.xml new file mode 100644 index 0000000000000000000000000000000000000000..166ec46d6ba32b9a38229eaac20a766302859d4b --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_8.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/cheetah_torso_length_9.xml b/envs/fb_mtenv_dmc/cheetah_torso_length_9.xml new file mode 100644 index 0000000000000000000000000000000000000000..b8149fed644615a156916cf340f0eb03319e1751 --- /dev/null +++ b/envs/fb_mtenv_dmc/cheetah_torso_length_9.xml @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/envs/fb_mtenv_dmc/common/__init__.py b/envs/fb_mtenv_dmc/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a99752426149664e28eb19d721dc11bfbbe506dd --- /dev/null +++ 
b/envs/fb_mtenv_dmc/common/__init__.py @@ -0,0 +1,41 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Functions to manage the common assets for domains.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from dm_control.utils import io as resources + +_SUITE_DIR = os.path.dirname(os.path.dirname(__file__)) +_FILENAMES = [ + "./common/materials.xml", + "./common/materials_white_floor.xml", + "./common/skybox.xml", + "./common/visual.xml", +] + +ASSETS = { + filename: resources.GetResource(os.path.join(_SUITE_DIR, filename)) + for filename in _FILENAMES +} + + +def read_model(model_filename): + """Reads a model XML file and returns its contents as a string.""" + return resources.GetResource(os.path.join(_SUITE_DIR, model_filename)) diff --git a/envs/fb_mtenv_dmc/common/materials.xml b/envs/fb_mtenv_dmc/common/materials.xml new file mode 100644 index 0000000000000000000000000000000000000000..5a3b169ecd12dfb9d7b8c93092114272ebb061ea --- /dev/null +++ b/envs/fb_mtenv_dmc/common/materials.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/common/materials_white_floor.xml b/envs/fb_mtenv_dmc/common/materials_white_floor.xml new file mode 100644 index 0000000000000000000000000000000000000000..a1e35c2f01e6fda5921ef0902e6555661e9433be --- /dev/null +++ b/envs/fb_mtenv_dmc/common/materials_white_floor.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/common/skybox.xml b/envs/fb_mtenv_dmc/common/skybox.xml new file mode 100644 index 0000000000000000000000000000000000000000..b8886923e3a1d7abeb4dbe9e3a8a791d7cc5c520 --- /dev/null +++ b/envs/fb_mtenv_dmc/common/skybox.xml @@ -0,0 +1,6 @@ + + + + + diff --git a/envs/fb_mtenv_dmc/common/visual.xml b/envs/fb_mtenv_dmc/common/visual.xml new file mode 100644 index 0000000000000000000000000000000000000000..ede15ad497c1721e2978af6c1478ab5aef6a8705 --- /dev/null +++ b/envs/fb_mtenv_dmc/common/visual.xml @@ -0,0 +1,7 @@ + + + + + + + diff --git a/envs/fb_mtenv_dmc/explore.py b/envs/fb_mtenv_dmc/explore.py new file mode 100644 index 0000000000000000000000000000000000000000..c552e97db5820a6ddd9b62799a3f9946232d0ba1 --- /dev/null +++ b/envs/fb_mtenv_dmc/explore.py @@ -0,0 +1,95 @@ +# Copyright 2018 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Control suite environments explorer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import app +from absl import flags +from dm_control import suite +from dm_control.suite.wrappers import action_noise +from six.moves import input + +from dm_control import viewer + + +_ALL_NAMES = [".".join(domain_task) for domain_task in suite.ALL_TASKS] + +flags.DEFINE_enum( + "environment_name", + None, + _ALL_NAMES, + "Optional 'domain_name.task_name' pair specifying the " + "environment to load. If unspecified a prompt will appear to " + "select one.", +) +flags.DEFINE_bool("timeout", True, "Whether episodes should have a time limit.") +flags.DEFINE_bool( + "visualize_reward", + True, + "Whether to vary the colors of geoms according to the " "current reward value.", +) +flags.DEFINE_float( + "action_noise", + 0.0, + "Standard deviation of Gaussian noise to apply to actions, " + "expressed as a fraction of the max-min range for each " + "action dimension. Defaults to 0, i.e. no noise.", +) +FLAGS = flags.FLAGS + + +def prompt_environment_name(prompt, values): + environment_name = None + while not environment_name: + environment_name = input(prompt) + if not environment_name or values.index(environment_name) < 0: + print('"%s" is not a valid environment name.' % environment_name) + environment_name = None + return environment_name + + +def main(argv): + del argv + environment_name = FLAGS.environment_name + if environment_name is None: + print("\n ".join(["Available environments:"] + _ALL_NAMES)) + environment_name = prompt_environment_name( + "Please select an environment name: ", _ALL_NAMES + ) + + index = _ALL_NAMES.index(environment_name) + domain_name, task_name = suite.ALL_TASKS[index] + + task_kwargs = {} + if not FLAGS.timeout: + task_kwargs["time_limit"] = float("inf") + + def loader(): + env = suite.load( + domain_name=domain_name, task_name=task_name, task_kwargs=task_kwargs + ) + env.task.visualize_reward = FLAGS.visualize_reward + if FLAGS.action_noise > 0: + env = action_noise.Wrapper(env, scale=FLAGS.action_noise) + return env + + viewer.launch(loader) + + +if __name__ == "__main__": + app.run(main) diff --git a/envs/fb_mtenv_dmc/finger.xml b/envs/fb_mtenv_dmc/finger.xml new file mode 100644 index 0000000000000000000000000000000000000000..37a39146a34cd937fbb4abf7a85dbde0cc4240b8 --- /dev/null +++ b/envs/fb_mtenv_dmc/finger.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_1.xml b/envs/fb_mtenv_dmc/finger_size_1.xml new file mode 100644 index 0000000000000000000000000000000000000000..37a39146a34cd937fbb4abf7a85dbde0cc4240b8 --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_1.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_10.xml b/envs/fb_mtenv_dmc/finger_size_10.xml new file mode 100644 index 0000000000000000000000000000000000000000..c1490eaf8bfda433a1630be9453b8670de6816b0 --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_10.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/envs/fb_mtenv_dmc/finger_size_2.xml b/envs/fb_mtenv_dmc/finger_size_2.xml new file mode 100644 index 0000000000000000000000000000000000000000..b488b6bfcb628dabbfed5ca22a086a46e9c93d2c --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_2.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_3.xml b/envs/fb_mtenv_dmc/finger_size_3.xml new file mode 100644 index 0000000000000000000000000000000000000000..cad2772095568e9d9103f4b51bfc759342ed2002 --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_3.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_4.xml b/envs/fb_mtenv_dmc/finger_size_4.xml new file mode 100644 index 0000000000000000000000000000000000000000..fa73efe97fa6ca07a92c89ded81cb315af990f54 --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_4.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_5.xml b/envs/fb_mtenv_dmc/finger_size_5.xml new file mode 100644 index 0000000000000000000000000000000000000000..447fe137059f26a252b74b4ab61c3c77bc508bab --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_5.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_6.xml b/envs/fb_mtenv_dmc/finger_size_6.xml new file mode 100644 index 0000000000000000000000000000000000000000..12e466fae83de4819e0d3e6ed4f42dc3ac076417 --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_6.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_7.xml b/envs/fb_mtenv_dmc/finger_size_7.xml new file mode 100644 index 0000000000000000000000000000000000000000..710f8c613b5e71ae6b763f72671313d1bc8944ba --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_7.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_8.xml b/envs/fb_mtenv_dmc/finger_size_8.xml new file mode 100644 index 0000000000000000000000000000000000000000..5407ae522f4ee2ac89d78ccd07ae9c2a27b0235e --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_8.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/finger_size_9.xml b/envs/fb_mtenv_dmc/finger_size_9.xml new file mode 100644 index 0000000000000000000000000000000000000000..197d95f97d06b4ecdd8d19204ee81f760b2420e4 --- /dev/null +++ b/envs/fb_mtenv_dmc/finger_size_9.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/hopper.py b/envs/fb_mtenv_dmc/hopper.py new file mode 100644 index 0000000000000000000000000000000000000000..fe253ac364d772509e4d399753d88138d689dcda --- /dev/null +++ b/envs/fb_mtenv_dmc/hopper.py @@ -0,0 +1,147 @@ +# Copyright 2017 The dm_control Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Hopper domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . import common +from dm_control.suite.utils import randomizers +from dm_control.utils import containers +from dm_control.utils import rewards +import numpy as np + + +SUITE = containers.TaggedTasks() + +_CONTROL_TIMESTEP = 0.02 # (Seconds) + +# Default duration of an episode, in seconds. +_DEFAULT_TIME_LIMIT = 20 + +# Minimal height of torso over foot above which stand reward is 1. +_STAND_HEIGHT = 0.6 + +# Hopping speed above which hop reward is 1. +_HOP_SPEED = 2 + + +def get_model_and_assets(): + """Returns a tuple containing the model XML string and a dict of assets.""" + return common.read_model("hopper.xml"), common.ASSETS + + +@SUITE.add("benchmarking") +def stand(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns a Hopper that strives to stand upright, balancing its pose.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Hopper(hopping=False, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +@SUITE.add("benchmarking") +def hop(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns a Hopper that strives to hop forward.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Hopper(hopping=True, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics simulation with additional features for the Hopper domain.""" + + def height(self): + """Returns height of torso with respect to foot.""" + return self.named.data.xipos["torso", "z"] - self.named.data.xipos["foot", "z"] + + def speed(self): + """Returns horizontal speed of the Hopper.""" + return self.named.data.sensordata["torso_subtreelinvel"][0] + + def touch(self): + """Returns the signals from two foot touch sensors.""" + return np.log1p(self.named.data.sensordata[["touch_toe", "touch_heel"]]) + + +class Hopper(base.Task): + """A Hopper's `Task` to train a standing and a jumping Hopper.""" + + def __init__(self, hopping, random=None): + """Initialize an instance of `Hopper`. + + Args: + hopping: Boolean, if True the task is to hop forwards, otherwise it is to + balance upright. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). 
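+
+ Example (illustrative):
+ task = Hopper(hopping=True, random=0) # forward-hopping task with a fixed seed
+ env = hop(random=0) # or build the full environment via the `hop` factory above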
+ """ + self._hopping = hopping + super(Hopper, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + randomizers.randomize_limited_and_rotational_joints(physics, self.random) + self._timeout_progress = 0 + super(Hopper, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation of positions, velocities and touch sensors.""" + obs = collections.OrderedDict() + # Ignores horizontal position to maintain translational invariance: + obs["position"] = physics.data.qpos[1:].copy() + obs["velocity"] = physics.velocity() + obs["touch"] = physics.touch() + return obs + + def get_reward(self, physics): + """Returns a reward applicable to the performed task.""" + standing = rewards.tolerance(physics.height(), (_STAND_HEIGHT, 2)) + if self._hopping: + hopping = rewards.tolerance( + physics.speed(), + bounds=(_HOP_SPEED, float("inf")), + margin=_HOP_SPEED / 2, + value_at_margin=0.5, + sigmoid="linear", + ) + return standing * hopping + else: + small_control = rewards.tolerance( + physics.control(), margin=1, value_at_margin=0, sigmoid="quadratic" + ).mean() + small_control = (small_control + 4) / 5 + return standing * small_control diff --git a/envs/fb_mtenv_dmc/hopper.xml b/envs/fb_mtenv_dmc/hopper.xml new file mode 100644 index 0000000000000000000000000000000000000000..0c8ec280a047346994ab739e706adb863f05350e --- /dev/null +++ b/envs/fb_mtenv_dmc/hopper.xml @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/humanoid.py b/envs/fb_mtenv_dmc/humanoid.py new file mode 100644 index 0000000000000000000000000000000000000000..75101c778dafbc2a73999f6dbc8a7d788146f8dd --- /dev/null +++ b/envs/fb_mtenv_dmc/humanoid.py @@ -0,0 +1,237 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Humanoid Domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . import common +from dm_control.suite.utils import randomizers +from dm_control.utils import containers +from dm_control.utils import rewards +import numpy as np + +_DEFAULT_TIME_LIMIT = 25 +_CONTROL_TIMESTEP = 0.025 + +# Height of head above which stand reward is 1. +_STAND_HEIGHT = 1.4 + +# Horizontal speeds above which move reward is 1. 
+_WALK_SPEED = 1 +_RUN_SPEED = 10 + + +SUITE = containers.TaggedTasks() + + +def get_model_and_assets(): + """Returns a tuple containing the model XML string and a dict of assets.""" + return common.read_model("humanoid.xml"), common.ASSETS + + +@SUITE.add("benchmarking") +def stand(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Stand task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Humanoid(move_speed=0, pure_state=False, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +@SUITE.add("benchmarking") +def walk(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Walk task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Humanoid(move_speed=_WALK_SPEED, pure_state=False, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +@SUITE.add("benchmarking") +def run(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Run task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Humanoid(move_speed=_RUN_SPEED, pure_state=False, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +@SUITE.add() +def run_pure_state( + time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns the Run task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Humanoid(move_speed=_RUN_SPEED, pure_state=True, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics simulation with additional features for the Walker domain.""" + + def torso_upright(self): + """Returns projection from z-axes of torso to the z-axes of world.""" + return self.named.data.xmat["torso", "zz"] + + def head_height(self): + """Returns the height of the torso.""" + return self.named.data.xpos["head", "z"] + + def center_of_mass_position(self): + """Returns position of the center-of-mass.""" + return self.named.data.subtree_com["torso"].copy() + + def center_of_mass_velocity(self): + """Returns the velocity of the center-of-mass.""" + return self.named.data.sensordata["torso_subtreelinvel"].copy() + + def torso_vertical_orientation(self): + """Returns the z-projection of the torso orientation matrix.""" + return self.named.data.xmat["torso", ["zx", "zy", "zz"]] + + def joint_angles(self): + """Returns the state without global orientation or position.""" + return self.data.qpos[7:].copy() # Skip the 7 DoFs of the free root joint. 
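+ # (A free root joint contributes 7 qpos entries: a 3-D translation plus a
+ # unit quaternion, but only 6 velocity DoFs.)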
+ + def extremities(self): + """Returns end effector positions in egocentric frame.""" + torso_frame = self.named.data.xmat["torso"].reshape(3, 3) + torso_pos = self.named.data.xpos["torso"] + positions = [] + for side in ("left_", "right_"): + for limb in ("hand", "foot"): + torso_to_limb = self.named.data.xpos[side + limb] - torso_pos + positions.append(torso_to_limb.dot(torso_frame)) + return np.hstack(positions) + + +class Humanoid(base.Task): + """A humanoid task.""" + + def __init__(self, move_speed, pure_state, random=None): + """Initializes an instance of `Humanoid`. + + Args: + move_speed: A float. If this value is zero, reward is given simply for + standing up. Otherwise this specifies a target horizontal velocity for + the walking task. + pure_state: A bool. Whether the observations consist of the pure MuJoCo + state or includes some useful features thereof. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). + """ + self._move_speed = move_speed + self._pure_state = pure_state + super(Humanoid, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode. + + Args: + physics: An instance of `Physics`. + + """ + # Find a collision-free random initial configuration. + penetrating = True + while penetrating: + randomizers.randomize_limited_and_rotational_joints(physics, self.random) + # Check for collisions. + physics.after_reset() + penetrating = physics.data.ncon > 0 + super(Humanoid, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns either the pure state or a set of egocentric features.""" + obs = collections.OrderedDict() + if self._pure_state: + obs["position"] = physics.position() + obs["velocity"] = physics.velocity() + else: + obs["joint_angles"] = physics.joint_angles() + obs["head_height"] = physics.head_height() + obs["extremities"] = physics.extremities() + obs["torso_vertical"] = physics.torso_vertical_orientation() + obs["com_velocity"] = physics.center_of_mass_velocity() + obs["velocity"] = physics.velocity() + return obs + + def get_reward(self, physics): + """Returns a reward to the agent.""" + standing = rewards.tolerance( + physics.head_height(), + bounds=(_STAND_HEIGHT, float("inf")), + margin=_STAND_HEIGHT / 4, + ) + upright = rewards.tolerance( + physics.torso_upright(), + bounds=(0.9, float("inf")), + sigmoid="linear", + margin=1.9, + value_at_margin=0, + ) + stand_reward = standing * upright + small_control = rewards.tolerance( + physics.control(), margin=1, value_at_margin=0, sigmoid="quadratic" + ).mean() + small_control = (4 + small_control) / 5 + if self._move_speed == 0: + horizontal_velocity = physics.center_of_mass_velocity()[[0, 1]] + dont_move = rewards.tolerance(horizontal_velocity, margin=2).mean() + return small_control * stand_reward * dont_move + else: + com_velocity = np.linalg.norm(physics.center_of_mass_velocity()[[0, 1]]) + move = rewards.tolerance( + com_velocity, + bounds=(self._move_speed, float("inf")), + margin=self._move_speed, + value_at_margin=0, + sigmoid="linear", + ) + move = (5 * move + 1) / 6 + return small_control * stand_reward * move diff --git a/envs/fb_mtenv_dmc/humanoid_CMU.py b/envs/fb_mtenv_dmc/humanoid_CMU.py new file mode 100644 index 0000000000000000000000000000000000000000..d4663bb9a0cbb0bf52a91fd4a4a057eed28ccd7a --- /dev/null +++ b/envs/fb_mtenv_dmc/humanoid_CMU.py @@ -0,0 +1,195 @@ 
+# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Humanoid_CMU Domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . import common +from dm_control.suite.utils import randomizers +from dm_control.utils import containers +from dm_control.utils import rewards +import numpy as np + +_DEFAULT_TIME_LIMIT = 20 +_CONTROL_TIMESTEP = 0.02 + +# Height of head above which stand reward is 1. +_STAND_HEIGHT = 1.4 + +# Horizontal speeds above which move reward is 1. +_WALK_SPEED = 1 +_RUN_SPEED = 10 + +SUITE = containers.TaggedTasks() + + +def get_model_and_assets(): + """Returns a tuple containing the model XML string and a dict of assets.""" + return common.read_model("humanoid_CMU.xml"), common.ASSETS + + +@SUITE.add() +def stand(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Stand task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = HumanoidCMU(move_speed=0, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +@SUITE.add() +def run(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Run task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = HumanoidCMU(move_speed=_RUN_SPEED, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics simulation with additional features for the humanoid_CMU domain.""" + + def thorax_upright(self): + """Returns projection from y-axes of thorax to the z-axes of world.""" + return self.named.data.xmat["thorax", "zy"] + + def head_height(self): + """Returns the height of the head.""" + return self.named.data.xpos["head", "z"] + + def center_of_mass_position(self): + """Returns position of the center-of-mass.""" + return self.named.data.subtree_com["thorax"] + + def center_of_mass_velocity(self): + """Returns the velocity of the center-of-mass.""" + return self.named.data.sensordata["thorax_subtreelinvel"].copy() + + def torso_vertical_orientation(self): + """Returns the z-projection of the thorax orientation matrix.""" + return self.named.data.xmat["thorax", ["zx", "zy", "zz"]] + + def joint_angles(self): + """Returns the state without global orientation or position.""" + return self.data.qpos[7:].copy() # Skip the 7 DoFs of the free root joint. 
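+ # In the CMU model the root body is the thorax, so the egocentric features
+ # below are expressed in the thorax frame rather than a torso frame.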
+ + def extremities(self): + """Returns end effector positions in egocentric frame.""" + torso_frame = self.named.data.xmat["thorax"].reshape(3, 3) + torso_pos = self.named.data.xpos["thorax"] + positions = [] + for side in ("l", "r"): + for limb in ("hand", "foot"): + torso_to_limb = self.named.data.xpos[side + limb] - torso_pos + positions.append(torso_to_limb.dot(torso_frame)) + return np.hstack(positions) + + +class HumanoidCMU(base.Task): + """A task for the CMU Humanoid.""" + + def __init__(self, move_speed, random=None): + """Initializes an instance of `Humanoid_CMU`. + + Args: + move_speed: A float. If this value is zero, reward is given simply for + standing up. Otherwise this specifies a target horizontal velocity for + the walking task. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). + """ + self._move_speed = move_speed + super(HumanoidCMU, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets a random collision-free configuration at the start of each episode. + + Args: + physics: An instance of `Physics`. + """ + penetrating = True + while penetrating: + randomizers.randomize_limited_and_rotational_joints(physics, self.random) + # Check for collisions. + physics.after_reset() + penetrating = physics.data.ncon > 0 + super(HumanoidCMU, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns a set of egocentric features.""" + obs = collections.OrderedDict() + obs["joint_angles"] = physics.joint_angles() + obs["head_height"] = physics.head_height() + obs["extremities"] = physics.extremities() + obs["torso_vertical"] = physics.torso_vertical_orientation() + obs["com_velocity"] = physics.center_of_mass_velocity() + obs["velocity"] = physics.velocity() + return obs + + def get_reward(self, physics): + """Returns a reward to the agent.""" + standing = rewards.tolerance( + physics.head_height(), + bounds=(_STAND_HEIGHT, float("inf")), + margin=_STAND_HEIGHT / 4, + ) + upright = rewards.tolerance( + physics.thorax_upright(), + bounds=(0.9, float("inf")), + sigmoid="linear", + margin=1.9, + value_at_margin=0, + ) + stand_reward = standing * upright + small_control = rewards.tolerance( + physics.control(), margin=1, value_at_margin=0, sigmoid="quadratic" + ).mean() + small_control = (4 + small_control) / 5 + if self._move_speed == 0: + horizontal_velocity = physics.center_of_mass_velocity()[[0, 1]] + dont_move = rewards.tolerance(horizontal_velocity, margin=2).mean() + return small_control * stand_reward * dont_move + else: + com_velocity = np.linalg.norm(physics.center_of_mass_velocity()[[0, 1]]) + move = rewards.tolerance( + com_velocity, + bounds=(self._move_speed, float("inf")), + margin=self._move_speed, + value_at_margin=0, + sigmoid="linear", + ) + move = (5 * move + 1) / 6 + return small_control * stand_reward * move diff --git a/envs/fb_mtenv_dmc/humanoid_CMU.xml b/envs/fb_mtenv_dmc/humanoid_CMU.xml new file mode 100644 index 0000000000000000000000000000000000000000..9a41a166833988e63d805284b1e997c1fb88742b --- /dev/null +++ b/envs/fb_mtenv_dmc/humanoid_CMU.xml @@ -0,0 +1,289 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/lqr.py b/envs/fb_mtenv_dmc/lqr.py new file mode 100644 index 0000000000000000000000000000000000000000..92e065dd5a139772ad957e9de4f29b75064aa9d5 --- /dev/null +++ b/envs/fb_mtenv_dmc/lqr.py @@ -0,0 +1,271 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Procedurally generated LQR domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . import common +from dm_control.utils import containers +from dm_control.utils import xml_tools +from lxml import etree +import numpy as np +from six.moves import range + +from dm_control.utils import io as resources + +_DEFAULT_TIME_LIMIT = float("inf") +_CONTROL_COST_COEF = 0.1 +SUITE = containers.TaggedTasks() + + +def get_model_and_assets(n_bodies, n_actuators, random): + """Returns the model description as an XML string and a dict of assets. + + Args: + n_bodies: An int, number of bodies of the LQR. + n_actuators: An int, number of actuated bodies of the LQR. `n_actuators` + should be less or equal than `n_bodies`. + random: A `numpy.random.RandomState` instance. + + Returns: + A tuple `(model_xml_string, assets)`, where `assets` is a dict consisting of + `{filename: contents_string}` pairs. + """ + return _make_model(n_bodies, n_actuators, random), common.ASSETS + + +@SUITE.add() +def lqr_2_1(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns an LQR environment with 2 bodies of which the first is actuated.""" + return _make_lqr( + n_bodies=2, + n_actuators=1, + control_cost_coef=_CONTROL_COST_COEF, + time_limit=time_limit, + random=random, + environment_kwargs=environment_kwargs, + ) + + +@SUITE.add() +def lqr_6_2(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns an LQR environment with 6 bodies of which first 2 are actuated.""" + return _make_lqr( + n_bodies=6, + n_actuators=2, + control_cost_coef=_CONTROL_COST_COEF, + time_limit=time_limit, + random=random, + environment_kwargs=environment_kwargs, + ) + + +def _make_lqr( + n_bodies, n_actuators, control_cost_coef, time_limit, random, environment_kwargs +): + """Returns a LQR environment. + + Args: + n_bodies: An int, number of bodies of the LQR. + n_actuators: An int, number of actuated bodies of the LQR. `n_actuators` + should be less or equal than `n_bodies`. + control_cost_coef: A number, the coefficient of the control cost. + time_limit: An int, maximum time for each episode in seconds. 
+ random: Either an existing `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically. + environment_kwargs: A `dict` specifying keyword arguments for the + environment, or None. + + Returns: + A LQR environment with `n_bodies` bodies of which first `n_actuators` are + actuated. + """ + + if not isinstance(random, np.random.RandomState): + random = np.random.RandomState(random) + + model_string, assets = get_model_and_assets(n_bodies, n_actuators, random=random) + physics = Physics.from_xml_string(model_string, assets=assets) + task = LQRLevel(control_cost_coef, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +def _make_body(body_id, stiffness_range, damping_range, random): + """Returns an `etree.Element` defining a body. + + Args: + body_id: Id of the created body. + stiffness_range: A tuple of (stiffness_lower_bound, stiffness_uppder_bound). + The stiffness of the joint is drawn uniformly from this range. + damping_range: A tuple of (damping_lower_bound, damping_upper_bound). The + damping of the joint is drawn uniformly from this range. + random: A `numpy.random.RandomState` instance. + + Returns: + A new instance of `etree.Element`. A body element with two children: joint + and geom. + """ + body_name = "body_{}".format(body_id) + joint_name = "joint_{}".format(body_id) + geom_name = "geom_{}".format(body_id) + + body = etree.Element("body", name=body_name) + body.set("pos", ".25 0 0") + joint = etree.SubElement(body, "joint", name=joint_name) + body.append(etree.Element("geom", name=geom_name)) + joint.set("stiffness", str(random.uniform(stiffness_range[0], stiffness_range[1]))) + joint.set("damping", str(random.uniform(damping_range[0], damping_range[1]))) + return body + + +def _make_model( + n_bodies, n_actuators, random, stiffness_range=(15, 25), damping_range=(0, 0) +): + """Returns an MJCF XML string defining a model of springs and dampers. + + Args: + n_bodies: An integer, the number of bodies (DoFs) in the system. + n_actuators: An integer, the number of actuated bodies. + random: A `numpy.random.RandomState` instance. + stiffness_range: A tuple containing minimum and maximum stiffness. Each + joint's stiffness is sampled uniformly from this interval. + damping_range: A tuple containing minimum and maximum damping. Each joint's + damping is sampled uniformly from this interval. + + Returns: + An MJCF string describing the linear system. + + Raises: + ValueError: If the number of bodies or actuators is erronous. + """ + if n_bodies < 1 or n_actuators < 1: + raise ValueError("At least 1 body and 1 actuator required.") + if n_actuators > n_bodies: + raise ValueError("At most 1 actuator per body.") + + file_path = os.path.join(os.path.dirname(__file__), "lqr.xml") + with resources.GetResourceAsFile(file_path) as xml_file: + mjcf = xml_tools.parse(xml_file) + parent = mjcf.find("./worldbody") + actuator = etree.SubElement(mjcf.getroot(), "actuator") + tendon = etree.SubElement(mjcf.getroot(), "tendon") + + for body in range(n_bodies): + # Inserting body. + child = _make_body(body, stiffness_range, damping_range, random) + site_name = "site_{}".format(body) + child.append(etree.Element("site", name=site_name)) + + if body == 0: + child.set("pos", ".25 0 .1") + # Add actuators to the first n_actuators bodies. + if body < n_actuators: + # Adding actuator. 
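+ # A MuJoCo <motor> element applies a direct generalized force to the named
+ # joint, so only these first n_actuators joints are controllable.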
+ joint_name = "joint_{}".format(body) + motor_name = "motor_{}".format(body) + child.find("joint").set("name", joint_name) + actuator.append(etree.Element("motor", name=motor_name, joint=joint_name)) + + # Add a tendon between consecutive bodies (for visualisation purposes only). + if body < n_bodies - 1: + child_site_name = "site_{}".format(body + 1) + tendon_name = "tendon_{}".format(body) + spatial = etree.SubElement(tendon, "spatial", name=tendon_name) + spatial.append(etree.Element("site", site=site_name)) + spatial.append(etree.Element("site", site=child_site_name)) + parent.append(child) + parent = child + + return etree.tostring(mjcf, pretty_print=True) + + +class Physics(mujoco.Physics): + """Physics simulation with additional features for the LQR domain.""" + + def state_norm(self): + """Returns the norm of the physics state.""" + return np.linalg.norm(self.state()) + + +class LQRLevel(base.Task): + """A Linear Quadratic Regulator `Task`.""" + + _TERMINAL_TOL = 1e-6 + + def __init__(self, control_cost_coef, random=None): + """Initializes an LQR level with cost = sum(states^2) + c*sum(controls^2). + + Args: + control_cost_coef: The coefficient of the control cost. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). + + Raises: + ValueError: If the control cost coefficient is not positive. + """ + if control_cost_coef <= 0: + raise ValueError("control_cost_coef must be positive.") + + self._control_cost_coef = control_cost_coef + super(LQRLevel, self).__init__(random=random) + + @property + def control_cost_coef(self): + return self._control_cost_coef + + def initialize_episode(self, physics): + """Random state sampled from a unit sphere.""" + ndof = physics.model.nq + unit = self.random.randn(ndof) + physics.data.qpos[:] = np.sqrt(2) * unit / np.linalg.norm(unit) + super(LQRLevel, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation of the state.""" + obs = collections.OrderedDict() + obs["position"] = physics.position() + obs["velocity"] = physics.velocity() + return obs + + def get_reward(self, physics): + """Returns a quadratic state and control reward.""" + position = physics.position() + state_cost = 0.5 * np.dot(position, position) + control_signal = physics.control() + control_l2_norm = 0.5 * np.dot(control_signal, control_signal) + return 1 - (state_cost + control_l2_norm * self._control_cost_coef) + + def get_evaluation(self, physics): + """Returns a sparse evaluation reward that is not used for learning.""" + return float(physics.state_norm() <= 0.01) + + def get_termination(self, physics): + """Terminates when the state norm is smaller than epsilon.""" + if physics.state_norm() < self._TERMINAL_TOL: + return 0.0 diff --git a/envs/fb_mtenv_dmc/lqr.xml b/envs/fb_mtenv_dmc/lqr.xml new file mode 100644 index 0000000000000000000000000000000000000000..d403532a79ba6f629a7cc6278e06370709571084 --- /dev/null +++ b/envs/fb_mtenv_dmc/lqr.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/lqr_solver.py b/envs/fb_mtenv_dmc/lqr_solver.py new file mode 100644 index 0000000000000000000000000000000000000000..9376eca98445074920cafa03f170aaf78f9be5c9 --- /dev/null +++ b/envs/fb_mtenv_dmc/lqr_solver.py @@ -0,0 +1,146 @@ +# Copyright 2017 The dm_control Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +r"""Optimal policy for LQR levels. + +LQR control problem is described in +https://en.wikipedia.org/wiki/Linear-quadratic_regulator#Infinite-horizon.2C_discrete-time_LQR +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +from dm_control.mujoco import wrapper +import numpy as np +from six.moves import range + +try: + import scipy.linalg as sp # pylint: disable=g-import-not-at-top +except ImportError: + sp = None + + +def _solve_dare(a, b, q, r): + """Solves the Discrete-time Algebraic Riccati Equation (DARE) by iteration. + + Algebraic Riccati Equation: + ```none + P_{t-1} = Q + A' * P_{t} * A - + A' * P_{t} * B * (R + B' * P_{t} * B)^{-1} * B' * P_{t} * A + ``` + + Args: + a: A 2 dimensional numpy array, transition matrix A. + b: A 2 dimensional numpy array, control matrix B. + q: A 2 dimensional numpy array, symmetric positive definite cost matrix. + r: A 2 dimensional numpy array, symmetric positive definite cost matrix + + Returns: + A numpy array, a real symmetric matrix P which is the solution to DARE. + + Raises: + RuntimeError: If the computed P matrix is not symmetric and + positive-definite. + """ + p = np.eye(len(a)) + for _ in range(1000000): + a_p = a.T.dot(p) # A' * P_t + a_p_b = np.dot(a_p, b) # A' * P_t * B + # Algebraic Riccati Equation. + p_next = ( + q + + np.dot(a_p, a) + - a_p_b.dot(np.linalg.solve(b.T.dot(p.dot(b)) + r, a_p_b.T)) + ) + p_next += p_next.T + p_next *= 0.5 + if np.abs(p - p_next).max() < 1e-12: + break + p = p_next + else: + logging.warning("DARE solver did not converge") + try: + # Check that the result is symmetric and positive-definite. + np.linalg.cholesky(p_next) + except np.linalg.LinAlgError: + raise RuntimeError( + "ARE solver failed: P matrix is not symmetric and " "positive-definite." + ) + return p_next + + +def solve(env): + """Returns the optimal value and policy for LQR problem. + + Args: + env: An instance of `control.EnvironmentV2` with LQR level. + + Returns: + p: A numpy array, the Hessian of the optimal total cost-to-go (value + function at state x) is V(x) = .5 * x' * p * x. + k: A numpy array which gives the optimal linear policy u = k * x. + beta: The maximum eigenvalue of (a + b * k). Under optimal policy, at + timestep n the state tends to 0 like beta^n. + + Raises: + RuntimeError: If the controlled system is unstable. + """ + n = env.physics.model.nq # number of DoFs + m = env.physics.model.nu # number of controls + + # Compute the mass matrix. + mass = np.zeros((n, n)) + wrapper.mjbindings.mjlib.mj_fullM(env.physics.model.ptr, mass, env.physics.data.qM) + + # Compute input matrices a, b, q and r to the DARE solvers. + # State transition matrix a. 
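+    # With state x = [q; qdot], the continuous dynamics are
+    #   M qdotdot = -K q - D qdot + B u,  so  j = -M^{-1} [K  D]  is (n x 2n).
+    # The construction below, a = I + dt * [[dt*j + [0 I]], [j]], is effectively
+    # the semi-implicit Euler discretization
+    #   qdot_{t+1} = qdot_t + dt * (j x_t + M^{-1} B u_t),
+    #   q_{t+1}    = q_t    + dt * qdot_{t+1}.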
+ stiffness = np.diag(env.physics.model.jnt_stiffness.ravel()) + damping = np.diag(env.physics.model.dof_damping.ravel()) + dt = env.physics.model.opt.timestep + + j = np.linalg.solve(-mass, np.hstack((stiffness, damping))) + a = np.eye(2 * n) + dt * np.vstack( + (dt * j + np.hstack((np.zeros((n, n)), np.eye(n))), j) + ) + + # Control transition matrix b. + b = env.physics.data.actuator_moment.T + bc = np.linalg.solve(mass, b) + b = dt * np.vstack((dt * bc, bc)) + + # State cost Hessian q. + q = np.diag(np.hstack([np.ones(n), np.zeros(n)])) + + # Control cost Hessian r. + r = env.task.control_cost_coef * np.eye(m) + + if sp: + # Use scipy's faster DARE solver if available. + solve_dare = sp.solve_discrete_are + else: + # Otherwise fall back on a slower internal implementation. + solve_dare = _solve_dare + + # Solve the discrete algebraic Riccati equation. + p = solve_dare(a, b, q, r) + k = -np.linalg.solve(b.T.dot(p.dot(b)) + r, b.T.dot(p.dot(a))) + + # Under optimal policy, state tends to 0 like beta^n_timesteps + beta = np.abs(np.linalg.eigvals(a + b.dot(k))).max() + if beta >= 1.0: + raise RuntimeError("Controlled system is unstable.") + return p, k, beta diff --git a/envs/fb_mtenv_dmc/manipulator.py b/envs/fb_mtenv_dmc/manipulator.py new file mode 100644 index 0000000000000000000000000000000000000000..5a9e28ebaf3c65d2c5e251aa2f18ca747e7526f4 --- /dev/null +++ b/envs/fb_mtenv_dmc/manipulator.py @@ -0,0 +1,329 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Planar Manipulator domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . import common +from dm_control.utils import containers +from dm_control.utils import rewards +from dm_control.utils import xml_tools + +from lxml import etree +import numpy as np + +_CLOSE = 0.01 # (Meters) Distance below which a thing is considered close. +_CONTROL_TIMESTEP = 0.01 # (Seconds) +_TIME_LIMIT = 10 # (Seconds) +_P_IN_HAND = 0.1 # Probabillity of object-in-hand initial state +_P_IN_TARGET = 0.1 # Probabillity of object-in-target initial state +_ARM_JOINTS = [ + "arm_root", + "arm_shoulder", + "arm_elbow", + "arm_wrist", + "finger", + "fingertip", + "thumb", + "thumbtip", +] +_ALL_PROPS = frozenset(["ball", "target_ball", "cup", "peg", "target_peg", "slot"]) + +SUITE = containers.TaggedTasks() + + +def make_model(use_peg, insert): + """Returns a tuple containing the model XML string and a dict of assets.""" + xml_string = common.read_model("manipulator.xml") + parser = etree.XMLParser(remove_blank_text=True) + mjcf = etree.XML(xml_string, parser) + + # Select the desired prop. 
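+    # The four suite entries map onto (use_peg, insert) as:
+    #   bring_ball=(False, False), bring_peg=(True, False),
+    #   insert_ball=(False, True) with the cup, insert_peg=(True, True) with the slot.
+    # Props not needed by the chosen variant are pruned from the XML below.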
+ if use_peg: + required_props = ["peg", "target_peg"] + if insert: + required_props += ["slot"] + else: + required_props = ["ball", "target_ball"] + if insert: + required_props += ["cup"] + + # Remove unused props + for unused_prop in _ALL_PROPS.difference(required_props): + prop = xml_tools.find_element(mjcf, "body", unused_prop) + prop.getparent().remove(prop) + + return etree.tostring(mjcf, pretty_print=True), common.ASSETS + + +@SUITE.add("benchmarking", "hard") +def bring_ball( + fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns manipulator bring task with the ball prop.""" + use_peg = False + insert = False + physics = Physics.from_xml_string(*make_model(use_peg, insert)) + task = Bring( + use_peg=use_peg, insert=insert, fully_observable=fully_observable, random=random + ) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + control_timestep=_CONTROL_TIMESTEP, + time_limit=time_limit, + **environment_kwargs + ) + + +@SUITE.add("hard") +def bring_peg( + fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns manipulator bring task with the peg prop.""" + use_peg = True + insert = False + physics = Physics.from_xml_string(*make_model(use_peg, insert)) + task = Bring( + use_peg=use_peg, insert=insert, fully_observable=fully_observable, random=random + ) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + control_timestep=_CONTROL_TIMESTEP, + time_limit=time_limit, + **environment_kwargs + ) + + +@SUITE.add("hard") +def insert_ball( + fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns manipulator insert task with the ball prop.""" + use_peg = False + insert = True + physics = Physics.from_xml_string(*make_model(use_peg, insert)) + task = Bring( + use_peg=use_peg, insert=insert, fully_observable=fully_observable, random=random + ) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + control_timestep=_CONTROL_TIMESTEP, + time_limit=time_limit, + **environment_kwargs + ) + + +@SUITE.add("hard") +def insert_peg( + fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns manipulator insert task with the peg prop.""" + use_peg = True + insert = True + physics = Physics.from_xml_string(*make_model(use_peg, insert)) + task = Bring( + use_peg=use_peg, insert=insert, fully_observable=fully_observable, random=random + ) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + control_timestep=_CONTROL_TIMESTEP, + time_limit=time_limit, + **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics with additional features for the Planar Manipulator domain.""" + + def bounded_joint_pos(self, joint_names): + """Returns joint positions as (sin, cos) values.""" + joint_pos = self.named.data.qpos[joint_names] + return np.vstack([np.sin(joint_pos), np.cos(joint_pos)]).T + + def joint_vel(self, joint_names): + """Returns joint velocities.""" + return self.named.data.qvel[joint_names] + + def body_2d_pose(self, body_names, orientation=True): + """Returns positions and/or orientations of bodies.""" + if not isinstance(body_names, str): + body_names = np.array(body_names).reshape(-1, 1) # Broadcast indices. 
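+        # Reshaping the names to a column vector makes the named indexer below
+        # broadcast against the ["x", "z"] columns, returning an (n_bodies, 2)
+        # array of planar positions (and, when requested, an (n_bodies, 2) array
+        # of (qw, qy) orientation components).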
+ pos = self.named.data.xpos[body_names, ["x", "z"]] + if orientation: + ori = self.named.data.xquat[body_names, ["qw", "qy"]] + return np.hstack([pos, ori]) + else: + return pos + + def touch(self): + return np.log1p(self.data.sensordata) + + def site_distance(self, site1, site2): + site1_to_site2 = np.diff(self.named.data.site_xpos[[site2, site1]], axis=0) + return np.linalg.norm(site1_to_site2) + + +class Bring(base.Task): + """A Bring `Task`: bring the prop to the target.""" + + def __init__(self, use_peg, insert, fully_observable, random=None): + """Initialize an instance of the `Bring` task. + + Args: + use_peg: A `bool`, whether to replace the ball prop with the peg prop. + insert: A `bool`, whether to insert the prop in a receptacle. + fully_observable: A `bool`, whether the observation should contain the + position and velocity of the object being manipulated and the target + location. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). + """ + self._use_peg = use_peg + self._target = "target_peg" if use_peg else "target_ball" + self._object = "peg" if self._use_peg else "ball" + self._object_joints = ["_".join([self._object, dim]) for dim in "xzy"] + self._receptacle = "slot" if self._use_peg else "cup" + self._insert = insert + self._fully_observable = fully_observable + super(Bring, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + # Local aliases + choice = self.random.choice + uniform = self.random.uniform + model = physics.named.model + data = physics.named.data + + # Find a collision-free random initial configuration. + penetrating = True + while penetrating: + + # Randomise angles of arm joints. + is_limited = model.jnt_limited[_ARM_JOINTS].astype(np.bool) + joint_range = model.jnt_range[_ARM_JOINTS] + lower_limits = np.where(is_limited, joint_range[:, 0], -np.pi) + upper_limits = np.where(is_limited, joint_range[:, 1], np.pi) + angles = uniform(lower_limits, upper_limits) + data.qpos[_ARM_JOINTS] = angles + + # Symmetrize hand. + data.qpos["finger"] = data.qpos["thumb"] + + # Randomise target location. + target_x = uniform(-0.4, 0.4) + target_z = uniform(0.1, 0.4) + if self._insert: + target_angle = uniform(-np.pi / 3, np.pi / 3) + model.body_pos[self._receptacle, ["x", "z"]] = target_x, target_z + model.body_quat[self._receptacle, ["qw", "qy"]] = [ + np.cos(target_angle / 2), + np.sin(target_angle / 2), + ] + else: + target_angle = uniform(-np.pi, np.pi) + + model.body_pos[self._target, ["x", "z"]] = target_x, target_z + model.body_quat[self._target, ["qw", "qy"]] = [ + np.cos(target_angle / 2), + np.sin(target_angle / 2), + ] + + # Randomise object location. 
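+            # With the module defaults, the object starts in the gripper with
+            # probability _P_IN_HAND (0.1), on the target with probability
+            # _P_IN_TARGET (0.1), and at a uniformly random pose otherwise (0.8).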
+ object_init_probs = [ + _P_IN_HAND, + _P_IN_TARGET, + 1 - _P_IN_HAND - _P_IN_TARGET, + ] + init_type = choice(["in_hand", "in_target", "uniform"], p=object_init_probs) + if init_type == "in_target": + object_x = target_x + object_z = target_z + object_angle = target_angle + elif init_type == "in_hand": + physics.after_reset() + object_x = data.site_xpos["grasp", "x"] + object_z = data.site_xpos["grasp", "z"] + grasp_direction = data.site_xmat["grasp", ["xx", "zx"]] + object_angle = np.pi - np.arctan2( + grasp_direction[1], grasp_direction[0] + ) + else: + object_x = uniform(-0.5, 0.5) + object_z = uniform(0, 0.7) + object_angle = uniform(0, 2 * np.pi) + data.qvel[self._object + "_x"] = uniform(-5, 5) + + data.qpos[self._object_joints] = object_x, object_z, object_angle + + # Check for collisions. + physics.after_reset() + penetrating = physics.data.ncon > 0 + + super(Bring, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns either features or only sensors (to be used with pixels).""" + obs = collections.OrderedDict() + obs["arm_pos"] = physics.bounded_joint_pos(_ARM_JOINTS) + obs["arm_vel"] = physics.joint_vel(_ARM_JOINTS) + obs["touch"] = physics.touch() + if self._fully_observable: + obs["hand_pos"] = physics.body_2d_pose("hand") + obs["object_pos"] = physics.body_2d_pose(self._object) + obs["object_vel"] = physics.joint_vel(self._object_joints) + obs["target_pos"] = physics.body_2d_pose(self._target) + return obs + + def _is_close(self, distance): + return rewards.tolerance(distance, (0, _CLOSE), _CLOSE * 2) + + def _peg_reward(self, physics): + """Returns a reward for bringing the peg prop to the target.""" + grasp = self._is_close(physics.site_distance("peg_grasp", "grasp")) + pinch = self._is_close(physics.site_distance("peg_pinch", "pinch")) + grasping = (grasp + pinch) / 2 + bring = self._is_close(physics.site_distance("peg", "target_peg")) + bring_tip = self._is_close(physics.site_distance("target_peg_tip", "peg_tip")) + bringing = (bring + bring_tip) / 2 + return max(bringing, grasping / 3) + + def _ball_reward(self, physics): + """Returns a reward for bringing the ball prop to the target.""" + return self._is_close(physics.site_distance("ball", "target_ball")) + + def get_reward(self, physics): + """Returns a reward to the agent.""" + if self._use_peg: + return self._peg_reward(physics) + else: + return self._ball_reward(physics) diff --git a/envs/fb_mtenv_dmc/pendulum.xml b/envs/fb_mtenv_dmc/pendulum.xml new file mode 100644 index 0000000000000000000000000000000000000000..14377ae6faacd023b458ea68142c7f6f8f91892f --- /dev/null +++ b/envs/fb_mtenv_dmc/pendulum.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/point_mass.py b/envs/fb_mtenv_dmc/point_mass.py new file mode 100644 index 0000000000000000000000000000000000000000..bbd4078647aaf2b589a8815ddc4e7ddaf34f66f0 --- /dev/null +++ b/envs/fb_mtenv_dmc/point_mass.py @@ -0,0 +1,134 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Point-mass domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . import common +from dm_control.suite.utils import randomizers +from dm_control.utils import containers +from dm_control.utils import rewards +import numpy as np + +_DEFAULT_TIME_LIMIT = 20 +SUITE = containers.TaggedTasks() + + +def get_model_and_assets(): + """Returns a tuple containing the model XML string and a dict of assets.""" + return common.read_model("point_mass.xml"), common.ASSETS + + +@SUITE.add("benchmarking", "easy") +def easy(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the easy point_mass task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = PointMass(randomize_gains=False, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +@SUITE.add() +def hard(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the hard point_mass task.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = PointMass(randomize_gains=True, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """physics for the point_mass domain.""" + + def mass_to_target(self): + """Returns the vector from mass to target in global coordinate.""" + return ( + self.named.data.geom_xpos["target"] - self.named.data.geom_xpos["pointmass"] + ) + + def mass_to_target_dist(self): + """Returns the distance from mass to the target.""" + return np.linalg.norm(self.mass_to_target()) + + +class PointMass(base.Task): + """A point_mass `Task` to reach target with smooth reward.""" + + def __init__(self, randomize_gains, random=None): + """Initialize an instance of `PointMass`. + + Args: + randomize_gains: A `bool`, whether to randomize the actuator gains. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). + """ + self._randomize_gains = randomize_gains + super(PointMass, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode. + + If _randomize_gains is True, the relationship between the controls and + the joints is randomized, so that each control actuates a random linear + combination of joints. + + Args: + physics: An instance of `mujoco.Physics`. + """ + randomizers.randomize_limited_and_rotational_joints(physics, self.random) + if self._randomize_gains: + dir1 = self.random.randn(2) + dir1 /= np.linalg.norm(dir1) + # Find another actuation direction that is not 'too parallel' to dir1. 
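+            # Rejection-sample dir2 until |dir1 . dir2| <= 0.9, i.e. until the two
+            # unit directions are at least ~25 degrees apart, then write them into
+            # wrap_prm so each actuator drives that fixed linear combination of
+            # the two joints.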
+ parallel = True + while parallel: + dir2 = self.random.randn(2) + dir2 /= np.linalg.norm(dir2) + parallel = abs(np.dot(dir1, dir2)) > 0.9 + physics.model.wrap_prm[[0, 1]] = dir1 + physics.model.wrap_prm[[2, 3]] = dir2 + super(PointMass, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation of the state.""" + obs = collections.OrderedDict() + obs["position"] = physics.position() + obs["velocity"] = physics.velocity() + return obs + + def get_reward(self, physics): + """Returns a reward to the agent.""" + target_size = physics.named.model.geom_size["target", 0] + near_target = rewards.tolerance( + physics.mass_to_target_dist(), bounds=(0, target_size), margin=target_size + ) + control_reward = rewards.tolerance( + physics.control(), margin=1, value_at_margin=0, sigmoid="quadratic" + ).mean() + small_control = (control_reward + 4) / 5 + return near_target * small_control diff --git a/envs/fb_mtenv_dmc/point_mass.xml b/envs/fb_mtenv_dmc/point_mass.xml new file mode 100644 index 0000000000000000000000000000000000000000..c447cf6148ebf6e17ced8cb282d553f2e43fea7d --- /dev/null +++ b/envs/fb_mtenv_dmc/point_mass.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/quadruped.py b/envs/fb_mtenv_dmc/quadruped.py new file mode 100644 index 0000000000000000000000000000000000000000..422d70d82050929f1c4cf3011ec768c6821853bf --- /dev/null +++ b/envs/fb_mtenv_dmc/quadruped.py @@ -0,0 +1,514 @@ +# Copyright 2019 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Quadruped Domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.mujoco.wrapper import mjbindings +from dm_control.rl import control +from . import base +from . import common +from dm_control.utils import containers +from dm_control.utils import rewards +from dm_control.utils import xml_tools + +from lxml import etree +import numpy as np +from scipy import ndimage + +enums = mjbindings.enums +mjlib = mjbindings.mjlib + + +_DEFAULT_TIME_LIMIT = 20 +_CONTROL_TIMESTEP = 0.02 + +# Horizontal speeds above which the move reward is 1. +_RUN_SPEED = 5 +_WALK_SPEED = 0.5 + +# Constants related to terrain generation. +_HEIGHTFIELD_ID = 0 +_TERRAIN_SMOOTHNESS = 0.15 # 0.0: maximally bumpy; 1.0: completely smooth. +_TERRAIN_BUMP_SCALE = 2 # Spatial scale of terrain bumps (in meters). + +# Named model elements. 
+_TOES = ["toe_front_left", "toe_back_left", "toe_back_right", "toe_front_right"] +_WALLS = ["wall_px", "wall_py", "wall_nx", "wall_ny"] + +SUITE = containers.TaggedTasks() + + +def make_model( + floor_size=None, terrain=False, rangefinders=False, walls_and_ball=False +): + """Returns the model XML string.""" + xml_string = common.read_model("quadruped.xml") + parser = etree.XMLParser(remove_blank_text=True) + mjcf = etree.XML(xml_string, parser) + + # Set floor size. + if floor_size is not None: + floor_geom = mjcf.find(".//geom[@name={!r}]".format("floor")) + floor_geom.attrib["size"] = "{} {} .5".format(floor_size, floor_size) + + # Remove walls, ball and target. + if not walls_and_ball: + for wall in _WALLS: + wall_geom = xml_tools.find_element(mjcf, "geom", wall) + wall_geom.getparent().remove(wall_geom) + + # Remove ball. + ball_body = xml_tools.find_element(mjcf, "body", "ball") + ball_body.getparent().remove(ball_body) + + # Remove target. + target_site = xml_tools.find_element(mjcf, "site", "target") + target_site.getparent().remove(target_site) + + # Remove terrain. + if not terrain: + terrain_geom = xml_tools.find_element(mjcf, "geom", "terrain") + terrain_geom.getparent().remove(terrain_geom) + + # Remove rangefinders if they're not used, as range computations can be + # expensive, especially in a scene with heightfields. + if not rangefinders: + rangefinder_sensors = mjcf.findall(".//rangefinder") + for rf in rangefinder_sensors: + rf.getparent().remove(rf) + + return etree.tostring(mjcf, pretty_print=True) + + +@SUITE.add() +def walk(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Walk task.""" + xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _WALK_SPEED) + physics = Physics.from_xml_string(xml_string, common.ASSETS) + task = Move(desired_speed=_WALK_SPEED, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +@SUITE.add() +def run(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Run task.""" + xml_string = make_model(floor_size=_DEFAULT_TIME_LIMIT * _RUN_SPEED) + physics = Physics.from_xml_string(xml_string, common.ASSETS) + task = Move(desired_speed=_RUN_SPEED, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +@SUITE.add() +def escape(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Escape task.""" + xml_string = make_model(floor_size=40, terrain=True, rangefinders=True) + physics = Physics.from_xml_string(xml_string, common.ASSETS) + task = Escape(random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +@SUITE.add() +def fetch(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns the Fetch task.""" + xml_string = make_model(walls_and_ball=True) + physics = Physics.from_xml_string(xml_string, common.ASSETS) + task = Fetch(random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics 
simulation with additional features for the Quadruped domain.""" + + def _reload_from_data(self, data): + super(Physics, self)._reload_from_data(data) + # Clear cached sensor names when the physics is reloaded. + self._sensor_types_to_names = {} + self._hinge_names = [] + + def _get_sensor_names(self, *sensor_types): + try: + sensor_names = self._sensor_types_to_names[sensor_types] + except KeyError: + [sensor_ids] = np.where(np.in1d(self.model.sensor_type, sensor_types)) + sensor_names = [self.model.id2name(s_id, "sensor") for s_id in sensor_ids] + self._sensor_types_to_names[sensor_types] = sensor_names + return sensor_names + + def torso_upright(self): + """Returns the dot-product of the torso z-axis and the global z-axis.""" + return np.asarray(self.named.data.xmat["torso", "zz"]) + + def torso_velocity(self): + """Returns the velocity of the torso, in the local frame.""" + return self.named.data.sensordata["velocimeter"].copy() + + def egocentric_state(self): + """Returns the state without global orientation or position.""" + if not self._hinge_names: + [hinge_ids] = np.nonzero(self.model.jnt_type == enums.mjtJoint.mjJNT_HINGE) + self._hinge_names = [ + self.model.id2name(j_id, "joint") for j_id in hinge_ids + ] + return np.hstack( + ( + self.named.data.qpos[self._hinge_names], + self.named.data.qvel[self._hinge_names], + self.data.act, + ) + ) + + def toe_positions(self): + """Returns toe positions in egocentric frame.""" + torso_frame = self.named.data.xmat["torso"].reshape(3, 3) + torso_pos = self.named.data.xpos["torso"] + torso_to_toe = self.named.data.xpos[_TOES] - torso_pos + return torso_to_toe.dot(torso_frame) + + def force_torque(self): + """Returns scaled force/torque sensor readings at the toes.""" + force_torque_sensors = self._get_sensor_names( + enums.mjtSensor.mjSENS_FORCE, enums.mjtSensor.mjSENS_TORQUE + ) + return np.arcsinh(self.named.data.sensordata[force_torque_sensors]) + + def imu(self): + """Returns IMU-like sensor readings.""" + imu_sensors = self._get_sensor_names( + enums.mjtSensor.mjSENS_GYRO, enums.mjtSensor.mjSENS_ACCELEROMETER + ) + return self.named.data.sensordata[imu_sensors] + + def rangefinder(self): + """Returns scaled rangefinder sensor readings.""" + rf_sensors = self._get_sensor_names(enums.mjtSensor.mjSENS_RANGEFINDER) + rf_readings = self.named.data.sensordata[rf_sensors] + no_intersection = -1.0 + return np.where(rf_readings == no_intersection, 1.0, np.tanh(rf_readings)) + + def origin_distance(self): + """Returns the distance from the origin to the workspace.""" + return np.asarray(np.linalg.norm(self.named.data.site_xpos["workspace"])) + + def origin(self): + """Returns origin position in the torso frame.""" + torso_frame = self.named.data.xmat["torso"].reshape(3, 3) + torso_pos = self.named.data.xpos["torso"] + return -torso_pos.dot(torso_frame) + + def ball_state(self): + """Returns ball position and velocity relative to the torso frame.""" + data = self.named.data + torso_frame = data.xmat["torso"].reshape(3, 3) + ball_rel_pos = data.xpos["ball"] - data.xpos["torso"] + ball_rel_vel = data.qvel["ball_root"][:3] - data.qvel["root"][:3] + ball_rot_vel = data.qvel["ball_root"][3:] + ball_state = np.vstack((ball_rel_pos, ball_rel_vel, ball_rot_vel)) + return ball_state.dot(torso_frame).ravel() + + def target_position(self): + """Returns target position in torso frame.""" + torso_frame = self.named.data.xmat["torso"].reshape(3, 3) + torso_pos = self.named.data.xpos["torso"] + torso_to_target = self.named.data.site_xpos["target"] - torso_pos + 
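+        # xmat["torso"] is the torso-to-world rotation R, so right-multiplying a
+        # world-frame vector by R (v.dot(R) == R.T @ v) expresses it in torso
+        # coordinates; toe_positions() and origin() use the same projection.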
return torso_to_target.dot(torso_frame) + + def ball_to_target_distance(self): + """Returns horizontal distance from the ball to the target.""" + ball_to_target = ( + self.named.data.site_xpos["target"] - self.named.data.xpos["ball"] + ) + return np.linalg.norm(ball_to_target[:2]) + + def self_to_ball_distance(self): + """Returns horizontal distance from the quadruped workspace to the ball.""" + self_to_ball = ( + self.named.data.site_xpos["workspace"] - self.named.data.xpos["ball"] + ) + return np.linalg.norm(self_to_ball[:2]) + + +def _find_non_contacting_height(physics, orientation, x_pos=0.0, y_pos=0.0): + """Find a height with no contacts given a body orientation. + + Args: + physics: An instance of `Physics`. + orientation: A quaternion. + x_pos: A float. Position along global x-axis. + y_pos: A float. Position along global y-axis. + Raises: + RuntimeError: If a non-contacting configuration has not been found after + 10,000 attempts. + """ + z_pos = 0.0 # Start embedded in the floor. + num_contacts = 1 + num_attempts = 0 + # Move up in 1cm increments until no contacts. + while num_contacts > 0: + try: + with physics.reset_context(): + physics.named.data.qpos["root"][:3] = x_pos, y_pos, z_pos + physics.named.data.qpos["root"][3:] = orientation + except control.PhysicsError: + # We may encounter a PhysicsError here due to filling the contact + # buffer, in which case we simply increment the height and continue. + pass + num_contacts = physics.data.ncon + z_pos += 0.01 + num_attempts += 1 + if num_attempts > 10000: + raise RuntimeError("Failed to find a non-contacting configuration.") + + +def _common_observations(physics): + """Returns the observations common to all tasks.""" + obs = collections.OrderedDict() + obs["egocentric_state"] = physics.egocentric_state() + obs["torso_velocity"] = physics.torso_velocity() + obs["torso_upright"] = physics.torso_upright() + obs["imu"] = physics.imu() + obs["force_torque"] = physics.force_torque() + return obs + + +def _upright_reward(physics, deviation_angle=0): + """Returns a reward proportional to how upright the torso is. + + Args: + physics: an instance of `Physics`. + deviation_angle: A float, in degrees. The reward is 0 when the torso is + exactly upside-down and 1 when the torso's z-axis is less than + `deviation_angle` away from the global z-axis. + """ + deviation = np.cos(np.deg2rad(deviation_angle)) + return rewards.tolerance( + physics.torso_upright(), + bounds=(deviation, float("inf")), + sigmoid="linear", + margin=1 + deviation, + value_at_margin=0, + ) + + +class Move(base.Task): + """A quadruped task solved by moving forward at a designated speed.""" + + def __init__(self, desired_speed, random=None): + """Initializes an instance of `Move`. + + Args: + desired_speed: A float. If this value is zero, reward is given simply + for standing upright. Otherwise this specifies the horizontal velocity + at which the velocity-dependent reward component is maximized. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). + """ + self._desired_speed = desired_speed + super(Move, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode. + + Args: + physics: An instance of `Physics`. + + """ + # Initial configuration. 
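+        # A normalised 4-vector of Gaussian samples is a uniformly random unit
+        # quaternion, i.e. a uniformly random initial orientation;
+        # _find_non_contacting_height then raises the torso in 1 cm steps until
+        # MuJoCo reports no contacts.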
+ orientation = self.random.randn(4) + orientation /= np.linalg.norm(orientation) + _find_non_contacting_height(physics, orientation) + super(Move, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation to the agent.""" + return _common_observations(physics) + + def get_reward(self, physics): + """Returns a reward to the agent.""" + + # Move reward term. + move_reward = rewards.tolerance( + physics.torso_velocity()[0], + bounds=(self._desired_speed, float("inf")), + margin=self._desired_speed, + value_at_margin=0.5, + sigmoid="linear", + ) + + return _upright_reward(physics) * move_reward + + +class Escape(base.Task): + """A quadruped task solved by escaping a bowl-shaped terrain.""" + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode. + + Args: + physics: An instance of `Physics`. + + """ + # Get heightfield resolution, assert that it is square. + res = physics.model.hfield_nrow[_HEIGHTFIELD_ID] + assert res == physics.model.hfield_ncol[_HEIGHTFIELD_ID] + # Sinusoidal bowl shape. + row_grid, col_grid = np.ogrid[-1 : 1 : res * 1j, -1 : 1 : res * 1j] + radius = np.clip(np.sqrt(col_grid ** 2 + row_grid ** 2), 0.04, 1) + bowl_shape = 0.5 - np.cos(2 * np.pi * radius) / 2 + # Random smooth bumps. + terrain_size = 2 * physics.model.hfield_size[_HEIGHTFIELD_ID, 0] + bump_res = int(terrain_size / _TERRAIN_BUMP_SCALE) + bumps = self.random.uniform(_TERRAIN_SMOOTHNESS, 1, (bump_res, bump_res)) + smooth_bumps = ndimage.zoom(bumps, res / float(bump_res)) + # Terrain is elementwise product. + terrain = bowl_shape * smooth_bumps + start_idx = physics.model.hfield_adr[_HEIGHTFIELD_ID] + physics.model.hfield_data[start_idx : start_idx + res ** 2] = terrain.ravel() + super(Escape, self).initialize_episode(physics) + + # If we have a rendering context, we need to re-upload the modified + # heightfield data. + if physics.contexts: + with physics.contexts.gl.make_current() as ctx: + ctx.call( + mjlib.mjr_uploadHField, + physics.model.ptr, + physics.contexts.mujoco.ptr, + _HEIGHTFIELD_ID, + ) + + # Initial configuration. + orientation = self.random.randn(4) + orientation /= np.linalg.norm(orientation) + _find_non_contacting_height(physics, orientation) + + def get_observation(self, physics): + """Returns an observation to the agent.""" + obs = _common_observations(physics) + obs["origin"] = physics.origin() + obs["rangefinder"] = physics.rangefinder() + return obs + + def get_reward(self, physics): + """Returns a reward to the agent.""" + + # Escape reward term. + terrain_size = physics.model.hfield_size[_HEIGHTFIELD_ID, 0] + escape_reward = rewards.tolerance( + physics.origin_distance(), + bounds=(terrain_size, float("inf")), + margin=terrain_size, + value_at_margin=0, + sigmoid="linear", + ) + + return _upright_reward(physics, deviation_angle=20) * escape_reward + + +class Fetch(base.Task): + """A quadruped task solved by bringing a ball to the origin.""" + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode. + + Args: + physics: An instance of `Physics`. + + """ + # Initial configuration, random azimuth and horizontal position. 
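+        # The quaternion (cos(a/2), 0, 0, sin(a/2)) is a pure yaw of `azimuth`
+        # radians about the z-axis; the spawn point is drawn uniformly within
+        # 0.9 x the floor half-extent so the quadruped starts inside the arena.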
+ azimuth = self.random.uniform(0, 2 * np.pi) + orientation = np.array((np.cos(azimuth / 2), 0, 0, np.sin(azimuth / 2))) + spawn_radius = 0.9 * physics.named.model.geom_size["floor", 0] + x_pos, y_pos = self.random.uniform(-spawn_radius, spawn_radius, size=(2,)) + _find_non_contacting_height(physics, orientation, x_pos, y_pos) + + # Initial ball state. + physics.named.data.qpos["ball_root"][:2] = self.random.uniform( + -spawn_radius, spawn_radius, size=(2,) + ) + physics.named.data.qpos["ball_root"][2] = 2 + physics.named.data.qvel["ball_root"][:2] = 5 * self.random.randn(2) + super(Fetch, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation to the agent.""" + obs = _common_observations(physics) + obs["ball_state"] = physics.ball_state() + obs["target_position"] = physics.target_position() + return obs + + def get_reward(self, physics): + """Returns a reward to the agent.""" + + # Reward for moving close to the ball. + arena_radius = physics.named.model.geom_size["floor", 0] * np.sqrt(2) + workspace_radius = physics.named.model.site_size["workspace", 0] + ball_radius = physics.named.model.geom_size["ball", 0] + reach_reward = rewards.tolerance( + physics.self_to_ball_distance(), + bounds=(0, workspace_radius + ball_radius), + sigmoid="linear", + margin=arena_radius, + value_at_margin=0, + ) + + # Reward for bringing the ball to the target. + target_radius = physics.named.model.site_size["target", 0] + fetch_reward = rewards.tolerance( + physics.ball_to_target_distance(), + bounds=(0, target_radius), + sigmoid="linear", + margin=arena_radius, + value_at_margin=0, + ) + + reach_then_fetch = reach_reward * (0.5 + 0.5 * fetch_reward) + + return _upright_reward(physics) * reach_then_fetch diff --git a/envs/fb_mtenv_dmc/reacher.py b/envs/fb_mtenv_dmc/reacher.py new file mode 100644 index 0000000000000000000000000000000000000000..f18c792305581e5a16bc437c831b44851222ffcb --- /dev/null +++ b/envs/fb_mtenv_dmc/reacher.py @@ -0,0 +1,120 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Reacher domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . 
import common +from dm_control.suite.utils import randomizers +from dm_control.utils import containers +from dm_control.utils import rewards +import numpy as np + +SUITE = containers.TaggedTasks() +_DEFAULT_TIME_LIMIT = 20 +_BIG_TARGET = 0.05 +_SMALL_TARGET = 0.015 + + +def get_model_and_assets(): + """Returns a tuple containing the model XML string and a dict of assets.""" + return common.read_model("reacher.xml"), common.ASSETS + + +@SUITE.add("benchmarking", "easy") +def easy(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns reacher with sparse reward with 5e-2 tol and randomized target.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Reacher(target_size=_BIG_TARGET, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +@SUITE.add("benchmarking") +def hard(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): + """Returns reacher with sparse reward with 1e-2 tol and randomized target.""" + physics = Physics.from_xml_string(*get_model_and_assets()) + task = Reacher(target_size=_SMALL_TARGET, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, task, time_limit=time_limit, **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics simulation with additional features for the Reacher domain.""" + + def finger_to_target(self): + """Returns the vector from target to finger in global coordinates.""" + return ( + self.named.data.geom_xpos["target", :2] + - self.named.data.geom_xpos["finger", :2] + ) + + def finger_to_target_dist(self): + """Returns the signed distance between the finger and target surface.""" + return np.linalg.norm(self.finger_to_target()) + + +class Reacher(base.Task): + """A reacher `Task` to reach the target.""" + + def __init__(self, target_size, random=None): + """Initialize an instance of `Reacher`. + + Args: + target_size: A `float`, tolerance to determine whether finger reached the + target. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). 
+ """ + self._target_size = target_size + super(Reacher, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + physics.named.model.geom_size["target", 0] = self._target_size + randomizers.randomize_limited_and_rotational_joints(physics, self.random) + + # Randomize target position + angle = self.random.uniform(0, 2 * np.pi) + radius = self.random.uniform(0.05, 0.20) + physics.named.model.geom_pos["target", "x"] = radius * np.sin(angle) + physics.named.model.geom_pos["target", "y"] = radius * np.cos(angle) + + super(Reacher, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation of the state and the target position.""" + obs = collections.OrderedDict() + obs["position"] = physics.position() + obs["to_target"] = physics.finger_to_target() + obs["velocity"] = physics.velocity() + return obs + + def get_reward(self, physics): + radii = physics.named.model.geom_size[["target", "finger"], 0].sum() + return rewards.tolerance(physics.finger_to_target_dist(), (0, radii)) diff --git a/envs/fb_mtenv_dmc/reacher.xml b/envs/fb_mtenv_dmc/reacher.xml new file mode 100644 index 0000000000000000000000000000000000000000..343f799c01ec76abd396e74bcb011b7088f02273 --- /dev/null +++ b/envs/fb_mtenv_dmc/reacher.xml @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/stacker.py b/envs/fb_mtenv_dmc/stacker.py new file mode 100644 index 0000000000000000000000000000000000000000..a25609bed7aa210866bb00f46efc497a3a7bd867 --- /dev/null +++ b/envs/fb_mtenv_dmc/stacker.py @@ -0,0 +1,224 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Planar Stacker domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from . import base +from . import common +from dm_control.utils import containers +from dm_control.utils import rewards +from dm_control.utils import xml_tools + +from lxml import etree +import numpy as np + + +_CLOSE = 0.01 # (Meters) Distance below which a thing is considered close. 
+_CONTROL_TIMESTEP = 0.01 # (Seconds) +_TIME_LIMIT = 10 # (Seconds) +_ARM_JOINTS = [ + "arm_root", + "arm_shoulder", + "arm_elbow", + "arm_wrist", + "finger", + "fingertip", + "thumb", + "thumbtip", +] + +SUITE = containers.TaggedTasks() + + +def make_model(n_boxes): + """Returns a tuple containing the model XML string and a dict of assets.""" + xml_string = common.read_model("stacker.xml") + parser = etree.XMLParser(remove_blank_text=True) + mjcf = etree.XML(xml_string, parser) + + # Remove unused boxes + for b in range(n_boxes, 4): + box = xml_tools.find_element(mjcf, "body", "box" + str(b)) + box.getparent().remove(box) + + return etree.tostring(mjcf, pretty_print=True), common.ASSETS + + +@SUITE.add("hard") +def stack_2( + fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns stacker task with 2 boxes.""" + n_boxes = 2 + physics = Physics.from_xml_string(*make_model(n_boxes=n_boxes)) + task = Stack(n_boxes=n_boxes, fully_observable=fully_observable, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + control_timestep=_CONTROL_TIMESTEP, + time_limit=time_limit, + **environment_kwargs + ) + + +@SUITE.add("hard") +def stack_4( + fully_observable=True, time_limit=_TIME_LIMIT, random=None, environment_kwargs=None +): + """Returns stacker task with 4 boxes.""" + n_boxes = 4 + physics = Physics.from_xml_string(*make_model(n_boxes=n_boxes)) + task = Stack(n_boxes=n_boxes, fully_observable=fully_observable, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + control_timestep=_CONTROL_TIMESTEP, + time_limit=time_limit, + **environment_kwargs + ) + + +class Physics(mujoco.Physics): + """Physics with additional features for the Planar Manipulator domain.""" + + def bounded_joint_pos(self, joint_names): + """Returns joint positions as (sin, cos) values.""" + joint_pos = self.named.data.qpos[joint_names] + return np.vstack([np.sin(joint_pos), np.cos(joint_pos)]).T + + def joint_vel(self, joint_names): + """Returns joint velocities.""" + return self.named.data.qvel[joint_names] + + def body_2d_pose(self, body_names, orientation=True): + """Returns positions and/or orientations of bodies.""" + if not isinstance(body_names, str): + body_names = np.array(body_names).reshape(-1, 1) # Broadcast indices. + pos = self.named.data.xpos[body_names, ["x", "z"]] + if orientation: + ori = self.named.data.xquat[body_names, ["qw", "qy"]] + return np.hstack([pos, ori]) + else: + return pos + + def touch(self): + return np.log1p(self.data.sensordata) + + def site_distance(self, site1, site2): + site1_to_site2 = np.diff(self.named.data.site_xpos[[site2, site1]], axis=0) + return np.linalg.norm(site1_to_site2) + + +class Stack(base.Task): + """A Stack `Task`: stack the boxes.""" + + def __init__(self, n_boxes, fully_observable, random=None): + """Initialize an instance of the `Stack` task. + + Args: + n_boxes: An `int`, number of boxes to stack. + fully_observable: A `bool`, whether the observation should contain the + positions and velocities of the boxes and the location of the target. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). 
+ """ + self._n_boxes = n_boxes + self._box_names = ["box" + str(b) for b in range(n_boxes)] + self._box_joint_names = [] + for name in self._box_names: + for dim in "xyz": + self._box_joint_names.append("_".join([name, dim])) + self._fully_observable = fully_observable + super(Stack, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + # Local aliases + randint = self.random.randint + uniform = self.random.uniform + model = physics.named.model + data = physics.named.data + + # Find a collision-free random initial configuration. + penetrating = True + while penetrating: + + # Randomise angles of arm joints. + is_limited = model.jnt_limited[_ARM_JOINTS].astype(np.bool) + joint_range = model.jnt_range[_ARM_JOINTS] + lower_limits = np.where(is_limited, joint_range[:, 0], -np.pi) + upper_limits = np.where(is_limited, joint_range[:, 1], np.pi) + angles = uniform(lower_limits, upper_limits) + data.qpos[_ARM_JOINTS] = angles + + # Symmetrize hand. + data.qpos["finger"] = data.qpos["thumb"] + + # Randomise target location. + target_height = 2 * randint(self._n_boxes) + 1 + box_size = model.geom_size["target", 0] + model.body_pos["target", "z"] = box_size * target_height + model.body_pos["target", "x"] = uniform(-0.37, 0.37) + + # Randomise box locations. + for name in self._box_names: + data.qpos[name + "_x"] = uniform(0.1, 0.3) + data.qpos[name + "_z"] = uniform(0, 0.7) + data.qpos[name + "_y"] = uniform(0, 2 * np.pi) + + # Check for collisions. + physics.after_reset() + penetrating = physics.data.ncon > 0 + + super(Stack, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns either features or only sensors (to be used with pixels).""" + obs = collections.OrderedDict() + obs["arm_pos"] = physics.bounded_joint_pos(_ARM_JOINTS) + obs["arm_vel"] = physics.joint_vel(_ARM_JOINTS) + obs["touch"] = physics.touch() + if self._fully_observable: + obs["hand_pos"] = physics.body_2d_pose("hand") + obs["box_pos"] = physics.body_2d_pose(self._box_names) + obs["box_vel"] = physics.joint_vel(self._box_joint_names) + obs["target_pos"] = physics.body_2d_pose("target", orientation=False) + return obs + + def get_reward(self, physics): + """Returns a reward to the agent.""" + box_size = physics.named.model.geom_size["target", 0] + min_box_to_target_distance = min( + physics.site_distance(name, "target") for name in self._box_names + ) + box_is_close = rewards.tolerance( + min_box_to_target_distance, margin=2 * box_size + ) + hand_to_target_distance = physics.site_distance("grasp", "target") + hand_is_far = rewards.tolerance( + hand_to_target_distance, bounds=(0.1, float("inf")), margin=_CLOSE + ) + return box_is_close * hand_is_far diff --git a/envs/fb_mtenv_dmc/swimmer.xml b/envs/fb_mtenv_dmc/swimmer.xml new file mode 100644 index 0000000000000000000000000000000000000000..29c7bc813f04d3515b6343c099ce6720a5dd004e --- /dev/null +++ b/envs/fb_mtenv_dmc/swimmer.xml @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/envs/fb_mtenv_dmc/tests/domains_test.py b/envs/fb_mtenv_dmc/tests/domains_test.py new file mode 100644 index 0000000000000000000000000000000000000000..615401a32d63fb82ddf64d33fd557da9ba19e92d --- /dev/null +++ b/envs/fb_mtenv_dmc/tests/domains_test.py @@ -0,0 +1,319 @@ +# Copyright 2017 The dm_control Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Tests for dm_control.suite domains.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Internal dependencies. +from absl.testing import absltest +from absl.testing import parameterized +from dm_control import suite +from dm_control.rl import control +import mock +import numpy as np +import six +from six.moves import range +from six.moves import zip + + +def uniform_random_policy(action_spec, random=None): + lower_bounds = action_spec.minimum + upper_bounds = action_spec.maximum + # Draw values between -1 and 1 for unbounded actions. + lower_bounds = np.where(np.isinf(lower_bounds), -1.0, lower_bounds) + upper_bounds = np.where(np.isinf(upper_bounds), 1.0, upper_bounds) + random_state = np.random.RandomState(random) + + def policy(time_step): + del time_step # Unused. + return random_state.uniform(lower_bounds, upper_bounds) + + return policy + + +def step_environment(env, policy, num_episodes=5, max_steps_per_episode=10): + for _ in range(num_episodes): + step_count = 0 + time_step = env.reset() + yield time_step + while not time_step.last(): + action = policy(time_step) + time_step = env.step(action) + step_count += 1 + yield time_step + if step_count >= max_steps_per_episode: + break + + +def make_trajectory(domain, task, seed, **trajectory_kwargs): + env = suite.load(domain, task, task_kwargs={"random": seed}) + policy = uniform_random_policy(env.action_spec(), random=seed) + return step_environment(env, policy, **trajectory_kwargs) + + +class DomainTest(parameterized.TestCase): + """Tests run on all the tasks registered.""" + + def test_constants(self): + num_tasks = sum(len(tasks) for tasks in six.itervalues(suite.TASKS_BY_DOMAIN)) + + self.assertLen(suite.ALL_TASKS, num_tasks) + + def _validate_observation(self, observation_dict, observation_spec): + obs = observation_dict.copy() + for name, spec in six.iteritems(observation_spec): + arr = obs.pop(name) + self.assertEqual(arr.shape, spec.shape) + self.assertEqual(arr.dtype, spec.dtype) + self.assertTrue( + np.all(np.isfinite(arr)), + msg="{!r} has non-finite value(s): {!r}".format(name, arr), + ) + self.assertEmpty( + obs, + msg="Observation contains arrays(s) that are not in the spec: {!r}".format( + obs + ), + ) + + def _validate_reward_range(self, time_step): + if time_step.first(): + self.assertIsNone(time_step.reward) + else: + self.assertIsInstance(time_step.reward, float) + self.assertBetween(time_step.reward, 0, 1) + + def _validate_discount(self, time_step): + if time_step.first(): + self.assertIsNone(time_step.discount) + else: + self.assertIsInstance(time_step.discount, float) + self.assertBetween(time_step.discount, 0, 1) + + def _validate_control_range(self, lower_bounds, upper_bounds): + for b in lower_bounds: + self.assertEqual(b, -1.0) + for b in upper_bounds: + self.assertEqual(b, 1.0) + + 
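+    # _validate_control_range encodes the benchmarking convention that action
+    # bounds are canonicalised to exactly [-1, 1]; it is only applied to tasks
+    # in suite.BENCHMARKING (see test_task_conforms_to_spec below).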
@parameterized.parameters(*suite.ALL_TASKS) + def test_components_have_names(self, domain, task): + env = suite.load(domain, task) + model = env.physics.model + + object_types_and_size_fields = [ + ("body", "nbody"), + ("joint", "njnt"), + ("geom", "ngeom"), + ("site", "nsite"), + ("camera", "ncam"), + ("light", "nlight"), + ("mesh", "nmesh"), + ("hfield", "nhfield"), + ("texture", "ntex"), + ("material", "nmat"), + ("equality", "neq"), + ("tendon", "ntendon"), + ("actuator", "nu"), + ("sensor", "nsensor"), + ("numeric", "nnumeric"), + ("text", "ntext"), + ("tuple", "ntuple"), + ] + for object_type, size_field in object_types_and_size_fields: + for idx in range(getattr(model, size_field)): + object_name = model.id2name(idx, object_type) + self.assertNotEqual( + object_name, + "", + msg="Model {!r} contains unnamed {!r} with ID {}.".format( + model.name, object_type, idx + ), + ) + + @parameterized.parameters(*suite.ALL_TASKS) + def test_model_has_at_least_2_cameras(self, domain, task): + env = suite.load(domain, task) + model = env.physics.model + self.assertGreaterEqual( + model.ncam, + 2, + "Model {!r} should have at least 2 cameras, has {}.".format( + model.name, model.ncam + ), + ) + + @parameterized.parameters(*suite.ALL_TASKS) + def test_task_conforms_to_spec(self, domain, task): + """Tests that the environment timesteps conform to specifications.""" + is_benchmark = (domain, task) in suite.BENCHMARKING + env = suite.load(domain, task) + observation_spec = env.observation_spec() + action_spec = env.action_spec() + + # Check action bounds. + if is_benchmark: + self._validate_control_range(action_spec.minimum, action_spec.maximum) + + # Step through the environment, applying random actions sampled within the + # valid range and check the observations, rewards, and discounts. + policy = uniform_random_policy(action_spec) + for time_step in step_environment(env, policy): + self._validate_observation(time_step.observation, observation_spec) + self._validate_discount(time_step) + if is_benchmark: + self._validate_reward_range(time_step) + + @parameterized.parameters(*suite.ALL_TASKS) + def test_environment_is_deterministic(self, domain, task): + """Tests that identical seeds and actions produce identical trajectories.""" + seed = 0 + # Iterate over two trajectories generated using identical sequences of + # random actions, and with identical task random states. Check that the + # observations, rewards, discounts and step types are identical. 
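+ # `make_trajectory` returns a lazy generator; `zip` below advances both environments in lockstep.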
+ trajectory1 = make_trajectory(domain=domain, task=task, seed=seed) + trajectory2 = make_trajectory(domain=domain, task=task, seed=seed) + for time_step1, time_step2 in zip(trajectory1, trajectory2): + self.assertEqual(time_step1.step_type, time_step2.step_type) + self.assertEqual(time_step1.reward, time_step2.reward) + self.assertEqual(time_step1.discount, time_step2.discount) + for key in six.iterkeys(time_step1.observation): + np.testing.assert_array_equal( + time_step1.observation[key], + time_step2.observation[key], + err_msg="Observation {!r} is not equal.".format(key), + ) + + def assertCorrectColors(self, physics, reward): + colors = physics.named.model.mat_rgba + for material_name in ("self", "effector", "target"): + highlight = colors[material_name + "_highlight"] + default = colors[material_name + "_default"] + blend_coef = reward ** 4 + expected = blend_coef * highlight + (1.0 - blend_coef) * default + actual = colors[material_name] + err_msg = ( + "Material {!r} has unexpected color.\nExpected: {!r}\n" + "Actual: {!r}".format(material_name, expected, actual) + ) + np.testing.assert_array_almost_equal(expected, actual, err_msg=err_msg) + + @parameterized.parameters(*suite.ALL_TASKS) + def test_visualize_reward(self, domain, task): + env = suite.load(domain, task) + env.task.visualize_reward = True + action = np.zeros(env.action_spec().shape) + + with mock.patch.object(env.task, "get_reward") as mock_get_reward: + mock_get_reward.return_value = -3.0 # Rewards < 0 should be clipped. + env.reset() + mock_get_reward.assert_called_with(env.physics) + self.assertCorrectColors(env.physics, reward=0.0) + + mock_get_reward.reset_mock() + mock_get_reward.return_value = 0.5 + env.step(action) + mock_get_reward.assert_called_with(env.physics) + self.assertCorrectColors(env.physics, reward=mock_get_reward.return_value) + + mock_get_reward.reset_mock() + mock_get_reward.return_value = 2.0 # Rewards > 1 should be clipped. + env.step(action) + mock_get_reward.assert_called_with(env.physics) + self.assertCorrectColors(env.physics, reward=1.0) + + mock_get_reward.reset_mock() + mock_get_reward.return_value = 0.25 + env.reset() + mock_get_reward.assert_called_with(env.physics) + self.assertCorrectColors(env.physics, reward=mock_get_reward.return_value) + + @parameterized.parameters(*suite.ALL_TASKS) + def test_task_supports_environment_kwargs(self, domain, task): + env = suite.load(domain, task, environment_kwargs=dict(flat_observation=True)) + # Check that the kwargs are actually passed through to the environment. 
+ self.assertSetEqual(set(env.observation_spec()), {control.FLAT_OBSERVATION_KEY}) + + @parameterized.parameters(*suite.ALL_TASKS) + def test_observation_arrays_dont_share_memory(self, domain, task): + env = suite.load(domain, task) + first_timestep = env.reset() + action = np.zeros(env.action_spec().shape) + second_timestep = env.step(action) + for name, first_array in six.iteritems(first_timestep.observation): + second_array = second_timestep.observation[name] + self.assertFalse( + np.may_share_memory(first_array, second_array), + msg="Consecutive observations of {!r} may share memory.".format(name), + ) + + @parameterized.parameters(*suite.ALL_TASKS) + def test_observations_dont_contain_constant_elements(self, domain, task): + env = suite.load(domain, task) + trajectory = make_trajectory( + domain=domain, task=task, seed=0, num_episodes=2, max_steps_per_episode=1000 + ) + observations = {name: [] for name in env.observation_spec()} + for time_step in trajectory: + for name, array in six.iteritems(time_step.observation): + observations[name].append(array) + + failures = [] + + for name, array_list in six.iteritems(observations): + # Sampling random uniform actions generally isn't sufficient to trigger + # these touch sensors. + if ( + domain in ("manipulator", "stacker") + and name == "touch" + or domain == "quadruped" + and name == "force_torque" + ): + continue + stacked_arrays = np.array(array_list) + is_constant = np.all(stacked_arrays == stacked_arrays[0], axis=0) + has_constant_elements = ( + is_constant if np.isscalar(is_constant) else np.any(is_constant) + ) + if has_constant_elements: + failures.append((name, is_constant)) + + self.assertEmpty( + failures, + msg="The following observation(s) contain constant elements:\n{}".format( + "\n".join( + ":\t".join([name, str(is_constant)]) + for (name, is_constant) in failures + ) + ), + ) + + @parameterized.parameters(*suite.ALL_TASKS) + def test_initial_state_is_randomized(self, domain, task): + env = suite.load(domain, task, task_kwargs={"random": 42}) + obs1 = env.reset().observation + obs2 = env.reset().observation + self.assertFalse( + all(np.all(obs1[k] == obs2[k]) for k in obs1), + "Two consecutive initial states have identical observations.\n" + "First: {}\nSecond: {}".format(obs1, obs2), + ) + + +if __name__ == "__main__": + absltest.main() diff --git a/envs/fb_mtenv_dmc/tests/loader_test.py b/envs/fb_mtenv_dmc/tests/loader_test.py new file mode 100644 index 0000000000000000000000000000000000000000..8570bf9f7079616756277730e1fcfa5f2350d0a5 --- /dev/null +++ b/envs/fb_mtenv_dmc/tests/loader_test.py @@ -0,0 +1,51 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Tests for the dm_control.suite loader.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Internal dependencies. 
+ +from absl.testing import absltest + +from dm_control import suite +from dm_control.rl import control + + +class LoaderTest(absltest.TestCase): + def test_load_without_kwargs(self): + env = suite.load("cartpole", "swingup") + self.assertIsInstance(env, control.Environment) + + def test_load_with_kwargs(self): + env = suite.load( + "cartpole", "swingup", task_kwargs={"time_limit": 40, "random": 99} + ) + self.assertIsInstance(env, control.Environment) + + +class LoaderConstantsTest(absltest.TestCase): + def testSuiteConstants(self): + self.assertNotEmpty(suite.BENCHMARKING) + self.assertNotEmpty(suite.EASY) + self.assertNotEmpty(suite.HARD) + self.assertNotEmpty(suite.EXTRA) + + +if __name__ == "__main__": + absltest.main() diff --git a/envs/fb_mtenv_dmc/tests/lqr_test.py b/envs/fb_mtenv_dmc/tests/lqr_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7d168c480f2273b08a645c74da85bf145584f34c --- /dev/null +++ b/envs/fb_mtenv_dmc/tests/lqr_test.py @@ -0,0 +1,87 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Tests specific to the LQR domain.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import unittest + +# Internal dependencies. +from absl import logging + +from absl.testing import absltest +from absl.testing import parameterized + +from . import lqr +from . import lqr_solver + +import numpy as np +from six.moves import range + + +class LqrTest(parameterized.TestCase): + @parameterized.named_parameters(("lqr_2_1", lqr.lqr_2_1), ("lqr_6_2", lqr.lqr_6_2)) + def test_lqr_optimal_policy(self, make_env): + env = make_env() + p, k, beta = lqr_solver.solve(env) + self.assertPolicyisOptimal(env, p, k, beta) + + @parameterized.named_parameters(("lqr_2_1", lqr.lqr_2_1), ("lqr_6_2", lqr.lqr_6_2)) + @unittest.skipUnless( + condition=lqr_solver.sp, + reason="scipy is not available, so non-scipy DARE solver is the default.", + ) + def test_lqr_optimal_policy_no_scipy(self, make_env): + env = make_env() + old_sp = lqr_solver.sp + try: + lqr_solver.sp = None # Force the solver to use the non-scipy code path. 
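+ # solve() returns the quadratic cost-to-go matrix p, the feedback gain k and the discount beta.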
+ p, k, beta = lqr_solver.solve(env) + finally: + lqr_solver.sp = old_sp + self.assertPolicyisOptimal(env, p, k, beta) + + def assertPolicyisOptimal(self, env, p, k, beta): + tolerance = 1e-3 + n_steps = int(math.ceil(math.log10(tolerance) / math.log10(beta))) + logging.info("%d timesteps for %g convergence.", n_steps, tolerance) + total_loss = 0.0 + + timestep = env.reset() + initial_state = np.hstack( + (timestep.observation["position"], timestep.observation["velocity"]) + ) + logging.info("Measuring total cost over %d steps.", n_steps) + for _ in range(n_steps): + x = np.hstack( + (timestep.observation["position"], timestep.observation["velocity"]) + ) + # u = k*x is the optimal policy + u = k.dot(x) + total_loss += 1 - (timestep.reward or 0.0) + timestep = env.step(u) + + logging.info("Analytical expected total cost is .5*x^T*p*x.") + expected_loss = 0.5 * initial_state.T.dot(p).dot(initial_state) + logging.info("Comparing measured and predicted costs.") + np.testing.assert_allclose(expected_loss, total_loss, rtol=tolerance) + + +if __name__ == "__main__": + absltest.main() diff --git a/envs/fb_mtenv_dmc/utils/__init__.py b/envs/fb_mtenv_dmc/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ea19cfadfcc6ab5b7cefb0cacfa54a9ad37662a --- /dev/null +++ b/envs/fb_mtenv_dmc/utils/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Utility functions used in the control suite.""" diff --git a/envs/fb_mtenv_dmc/utils/parse_amc.py b/envs/fb_mtenv_dmc/utils/parse_amc.py new file mode 100644 index 0000000000000000000000000000000000000000..51c314ee621ba99ad381e8de56c6d6b582ee029b --- /dev/null +++ b/envs/fb_mtenv_dmc/utils/parse_amc.py @@ -0,0 +1,301 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""Parse and convert amc motion capture data.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from dm_control.mujoco.wrapper import mjbindings +import numpy as np +from scipy import interpolate +from six.moves import range + +mjlib = mjbindings.mjlib + +MOCAP_DT = 1.0 / 120.0 +CONVERSION_LENGTH = 0.056444 + +_CMU_MOCAP_JOINT_ORDER = ( + "root0", + "root1", + "root2", + "root3", + "root4", + "root5", + "lowerbackrx", + "lowerbackry", + "lowerbackrz", + "upperbackrx", + "upperbackry", + "upperbackrz", + "thoraxrx", + "thoraxry", + "thoraxrz", + "lowerneckrx", + "lowerneckry", + "lowerneckrz", + "upperneckrx", + "upperneckry", + "upperneckrz", + "headrx", + "headry", + "headrz", + "rclaviclery", + "rclaviclerz", + "rhumerusrx", + "rhumerusry", + "rhumerusrz", + "rradiusrx", + "rwristry", + "rhandrx", + "rhandrz", + "rfingersrx", + "rthumbrx", + "rthumbrz", + "lclaviclery", + "lclaviclerz", + "lhumerusrx", + "lhumerusry", + "lhumerusrz", + "lradiusrx", + "lwristry", + "lhandrx", + "lhandrz", + "lfingersrx", + "lthumbrx", + "lthumbrz", + "rfemurrx", + "rfemurry", + "rfemurrz", + "rtibiarx", + "rfootrx", + "rfootrz", + "rtoesrx", + "lfemurrx", + "lfemurry", + "lfemurrz", + "ltibiarx", + "lfootrx", + "lfootrz", + "ltoesrx", +) + +Converted = collections.namedtuple("Converted", ["qpos", "qvel", "time"]) + + +def convert(file_name, physics, timestep): + """Converts the parsed .amc values into qpos and qvel values and resamples. + + Args: + file_name: The .amc file to be parsed and converted. + physics: The corresponding physics instance. + timestep: Desired output interval between resampled frames. + + Returns: + A namedtuple with fields: + `qpos`, a numpy array containing converted positional variables. + `qvel`, a numpy array containing converted velocity variables. + `time`, a numpy array containing the corresponding times. + """ + frame_values = parse(file_name) + joint2index = {} + for name in physics.named.data.qpos.axes.row.names: + joint2index[name] = physics.named.data.qpos.axes.row.convert_key_item(name) + index2joint = {} + for joint, index in joint2index.items(): + if isinstance(index, slice): + indices = range(index.start, index.stop) + else: + indices = [index] + for ii in indices: + index2joint[ii] = joint + + # Convert frame_values to qpos + amcvals2qpos_transformer = Amcvals2qpos(index2joint, _CMU_MOCAP_JOINT_ORDER) + qpos_values = [] + for frame_value in frame_values: + qpos_values.append(amcvals2qpos_transformer(frame_value)) + qpos_values = np.stack(qpos_values) # Time by nq + + # Interpolate/resample. + # Note: interpolate quaternions rather than euler angles (slerp). 
+ # see https://en.wikipedia.org/wiki/Slerp + qpos_values_resampled = [] + time_vals = np.arange(0, len(frame_values) * MOCAP_DT - 1e-8, MOCAP_DT) + time_vals_new = np.arange(0, len(frame_values) * MOCAP_DT, timestep) + while time_vals_new[-1] > time_vals[-1]: + time_vals_new = time_vals_new[:-1] + + for i in range(qpos_values.shape[1]): + f = interpolate.splrep(time_vals, qpos_values[:, i]) + qpos_values_resampled.append(interpolate.splev(time_vals_new, f)) + + qpos_values_resampled = np.stack(qpos_values_resampled) # nq by ntime + + qvel_list = [] + for t in range(qpos_values_resampled.shape[1] - 1): + p_tp1 = qpos_values_resampled[:, t + 1] + p_t = qpos_values_resampled[:, t] + qvel = [ + (p_tp1[:3] - p_t[:3]) / timestep, + mj_quat2vel(mj_quatdiff(p_t[3:7], p_tp1[3:7]), timestep), + (p_tp1[7:] - p_t[7:]) / timestep, + ] + qvel_list.append(np.concatenate(qvel)) + + qvel_values_resampled = np.vstack(qvel_list).T + + return Converted(qpos_values_resampled, qvel_values_resampled, time_vals_new) + + +def parse(file_name): + """Parses the amc file format.""" + values = [] + fid = open(file_name, "r") + line = fid.readline().strip() + frame_ind = 1 + first_frame = True + while True: + # Parse first frame. + if first_frame and line[0] == str(frame_ind): + first_frame = False + frame_ind += 1 + frame_vals = [] + while True: + line = fid.readline().strip() + if not line or line == str(frame_ind): + values.append(np.array(frame_vals, dtype=np.float)) + break + tokens = line.split() + frame_vals.extend(tokens[1:]) + # Parse other frames. + elif line == str(frame_ind): + frame_ind += 1 + frame_vals = [] + while True: + line = fid.readline().strip() + if not line or line == str(frame_ind): + values.append(np.array(frame_vals, dtype=np.float)) + break + tokens = line.split() + frame_vals.extend(tokens[1:]) + else: + line = fid.readline().strip() + if not line: + break + return values + + +class Amcvals2qpos(object): + """Callable that converts .amc values for a frame and to MuJoCo qpos format.""" + + def __init__(self, index2joint, joint_order): + """Initializes a new Amcvals2qpos instance. + + Args: + index2joint: List of joint angles in .amc file. + joint_order: List of joint names in MuJoco MJCF. + """ + # Root is x,y,z, then quat. + # need to get indices of qpos that order for amc default order + self.qpos_root_xyz_ind = [0, 1, 2] + self.root_xyz_ransform = ( + np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) * CONVERSION_LENGTH + ) + self.qpos_root_quat_ind = [3, 4, 5, 6] + amc2qpos_transform = np.zeros((len(index2joint), len(joint_order))) + for i in range(len(index2joint)): + for j in range(len(joint_order)): + if index2joint[i] == joint_order[j]: + if "rx" in index2joint[i]: + amc2qpos_transform[i][j] = 1 + elif "ry" in index2joint[i]: + amc2qpos_transform[i][j] = 1 + elif "rz" in index2joint[i]: + amc2qpos_transform[i][j] = 1 + self.amc2qpos_transform = amc2qpos_transform + + def __call__(self, amc_val): + """Converts a `.amc` frame to MuJoCo qpos format.""" + amc_val_rad = np.deg2rad(amc_val) + qpos = np.dot(self.amc2qpos_transform, amc_val_rad) + + # Root. + qpos[:3] = np.dot(self.root_xyz_ransform, amc_val[:3]) + qpos_quat = euler2quat(amc_val[3], amc_val[4], amc_val[5]) + qpos_quat = mj_quatprod(euler2quat(90, 0, 0), qpos_quat) + + for i, ind in enumerate(self.qpos_root_quat_ind): + qpos[ind] = qpos_quat[i] + + return qpos + + +def euler2quat(ax, ay, az): + """Converts euler angles to a quaternion. + + Note: rotation order is zyx + + Args: + ax: Roll angle (deg) + ay: Pitch angle (deg). 
+ az: Yaw angle (deg). + + Returns: + A numpy array representing the rotation as a quaternion. + """ + r1 = az + r2 = ay + r3 = ax + + c1 = np.cos(np.deg2rad(r1 / 2)) + s1 = np.sin(np.deg2rad(r1 / 2)) + c2 = np.cos(np.deg2rad(r2 / 2)) + s2 = np.sin(np.deg2rad(r2 / 2)) + c3 = np.cos(np.deg2rad(r3 / 2)) + s3 = np.sin(np.deg2rad(r3 / 2)) + + q0 = c1 * c2 * c3 + s1 * s2 * s3 + q1 = c1 * c2 * s3 - s1 * s2 * c3 + q2 = c1 * s2 * c3 + s1 * c2 * s3 + q3 = s1 * c2 * c3 - c1 * s2 * s3 + + return np.array([q0, q1, q2, q3]) + + +def mj_quatprod(q, r): + quaternion = np.zeros(4) + mjlib.mju_mulQuat(quaternion, np.ascontiguousarray(q), np.ascontiguousarray(r)) + return quaternion + + +def mj_quat2vel(q, dt): + vel = np.zeros(3) + mjlib.mju_quat2Vel(vel, np.ascontiguousarray(q), dt) + return vel + + +def mj_quatneg(q): + quaternion = np.zeros(4) + mjlib.mju_negQuat(quaternion, np.ascontiguousarray(q)) + return quaternion + + +def mj_quatdiff(source, target): + return mj_quatprod(mj_quatneg(source), np.ascontiguousarray(target)) diff --git a/envs/fb_mtenv_dmc/utils/parse_amc_test.py b/envs/fb_mtenv_dmc/utils/parse_amc_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4d3e6c8cf69a62d2add068939f2c9626541a110a --- /dev/null +++ b/envs/fb_mtenv_dmc/utils/parse_amc_test.py @@ -0,0 +1,68 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Tests for parse_amc utility.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +# Internal dependencies. + +from absl.testing import absltest +from . import humanoid_CMU +from dm_control.suite.utils import parse_amc + +from dm_control.utils import io as resources + +_TEST_AMC_PATH = resources.GetResourceFilename( + os.path.join(os.path.dirname(__file__), "../demos/zeros.amc") +) + + +class ParseAMCTest(absltest.TestCase): + def test_sizes_of_parsed_data(self): + + # Instantiate the humanoid environment. + env = humanoid_CMU.stand() + + # Parse and convert specified clip. 
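+ # convert() also resamples the mocap frames to the environment's control timestep.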
+ converted = parse_amc.convert( + _TEST_AMC_PATH, env.physics, env.control_timestep() + ) + + self.assertEqual(converted.qpos.shape[0], 63) + self.assertEqual(converted.qvel.shape[0], 62) + self.assertEqual(converted.time.shape[0], converted.qpos.shape[1]) + self.assertEqual(converted.qpos.shape[1], converted.qvel.shape[1] + 1) + + # Parse and convert specified clip -- WITH SMALLER TIMESTEP + converted2 = parse_amc.convert( + _TEST_AMC_PATH, env.physics, 0.5 * env.control_timestep() + ) + + self.assertEqual(converted2.qpos.shape[0], 63) + self.assertEqual(converted2.qvel.shape[0], 62) + self.assertEqual(converted2.time.shape[0], converted2.qpos.shape[1]) + self.assertEqual(converted.qpos.shape[1], converted.qvel.shape[1] + 1) + + # Compare sizes of parsed objects for different timesteps + self.assertEqual(converted.qpos.shape[1] * 2, converted2.qpos.shape[1]) + + +if __name__ == "__main__": + absltest.main() diff --git a/envs/fb_mtenv_dmc/utils/randomizers.py b/envs/fb_mtenv_dmc/utils/randomizers.py new file mode 100644 index 0000000000000000000000000000000000000000..2e472d2418403a2cdcdaa4f780fa09277742e491 --- /dev/null +++ b/envs/fb_mtenv_dmc/utils/randomizers.py @@ -0,0 +1,90 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Randomization functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from dm_control.mujoco.wrapper import mjbindings +import numpy as np +from six.moves import range + + +def random_limited_quaternion(random, limit): + """Generates a random quaternion limited to the specified rotations.""" + axis = random.randn(3) + axis /= np.linalg.norm(axis) + angle = random.rand() * limit + + quaternion = np.zeros(4) + mjbindings.mjlib.mju_axisAngle2Quat(quaternion, axis, angle) + + return quaternion + + +def randomize_limited_and_rotational_joints(physics, random=None): + """Randomizes the positions of joints defined in the physics body. + + The following randomization rules apply: + - Bounded joints (hinges or sliders) are sampled uniformly in the bounds. + - Unbounded hinges are samples uniformly in [-pi, pi] + - Quaternions for unlimited free joints and ball joints are sampled + uniformly on the unit 3-sphere. + - Quaternions for limited ball joints are sampled uniformly on a sector + of the unit 3-sphere. + - The linear degrees of freedom of free joints are not randomized. + + Args: + physics: Instance of 'Physics' class that holds a loaded model. + random: Optional instance of 'np.random.RandomState'. Defaults to the global + NumPy random state. 
+ """ + random = random or np.random + + hinge = mjbindings.enums.mjtJoint.mjJNT_HINGE + slide = mjbindings.enums.mjtJoint.mjJNT_SLIDE + ball = mjbindings.enums.mjtJoint.mjJNT_BALL + free = mjbindings.enums.mjtJoint.mjJNT_FREE + + qpos = physics.named.data.qpos + + for joint_id in range(physics.model.njnt): + joint_name = physics.model.id2name(joint_id, "joint") + joint_type = physics.model.jnt_type[joint_id] + is_limited = physics.model.jnt_limited[joint_id] + range_min, range_max = physics.model.jnt_range[joint_id] + + if is_limited: + if joint_type == hinge or joint_type == slide: + qpos[joint_name] = random.uniform(range_min, range_max) + + elif joint_type == ball: + qpos[joint_name] = random_limited_quaternion(random, range_max) + + else: + if joint_type == hinge: + qpos[joint_name] = random.uniform(-np.pi, np.pi) + + elif joint_type == ball: + quat = random.randn(4) + quat /= np.linalg.norm(quat) + qpos[joint_name] = quat + + elif joint_type == free: + quat = random.rand(4) + quat /= np.linalg.norm(quat) + qpos[joint_name][3:] = quat diff --git a/envs/fb_mtenv_dmc/utils/randomizers_test.py b/envs/fb_mtenv_dmc/utils/randomizers_test.py new file mode 100644 index 0000000000000000000000000000000000000000..fcfb0ddcbf43352804223452421dade67520902e --- /dev/null +++ b/envs/fb_mtenv_dmc/utils/randomizers_test.py @@ -0,0 +1,177 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Tests for randomizers.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Internal dependencies. +from absl.testing import absltest +from absl.testing import parameterized +from dm_control import mujoco +from dm_control.mujoco.wrapper import mjbindings +from dm_control.suite.utils import randomizers +import numpy as np +from six.moves import range + +mjlib = mjbindings.mjlib + + +class RandomizeUnlimitedJointsTest(parameterized.TestCase): + def setUp(self): + self.rand = np.random.RandomState(100) + + def test_single_joint_of_each_type(self): + physics = mujoco.Physics.from_xml_string( + """ + + + + + + + + + + + + + + + + + + + + + + + + + """ + ) + + randomizers.randomize_limited_and_rotational_joints(physics, self.rand) + self.assertNotEqual(0.0, physics.named.data.qpos["hinge"]) + self.assertNotEqual(0.0, physics.named.data.qpos["limited_hinge"]) + self.assertNotEqual(0.0, physics.named.data.qpos["limited_slide"]) + + self.assertNotEqual(0.0, np.sum(physics.named.data.qpos["ball"])) + self.assertNotEqual(0.0, np.sum(physics.named.data.qpos["limited_ball"])) + + self.assertNotEqual(0.0, np.sum(physics.named.data.qpos["free"][3:])) + + # Unlimited slide and the positional part of the free joint remains + # uninitialized. 
+ self.assertEqual(0.0, physics.named.data.qpos["slide"]) + self.assertEqual(0.0, np.sum(physics.named.data.qpos["free"][:3])) + + def test_multiple_joints_of_same_type(self): + physics = mujoco.Physics.from_xml_string( + """ + + + + + + + + + """ + ) + + randomizers.randomize_limited_and_rotational_joints(physics, self.rand) + self.assertNotEqual(0.0, physics.named.data.qpos["hinge_1"]) + self.assertNotEqual(0.0, physics.named.data.qpos["hinge_2"]) + self.assertNotEqual(0.0, physics.named.data.qpos["hinge_3"]) + + self.assertNotEqual( + physics.named.data.qpos["hinge_1"], physics.named.data.qpos["hinge_2"] + ) + + self.assertNotEqual( + physics.named.data.qpos["hinge_2"], physics.named.data.qpos["hinge_3"] + ) + + self.assertNotEqual( + physics.named.data.qpos["hinge_1"], physics.named.data.qpos["hinge_3"] + ) + + def test_unlimited_hinge_randomization_range(self): + physics = mujoco.Physics.from_xml_string( + """ + + + + + + + """ + ) + + for _ in range(10): + randomizers.randomize_limited_and_rotational_joints(physics, self.rand) + self.assertBetween(physics.named.data.qpos["hinge"], -np.pi, np.pi) + + def test_limited_1d_joint_limits_are_respected(self): + physics = mujoco.Physics.from_xml_string( + """ + + + + + + + + + + + """ + ) + + for _ in range(10): + randomizers.randomize_limited_and_rotational_joints(physics, self.rand) + self.assertBetween( + physics.named.data.qpos["hinge"], np.deg2rad(0), np.deg2rad(10) + ) + self.assertBetween(physics.named.data.qpos["slide"], 30, 50) + + def test_limited_ball_joint_are_respected(self): + physics = mujoco.Physics.from_xml_string( + """ + + + + + + + """ + ) + + body_axis = np.array([1.0, 0.0, 0.0]) + joint_axis = np.zeros(3) + for _ in range(10): + randomizers.randomize_limited_and_rotational_joints(physics, self.rand) + + quat = physics.named.data.qpos["ball"] + mjlib.mju_rotVecQuat(joint_axis, body_axis, quat) + angle_cos = np.dot(body_axis, joint_axis) + self.assertGreater(angle_cos, 0.5) # cos(60) = 0.5 + + +if __name__ == "__main__": + absltest.main() diff --git a/envs/fb_mtenv_dmc/walker.py b/envs/fb_mtenv_dmc/walker.py new file mode 100644 index 0000000000000000000000000000000000000000..b9f319993ec567405dcc61d3ef9d619ebcc0578d --- /dev/null +++ b/envs/fb_mtenv_dmc/walker.py @@ -0,0 +1,190 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Planar Walker Domain.""" + +from __future__ import absolute_import, division, print_function + +import collections + +from dm_control import mujoco +from dm_control.rl import control +from dm_control.suite.utils import randomizers +from dm_control.utils import containers, rewards + +from . import base, common + +_DEFAULT_TIME_LIMIT = 25 +_CONTROL_TIMESTEP = 0.025 + +# Minimal height of torso over foot above which stand reward is 1. +_STAND_HEIGHT = 1.2 + +# Horizontal speeds (meters/second) above which move reward is 1. 
+_WALK_SPEED = 1 +_RUN_SPEED = 8 + + +SUITE = containers.TaggedTasks() + + +def get_model_and_assets(xml_file_id): + """Returns a tuple containing the model XML string and a dict of assets.""" + if xml_file_id is not None: + filename = f"walker_{xml_file_id}.xml" + print(filename) + else: + filename = f"walker.xml" + return common.read_model(filename), common.ASSETS + + +@SUITE.add("benchmarking") +def stand( + time_limit=_DEFAULT_TIME_LIMIT, + xml_file_id=None, + random=None, + environment_kwargs=None, +): + """Returns the Stand task.""" + physics = Physics.from_xml_string(*get_model_and_assets(xml_file_id)) + task = PlanarWalker(move_speed=0, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs, + ) + + +@SUITE.add("benchmarking") +def walk( + time_limit=_DEFAULT_TIME_LIMIT, + xml_file_id=None, + random=None, + environment_kwargs=None, +): + """Returns the Walk task.""" + physics = Physics.from_xml_string(*get_model_and_assets(xml_file_id)) + task = PlanarWalker(move_speed=_WALK_SPEED, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs, + ) + + +@SUITE.add("benchmarking") +def run( + time_limit=_DEFAULT_TIME_LIMIT, + xml_file_id=None, + random=None, + environment_kwargs=None, +): + """Returns the Run task.""" + physics = Physics.from_xml_string(*get_model_and_assets(xml_file_id)) + task = PlanarWalker(move_speed=_RUN_SPEED, random=random) + environment_kwargs = environment_kwargs or {} + return control.Environment( + physics, + task, + time_limit=time_limit, + control_timestep=_CONTROL_TIMESTEP, + **environment_kwargs, + ) + + +class Physics(mujoco.Physics): + """Physics simulation with additional features for the Walker domain.""" + + def torso_upright(self): + """Returns projection from z-axes of torso to the z-axes of world.""" + return self.named.data.xmat["torso", "zz"] + + def torso_height(self): + """Returns the height of the torso.""" + return self.named.data.xpos["torso", "z"] + + def horizontal_velocity(self): + """Returns the horizontal velocity of the center-of-mass.""" + return self.named.data.sensordata["torso_subtreelinvel"][0] + + def orientations(self): + """Returns planar orientations of all bodies.""" + return self.named.data.xmat[1:, ["xx", "xz"]].ravel() + + +class PlanarWalker(base.Task): + """A planar walker task.""" + + def __init__(self, move_speed, random=None): + """Initializes an instance of `PlanarWalker`. + + Args: + move_speed: A float. If this value is zero, reward is given simply for + standing up. Otherwise this specifies a target horizontal velocity for + the walking task. + random: Optional, either a `numpy.random.RandomState` instance, an + integer seed for creating a new `RandomState`, or None to select a seed + automatically (default). + """ + self._move_speed = move_speed + super(PlanarWalker, self).__init__(random=random) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode. + + In 'standing' mode, use initial orientation and small velocities. + In 'random' mode, randomize joint angles and let fall to the floor. + + Args: + physics: An instance of `Physics`. 
+ + """ + randomizers.randomize_limited_and_rotational_joints(physics, self.random) + super(PlanarWalker, self).initialize_episode(physics) + + def get_observation(self, physics): + """Returns an observation of body orientations, height and velocites.""" + obs = collections.OrderedDict() + obs["orientations"] = physics.orientations() + obs["height"] = physics.torso_height() + obs["velocity"] = physics.velocity() + return obs + + def get_reward(self, physics): + """Returns a reward to the agent.""" + standing = rewards.tolerance( + physics.torso_height(), + bounds=(_STAND_HEIGHT, float("inf")), + margin=_STAND_HEIGHT / 2, + ) + upright = (1 + physics.torso_upright()) / 2 + stand_reward = (3 * standing + upright) / 4 + if self._move_speed == 0: + return stand_reward + else: + move_reward = rewards.tolerance( + physics.horizontal_velocity(), + bounds=(self._move_speed, float("inf")), + margin=self._move_speed / 2, + value_at_margin=0.5, + sigmoid="linear", + ) + return stand_reward * (5 * move_reward + 1) / 6 diff --git a/envs/fb_mtenv_dmc/walker.xml b/envs/fb_mtenv_dmc/walker.xml new file mode 100644 index 0000000000000000000000000000000000000000..95098933ffb71da35b5c4a2342035567157fcc08 --- /dev/null +++ b/envs/fb_mtenv_dmc/walker.xml @@ -0,0 +1,70 @@ + + + + + + diff --git a/envs/fb_mtenv_dmc/walker_friction_10.xml b/envs/fb_mtenv_dmc/walker_friction_10.xml new file mode 100644 index 0000000000000000000000000000000000000000..659d8b74471021f68f7ad50a1985e12681aa9da2 --- /dev/null +++ b/envs/fb_mtenv_dmc/walker_friction_10.xml @@ -0,0 +1,70 @@ + + + + + + diff --git a/envs/fb_mtenv_dmc/walker_friction_8.xml b/envs/fb_mtenv_dmc/walker_friction_8.xml new file mode 100644 index 0000000000000000000000000000000000000000..86b171d438f51a65c55fcf9e1ad69d51721a3e5b --- /dev/null +++ b/envs/fb_mtenv_dmc/walker_friction_8.xml @@ -0,0 +1,70 @@ + + + + + + diff --git a/envs/fb_mtenv_dmc/walker_friction_9.xml b/envs/fb_mtenv_dmc/walker_friction_9.xml new file mode 100644 index 0000000000000000000000000000000000000000..94f1651465de50471cda85e186f860d299a9bce0 --- /dev/null +++ b/envs/fb_mtenv_dmc/walker_friction_9.xml @@ -0,0 +1,70 @@ + + + + + + diff --git a/envs/fb_mtenv_dmc/walker_len_8.xml b/envs/fb_mtenv_dmc/walker_len_8.xml new file mode 100644 index 0000000000000000000000000000000000000000..44e82bda067acd51f49857c0c5721ca9e013285c --- /dev/null +++ b/envs/fb_mtenv_dmc/walker_len_8.xml @@ -0,0 +1,70 @@ + + + + + + diff --git a/envs/fb_mtenv_dmc/walker_len_9.xml b/envs/fb_mtenv_dmc/walker_len_9.xml new file mode 100644 index 0000000000000000000000000000000000000000..3de95b7ae6ed823752520bcfabe6658835bb1b28 --- /dev/null +++ b/envs/fb_mtenv_dmc/walker_len_9.xml @@ -0,0 +1,70 @@ + + + + + + diff --git a/envs/fb_mtenv_dmc/wrappers/__init__.py b/envs/fb_mtenv_dmc/wrappers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f7e4a687e04c3b1e2e0aa05659f76bc2cc87ca84 --- /dev/null +++ b/envs/fb_mtenv_dmc/wrappers/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2018 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Environment wrappers used to extend or modify environment behaviour.""" diff --git a/envs/fb_mtenv_dmc/wrappers/action_noise.py b/envs/fb_mtenv_dmc/wrappers/action_noise.py new file mode 100644 index 0000000000000000000000000000000000000000..8799bae9ddb0b861db72e700fa95a2fa0138e68b --- /dev/null +++ b/envs/fb_mtenv_dmc/wrappers/action_noise.py @@ -0,0 +1,77 @@ +# Copyright 2018 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Wrapper control suite environments that adds Gaussian noise to actions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import dm_env +import numpy as np + + +_BOUNDS_MUST_BE_FINITE = ( + "All bounds in `env.action_spec()` must be finite, got: {action_spec}" +) + + +class Wrapper(dm_env.Environment): + """Wraps a control environment and adds Gaussian noise to actions.""" + + def __init__(self, env, scale=0.01): + """Initializes a new action noise Wrapper. + + Args: + env: The control suite environment to wrap. + scale: The standard deviation of the noise, expressed as a fraction + of the max-min range for each action dimension. + + Raises: + ValueError: If any of the action dimensions of the wrapped environment are + unbounded. + """ + action_spec = env.action_spec() + if not ( + np.all(np.isfinite(action_spec.minimum)) + and np.all(np.isfinite(action_spec.maximum)) + ): + raise ValueError(_BOUNDS_MUST_BE_FINITE.format(action_spec=action_spec)) + self._minimum = action_spec.minimum + self._maximum = action_spec.maximum + self._noise_std = scale * (action_spec.maximum - action_spec.minimum) + self._env = env + + def step(self, action): + noisy_action = action + self._env.task.random.normal(scale=self._noise_std) + # Clip the noisy actions in place so that they fall within the bounds + # specified by the `action_spec`. Note that MuJoCo implicitly clips out-of- + # bounds control inputs, but we also clip here in case the actions do not + # correspond directly to MuJoCo actuators, or if there are other wrapper + # layers that expect the actions to be within bounds. 
+ np.clip(noisy_action, self._minimum, self._maximum, out=noisy_action) + return self._env.step(noisy_action) + + def reset(self): + return self._env.reset() + + def observation_spec(self): + return self._env.observation_spec() + + def action_spec(self): + return self._env.action_spec() + + def __getattr__(self, name): + return getattr(self._env, name) diff --git a/envs/fb_mtenv_dmc/wrappers/action_noise_test.py b/envs/fb_mtenv_dmc/wrappers/action_noise_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f2de9830a712e0104596e40ff3abed15c37b9636 --- /dev/null +++ b/envs/fb_mtenv_dmc/wrappers/action_noise_test.py @@ -0,0 +1,143 @@ +# Copyright 2018 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Tests for the action noise wrapper.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Internal dependencies. +from absl.testing import absltest +from absl.testing import parameterized +from dm_control.rl import control +from dm_control.suite.wrappers import action_noise +from dm_env import specs +import mock +import numpy as np + + +class ActionNoiseTest(parameterized.TestCase): + def make_action_spec(self, lower=(-1.0,), upper=(1.0,)): + lower, upper = np.broadcast_arrays(lower, upper) + return specs.BoundedArray( + shape=lower.shape, dtype=float, minimum=lower, maximum=upper + ) + + def make_mock_env(self, action_spec=None): + action_spec = action_spec or self.make_action_spec() + env = mock.Mock(spec=control.Environment) + env.action_spec.return_value = action_spec + return env + + def assertStepCalledOnceWithCorrectAction(self, env, expected_action): + # NB: `assert_called_once_with()` doesn't support numpy arrays. 
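+ # Check the call count first, then compare the captured action array explicitly.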
+ env.step.assert_called_once() + actual_action = env.step.call_args_list[0][0][0] + np.testing.assert_array_equal(expected_action, actual_action) + + @parameterized.parameters( + [ + dict(lower=np.r_[-1.0, 0.0], upper=np.r_[1.0, 2.0], scale=0.05), + dict(lower=np.r_[-1.0, 0.0], upper=np.r_[1.0, 2.0], scale=0.0), + dict(lower=np.r_[-1.0, 0.0], upper=np.r_[-1.0, 0.0], scale=0.05), + ] + ) + def test_step(self, lower, upper, scale): + seed = 0 + std = scale * (upper - lower) + expected_noise = np.random.RandomState(seed).normal(scale=std) + action = np.random.RandomState(seed).uniform(lower, upper) + expected_noisy_action = np.clip(action + expected_noise, lower, upper) + task = mock.Mock(spec=control.Task) + task.random = np.random.RandomState(seed) + action_spec = self.make_action_spec(lower=lower, upper=upper) + env = self.make_mock_env(action_spec=action_spec) + env.task = task + wrapped_env = action_noise.Wrapper(env, scale=scale) + time_step = wrapped_env.step(action) + self.assertStepCalledOnceWithCorrectAction(env, expected_noisy_action) + self.assertIs(time_step, env.step(expected_noisy_action)) + + @parameterized.named_parameters( + [ + dict(testcase_name="within_bounds", action=np.r_[-1.0], noise=np.r_[0.1]), + dict(testcase_name="below_lower", action=np.r_[-1.0], noise=np.r_[-0.1]), + dict(testcase_name="above_upper", action=np.r_[1.0], noise=np.r_[0.1]), + ] + ) + def test_action_clipping(self, action, noise): + lower = -1.0 + upper = 1.0 + expected_noisy_action = np.clip(action + noise, lower, upper) + task = mock.Mock(spec=control.Task) + task.random = mock.Mock(spec=np.random.RandomState) + task.random.normal.return_value = noise + action_spec = self.make_action_spec(lower=lower, upper=upper) + env = self.make_mock_env(action_spec=action_spec) + env.task = task + wrapped_env = action_noise.Wrapper(env) + time_step = wrapped_env.step(action) + self.assertStepCalledOnceWithCorrectAction(env, expected_noisy_action) + self.assertIs(time_step, env.step(expected_noisy_action)) + + @parameterized.parameters( + [ + dict(lower=np.r_[-1.0, 0.0], upper=np.r_[1.0, np.inf]), + dict(lower=np.r_[np.nan, 0.0], upper=np.r_[1.0, 2.0]), + ] + ) + def test_error_if_action_bounds_non_finite(self, lower, upper): + action_spec = self.make_action_spec(lower=lower, upper=upper) + env = self.make_mock_env(action_spec=action_spec) + with self.assertRaisesWithLiteralMatch( + ValueError, + action_noise._BOUNDS_MUST_BE_FINITE.format(action_spec=action_spec), + ): + _ = action_noise.Wrapper(env) + + def test_reset(self): + env = self.make_mock_env() + wrapped_env = action_noise.Wrapper(env) + time_step = wrapped_env.reset() + env.reset.assert_called_once_with() + self.assertIs(time_step, env.reset()) + + def test_observation_spec(self): + env = self.make_mock_env() + wrapped_env = action_noise.Wrapper(env) + observation_spec = wrapped_env.observation_spec() + env.observation_spec.assert_called_once_with() + self.assertIs(observation_spec, env.observation_spec()) + + def test_action_spec(self): + env = self.make_mock_env() + wrapped_env = action_noise.Wrapper(env) + # `env.action_spec()` is called in `Wrapper.__init__()` + env.action_spec.reset_mock() + action_spec = wrapped_env.action_spec() + env.action_spec.assert_called_once_with() + self.assertIs(action_spec, env.action_spec()) + + @parameterized.parameters(["task", "physics", "control_timestep"]) + def test_getattr(self, attribute_name): + env = self.make_mock_env() + wrapped_env = action_noise.Wrapper(env) + attr = getattr(wrapped_env, 
attribute_name) + self.assertIs(attr, getattr(env, attribute_name)) + + +if __name__ == "__main__": + absltest.main() diff --git a/envs/fb_mtenv_dmc/wrappers/pixels.py b/envs/fb_mtenv_dmc/wrappers/pixels.py new file mode 100644 index 0000000000000000000000000000000000000000..626195830f985fccb0dc2fd267f31396465e5165 --- /dev/null +++ b/envs/fb_mtenv_dmc/wrappers/pixels.py @@ -0,0 +1,123 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Wrapper that adds pixel observations to a control environment.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +import dm_env +from dm_env import specs + +STATE_KEY = "state" + + +class Wrapper(dm_env.Environment): + """Wraps a control environment and adds a rendered pixel observation.""" + + def __init__( + self, env, pixels_only=True, render_kwargs=None, observation_key="pixels" + ): + """Initializes a new pixel Wrapper. + + Args: + env: The environment to wrap. + pixels_only: If True (default), the original set of 'state' observations + returned by the wrapped environment will be discarded, and the + `OrderedDict` of observations will only contain pixels. If False, the + `OrderedDict` will contain the original observations as well as the + pixel observations. + render_kwargs: Optional `dict` containing keyword arguments passed to the + `mujoco.Physics.render` method. + observation_key: Optional custom string specifying the pixel observation's + key in the `OrderedDict` of observations. Defaults to 'pixels'. + + Raises: + ValueError: If `env`'s observation spec is not compatible with the + wrapper. Supported formats are a single array, or a dict of arrays. + ValueError: If `env`'s observation already contains the specified + `observation_key`. + """ + if render_kwargs is None: + render_kwargs = {} + + wrapped_observation_spec = env.observation_spec() + + if isinstance(wrapped_observation_spec, specs.Array): + self._observation_is_dict = False + invalid_keys = set([STATE_KEY]) + elif isinstance(wrapped_observation_spec, collections.MutableMapping): + self._observation_is_dict = True + invalid_keys = set(wrapped_observation_spec.keys()) + else: + raise ValueError("Unsupported observation spec structure.") + + if not pixels_only and observation_key in invalid_keys: + raise ValueError( + "Duplicate or reserved observation key {!r}.".format(observation_key) + ) + + if pixels_only: + self._observation_spec = collections.OrderedDict() + elif self._observation_is_dict: + self._observation_spec = wrapped_observation_spec.copy() + else: + self._observation_spec = collections.OrderedDict() + self._observation_spec[STATE_KEY] = wrapped_observation_spec + + # Extend observation spec. 
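+ # Render one frame up front so the pixel spec's shape and dtype match the actual observations.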
+ pixels = env.physics.render(**render_kwargs) + pixels_spec = specs.Array( + shape=pixels.shape, dtype=pixels.dtype, name=observation_key + ) + self._observation_spec[observation_key] = pixels_spec + + self._env = env + self._pixels_only = pixels_only + self._render_kwargs = render_kwargs + self._observation_key = observation_key + + def reset(self): + time_step = self._env.reset() + return self._add_pixel_observation(time_step) + + def step(self, action): + time_step = self._env.step(action) + return self._add_pixel_observation(time_step) + + def observation_spec(self): + return self._observation_spec + + def action_spec(self): + return self._env.action_spec() + + def _add_pixel_observation(self, time_step): + if self._pixels_only: + observation = collections.OrderedDict() + elif self._observation_is_dict: + observation = type(time_step.observation)(time_step.observation) + else: + observation = collections.OrderedDict() + observation[STATE_KEY] = time_step.observation + + pixels = self._env.physics.render(**self._render_kwargs) + observation[self._observation_key] = pixels + return time_step._replace(observation=observation) + + def __getattr__(self, name): + return getattr(self._env, name) diff --git a/envs/fb_mtenv_dmc/wrappers/pixels_test.py b/envs/fb_mtenv_dmc/wrappers/pixels_test.py new file mode 100644 index 0000000000000000000000000000000000000000..473ad810be4593f865b29fd72a716d61640d3dfc --- /dev/null +++ b/envs/fb_mtenv_dmc/wrappers/pixels_test.py @@ -0,0 +1,135 @@ +# Copyright 2017 The dm_control Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Tests for the pixel wrapper.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +# Internal dependencies. +from absl.testing import absltest +from absl.testing import parameterized +from . import cartpole +from dm_control.suite.wrappers import pixels +import dm_env +from dm_env import specs + +import numpy as np + + +class FakePhysics(object): + def render(self, *args, **kwargs): + del args + del kwargs + return np.zeros((4, 5, 3), dtype=np.uint8) + + +class FakeArrayObservationEnvironment(dm_env.Environment): + def __init__(self): + self.physics = FakePhysics() + + def reset(self): + return dm_env.restart(np.zeros((2,))) + + def step(self, action): + del action + return dm_env.transition(0.0, np.zeros((2,))) + + def action_spec(self): + pass + + def observation_spec(self): + return specs.Array(shape=(2,), dtype=np.float) + + +class PixelsTest(parameterized.TestCase): + @parameterized.parameters(True, False) + def test_dict_observation(self, pixels_only): + pixel_key = "rgb" + + env = cartpole.swingup() + + # Make sure we are testing the right environment for the test. + observation_spec = env.observation_spec() + self.assertIsInstance(observation_spec, collections.OrderedDict) + + width = 320 + height = 240 + + # The wrapper should only add one observation. 
+ wrapped = pixels.Wrapper( + env, + observation_key=pixel_key, + pixels_only=pixels_only, + render_kwargs={"width": width, "height": height}, + ) + + wrapped_observation_spec = wrapped.observation_spec() + self.assertIsInstance(wrapped_observation_spec, collections.OrderedDict) + + if pixels_only: + self.assertLen(wrapped_observation_spec, 1) + self.assertEqual([pixel_key], list(wrapped_observation_spec.keys())) + else: + expected_length = len(observation_spec) + 1 + self.assertLen(wrapped_observation_spec, expected_length) + expected_keys = list(observation_spec.keys()) + [pixel_key] + self.assertEqual(expected_keys, list(wrapped_observation_spec.keys())) + + # Check that the added spec item is consistent with the added observation. + time_step = wrapped.reset() + rgb_observation = time_step.observation[pixel_key] + wrapped_observation_spec[pixel_key].validate(rgb_observation) + + self.assertEqual(rgb_observation.shape, (height, width, 3)) + self.assertEqual(rgb_observation.dtype, np.uint8) + + @parameterized.parameters(True, False) + def test_single_array_observation(self, pixels_only): + pixel_key = "depth" + + env = FakeArrayObservationEnvironment() + observation_spec = env.observation_spec() + self.assertIsInstance(observation_spec, specs.Array) + + wrapped = pixels.Wrapper( + env, observation_key=pixel_key, pixels_only=pixels_only + ) + wrapped_observation_spec = wrapped.observation_spec() + self.assertIsInstance(wrapped_observation_spec, collections.OrderedDict) + + if pixels_only: + self.assertLen(wrapped_observation_spec, 1) + self.assertEqual([pixel_key], list(wrapped_observation_spec.keys())) + else: + self.assertLen(wrapped_observation_spec, 2) + self.assertEqual( + [pixels.STATE_KEY, pixel_key], list(wrapped_observation_spec.keys()) + ) + + time_step = wrapped.reset() + + depth_observation = time_step.observation[pixel_key] + wrapped_observation_spec[pixel_key].validate(depth_observation) + + self.assertEqual(depth_observation.shape, (4, 5, 3)) + self.assertEqual(depth_observation.dtype, np.uint8) + + +if __name__ == "__main__": + absltest.main() diff --git a/offlinedv2/LICENSE b/offlinedv2/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d16ec1a9a1c60ce00c9eb21845840b84214e5af4 --- /dev/null +++ b/offlinedv2/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2020 Danijar Hafner + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
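A minimal usage sketch for the walker task and the two wrappers added above (assumptions: the repository root is on PYTHONPATH so `envs.fb_mtenv_dmc` is importable, its `base`/`common` modules resolve as in upstream dm_control, and the render size is an arbitrary choice):

from envs.fb_mtenv_dmc import walker
from envs.fb_mtenv_dmc.wrappers import action_noise, pixels

# Build the Walk task, perturb actions with Gaussian noise (1% of each action range),
# then replace the state observation with rendered pixels.
env = walker.walk(random=0)
env = action_noise.Wrapper(env, scale=0.01)
env = pixels.Wrapper(env, pixels_only=True,
                     render_kwargs={"width": 84, "height": 84, "camera_id": 0})

time_step = env.reset()
print(time_step.observation["pixels"].shape)  # (84, 84, 3): height, width, RGB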
diff --git a/offlinedv2/conda_env.yml b/offlinedv2/conda_env.yml new file mode 100644 index 0000000000000000000000000000000000000000..cff5ad2fcee721b2dbe684e54aab8d4ae83d5cf1 --- /dev/null +++ b/offlinedv2/conda_env.yml @@ -0,0 +1,205 @@ +name: dreamerv2 +channels: + - anaconda + - conda-forge + - defaults +dependencies: + - _libgcc_mutex=0.1 + - _openmp_mutex=4.5 + - anyio=3.5.0 + - argon2-cffi=20.1.0 + - async_generator=1.10 + - attrs=21.2.0 + - babel=2.9.1 + - backcall=0.2.0 + - bleach=4.1.0 + - brotlipy=0.7.0 + - ca-certificates=2021.10.8 + - certifi=2021.10.8 + - cffi=1.14.6 + - charset-normalizer=2.0.10 + - cryptography=35.0.0 + - dbus=1.13.18 + - debugpy=1.5.1 + - defusedxml=0.7.1 + - entrypoints=0.3 + - expat=2.2.10 + - fontconfig=2.13.0 + - freetype=2.10.4 + - glew=2.1.0 + - glib=2.56.2 + - gst-plugins-base=1.14.0 + - gstreamer=1.14.0 + - icu=58.2 + - importlib_metadata=4.8.2 + - ipykernel=6.4.1 + - ipython=7.29.0 + - ipython_genutils=0.2.0 + - ipywidgets=7.5.1 + - jedi=0.18.0 + - jinja2=3.0.2 + - jpeg=9b + - json5=0.9.5 + - jsonschema=3.2.0 + - jupyter=1.0.0 + - jupyter_client=7.1.0 + - jupyter_console=6.2.0 + - jupyter_core=4.9.1 + - jupyter_server=1.13.2 + - jupyterlab=3.2.7 + - jupyterlab_pygments=0.1.2 + - jupyterlab_server=2.10.3 + - ld_impl_linux-64=2.35.1 + - libffi=3.3 + - libgcc-ng=9.3.0 + - libglu=9.0.0 + - libgomp=9.3.0 + - libpng=1.6.37 + - libsodium=1.0.18 + - libstdcxx-ng=9.3.0 + - libuuid=1.0.3 + - libxcb=1.13 + - libxml2=2.9.10 + - markupsafe=2.0.1 + - matplotlib-inline=0.1.2 + - mistune=0.8.4 + - nb_conda_kernels=2.3.1 + - nbclassic=0.3.5 + - nbclient=0.5.3 + - nbconvert=6.3.0 + - nbformat=5.1.3 + - ncurses=6.3 + - nest-asyncio=1.5.1 + - notebook=6.4.10 + - openssl=1.1.1n + - packaging=21.3 + - pandocfilters=1.4.3 + - parso=0.8.3 + - pcre=8.44 + - pexpect=4.8.0 + - pickleshare=0.7.5 + - pip=21.2.4 + - prometheus_client=0.12.0 + - prompt-toolkit=3.0.20 + - prompt_toolkit=3.0.20 + - pthread-stubs=0.4 + - ptyprocess=0.7.0 + - pycparser=2.21 + - pygments=2.10.0 + - pyopenssl=21.0.0 + - pyqt=5.9.2 + - pyrsistent=0.18.0 + - pysocks=1.7.1 + - python=3.9.7 + - python-dateutil=2.8.2 + - python_abi=3.9 + - pytz=2021.3 + - pyzmq=22.3.0 + - qt=5.9.7 + - qtconsole=4.7.7 + - qtpy=1.9.0 + - readline=8.1.2 + - requests=2.27.1 + - send2trash=1.8.0 + - setuptools=58.0.4 + - sip=4.19.13 + - six=1.16.0 + - sniffio=1.2.0 + - sqlite=3.37.0 + - terminado=0.9.4 + - testpath=0.5.0 + - tk=8.6.11 + - tornado=6.1 + - traitlets=5.1.1 + - tzdata=2021e + - urllib3=1.26.8 + - wcwidth=0.2.5 + - webencodings=0.5.1 + - websocket-client=1.2.3 + - wheel=0.37.1 + - widgetsnbextension=3.5.1 + - xorg-kbproto=1.0.7 + - xorg-libx11=1.7.2 + - xorg-libxau=1.0.9 + - xorg-libxdmcp=1.1.3 + - xorg-libxext=1.3.4 + - xorg-xextproto=7.3.0 + - xorg-xproto=7.0.31 + - xz=5.2.5 + - zeromq=4.3.4 + - zipp=3.7.0 + - zlib=1.2.11 + - pip: + - absl-py==1.0.0 + - ale-py==0.7.3 + - astunparse==1.6.3 + - atari-py==0.2.9 + - cachetools==4.2.4 + - cloudpickle==2.0.0 + - crafter==1.6.3 + - cycler==0.11.0 + - decorator==5.1.1 + - dill==0.3.4 + - dm-control==0.0.403778684 + - dm-env==1.5 + - dm-tree==0.1.6 + - flatbuffers==2.0 + - fonttools==4.28.5 + - future==0.18.2 + - gast==0.4.0 + - glfw==2.5.0 + - google-auth==2.3.3 + - google-auth-oauthlib==0.4.6 + - google-pasta==0.2.0 + - grpcio==1.43.0 + - gym==0.21.0 + - h5py==3.6.0 + - hickle==4.0.4 + - idna==3.3 + - imageio==2.13.5 + - importlib-metadata==4.10.0 + - importlib-resources==5.4.0 + - joblib==1.1.0 + - keras==2.7.0 + - keras-preprocessing==1.1.2 + - kiwisolver==1.3.2 + - 
labmaze==1.0.5 + - libclang==12.0.0 + - lxml==4.7.1 + - markdown==3.3.6 + - matplotlib==3.5.1 + - numpy==1.22.0 + - oauthlib==3.1.1 + - opencv-python==4.5.5.62 + - opensimplex==0.4 + - opt-einsum==3.3.0 + - pandas==1.3.5 + - patchelf==0.14.3.0 + - pillow==9.0.0 + - protobuf==3.19.3 + - pyasn1==0.4.8 + - pyasn1-modules==0.2.8 + - pyopengl==3.1.5 + - pyparsing==3.0.6 + - pyyaml==6.0 + - requests-oauthlib==1.3.0 + - rsa==4.8 + - ruamel-yaml==0.17.20 + - ruamel-yaml-clib==0.2.6 + - scikit-learn==1.0.2 + - scipy==1.7.3 + - seaborn==0.11.2 + - sklearn==0.0 + - tensorboard==2.7.0 + - tensorboard-data-server==0.6.1 + - tensorboard-plugin-wit==1.8.1 + - tensorflow==2.7.0 + - tensorflow-estimator==2.7.0 + - tensorflow-io-gcs-filesystem==0.23.1 + - tensorflow-probability==0.15.0 + - termcolor==1.1.0 + - threadpoolctl==3.0.0 + - tqdm==4.62.3 + - typing-extensions==4.0.1 + - werkzeug==2.0.2 + - wrapt==1.13.3 diff --git a/offlinedv2/dreamerv2/agent.py b/offlinedv2/dreamerv2/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..f273f232624493dda19e1bf95a1908741d829080 --- /dev/null +++ b/offlinedv2/dreamerv2/agent.py @@ -0,0 +1,401 @@ +import tensorflow as tf +import tensorflow_probability as tfp +from tensorflow.keras import mixed_precision as prec + +from dreamerv2 import common +from dreamerv2 import expl + +tfd = tfp.distributions + + +class Agent(common.Module): + def __init__(self, config, obs_space, act_space, step): + self.config = config + self.obs_space = obs_space + self.act_space = act_space['action'] + self.step = step + self.tfstep = tf.Variable(int(self.step), tf.int64) + self.wm = WorldModel(config, obs_space, self.tfstep) + self._task_behavior = ActorCritic(config, self.act_space, self.tfstep) + if config.expl_behavior == 'greedy': + self._expl_behavior = self._task_behavior + else: + self._expl_behavior = getattr(expl, config.expl_behavior)( + self.config, self.act_space, self.wm, self.tfstep, + lambda seq: self.wm.heads['reward'](seq['feat']).mode()) + if self.config.offline_tune_lmbd: + self.log_lmbd = tf.Variable(0, dtype=tf.float32) + self.lmbd_optimizer = tf.keras.optimizers.Adam(learning_rate=3e-4) + else: + self.lmbd = self.config.offline_lmbd + + @tf.function + def policy(self, obs, state=None, mode='train'): + obs = tf.nest.map_structure(tf.tensor, obs) + tf.py_function(lambda: self.tfstep.assign( + int(self.step), read_value=False), [], []) + if state is None: + latent = self.wm.rssm.initial(len(obs['reward'])) + action = tf.zeros((len(obs['reward']),) + self.act_space.shape) + state = latent, action + latent, action = state + embed = self.wm.encoder(self.wm.preprocess(obs)) + sample = (mode == 'train') or not self.config.eval_state_mean + latent, _ = self.wm.rssm.obs_step( + latent, action, embed, obs['is_first'], sample) + feat = self.wm.rssm.get_feat(latent) + if mode == 'eval': + actor = self._task_behavior.actor(feat) + action = actor.mode() + noise = self.config.eval_noise + elif mode == 'explore': + actor = self._expl_behavior.actor(feat) + action = actor.sample() + noise = self.config.expl_noise + elif mode == 'train': + actor = self._task_behavior.actor(feat) + action = actor.sample() + noise = self.config.expl_noise + action = common.action_noise(action, noise, self.act_space) + outputs = {'action': action} + state = (latent, action) + return outputs, state + + @tf.function + def train(self, data, state=None): + metrics = {} + state, outputs, mets = self.wm.train(data, state) + metrics.update(mets) + start = outputs['post'] + reward = lambda 
seq: self.wm.heads['reward'](seq['feat']).mode() + metrics.update(self._task_behavior.train( + self.wm, start, data['is_terminal'], reward)) + if self.config.expl_behavior != 'greedy': + mets = self._expl_behavior.train(start, outputs, data)[-1] + metrics.update({'expl_' + key: value for key, value in mets.items()}) + return state, metrics + + # The folllowing methods split the above for offline training + @tf.function + def model_train(self, data): + _, _, metrics = self.wm.train(data) + return metrics + + @tf.function + def agent_train(self, data): + data = self.wm.preprocess(data) + embed = self.wm.encoder(data) + start, _ = self.wm.rssm.observe(embed, data['action'], data['is_first']) + + reward = lambda seq: self.penalized_reward(seq) + metrics = self._task_behavior.train(self.wm, start, data['is_terminal'], reward) + if self.config.offline_tune_lmbd: + metrics['lambda'] = tf.exp(self.log_lmbd) + else: + metrics['lambda'] = self.lmbd + return metrics + + @tf.function + def compute_penalty(self, seq): + if self.config.offline_penalty_type == 'log_prob_ens': + dist = self.wm.rssm.get_dist(seq, ensemble=True) + penalty = tf.math.reduce_std(dist.log_prob(seq['stoch']), axis=0) + elif self.config.offline_penalty_type == 'meandis': + dist = self.wm.rssm.get_dist(seq, ensemble=True) + m = dist.mean() + mean_pred = tf.math.reduce_mean(m, axis=0) + penalty = tf.math.reduce_mean(tf.norm(m - mean_pred, axis=[-1, -2]), axis=0) + elif self.config.offline_penalty_type == 'mixstd_mean': + dist = self.wm.rssm.get_dist(seq, ensemble=True) + gm = tfd.MixtureSameFamily( + mixture_distribution=tfd.Categorical(probs=[1 / self.config.rssm.ensemble] * self.config.rssm.ensemble), + components_distribution=tfd.BatchReshape( + distribution=dist, + batch_shape=dist.batch_shape[1:] + dist.batch_shape[0], + validate_args=True + )) + penalty = tf.math.reduce_mean(gm.stddev(), axis=[-1, -2]) + else: + penalty = 0 + + return penalty + + @tf.function + def penalized_reward(self, seq): + rew = self.wm.heads['reward'](seq['feat']).mode() + penalty = self.compute_penalty(seq) + + if self.config.offline_tune_lmbd: + with tf.GradientTape() as tape: + lmbd = tf.exp(self.log_lmbd) + lambda_loss = tf.math.reduce_mean( + self.log_lmbd * (tf.stop_gradient(lmbd * penalty) - self.config.offline_lmbd_cons)) + variables = [self.log_lmbd] + grads = tape.gradient(lambda_loss, variables) + self.lmbd_optimizer.apply_gradients(zip(grads, variables)) + else: + lmbd = self.lmbd + + return rew - lmbd * penalty + + @tf.function + def report(self, data): + report = {} + data = self.wm.preprocess(data) + for key in self.wm.heads['decoder'].cnn_keys: + name = key.replace('/', '_') + report[f'openl_{name}'] = self.wm.video_pred(data, key) + return report + + +class WorldModel(common.Module): + def __init__(self, config, obs_space, tfstep): + shapes = {k: tuple(v.shape) for k, v in obs_space.items()} + self.config = config + self.tfstep = tfstep + self.rssm = common.EnsembleRSSM(**config.rssm) + self.encoder = common.Encoder(shapes, **config.encoder) + self.heads = { + 'decoder': common.Decoder(shapes, **config.decoder), + 'reward': common.MLP([], **config.reward_head), + } + if config.pred_discount: + self.heads['discount'] = common.MLP([], **config.discount_head) + for name in config.grad_heads: + assert name in self.heads, name + self.model_opt = common.Optimizer('model', **config.model_opt) + + def train(self, data, state=None): + with tf.GradientTape() as model_tape: + model_loss, state, outputs, metrics = self.loss(data, state) + modules 
= [self.encoder, self.rssm, *self.heads.values()] + metrics.update(self.model_opt(model_tape, model_loss, modules)) + return state, outputs, metrics + + def loss(self, data, state=None): + data = self.preprocess(data) + embed = self.encoder(data) + post, prior = self.rssm.observe( + embed, data['action'], data['is_first'], state) + kl_loss, kl_value = self.rssm.kl_loss(post, prior, **self.config.kl) + assert len(kl_loss.shape) == 0 + likes = {} + losses = {'kl': kl_loss} + feat = self.rssm.get_feat(post) + for name, head in self.heads.items(): + grad_head = (name in self.config.grad_heads) + inp = feat if grad_head else tf.stop_gradient(feat) + out = head(inp) + dists = out if isinstance(out, dict) else {name: out} + for key, dist in dists.items(): + like = tf.cast(dist.log_prob(data[key]), tf.float32) + likes[key] = like + losses[key] = -like.mean() + model_loss = sum( + self.config.loss_scales.get(k, 1.0) * v for k, v in losses.items()) + outs = dict( + embed=embed, feat=feat, post=post, + prior=prior, likes=likes, kl=kl_value) + metrics = {f'{name}_loss': value for name, value in losses.items()} + metrics['model_kl'] = kl_value.mean() + metrics['prior_ent'] = self.rssm.get_dist(prior).entropy().mean() + metrics['post_ent'] = self.rssm.get_dist(post).entropy().mean() + last_state = {k: v[:, -1] for k, v in post.items()} + return model_loss, last_state, outs, metrics + + def imagine(self, policy, start, is_terminal, horizon): + flatten = lambda x: x.reshape([-1] + list(x.shape[2:])) + start = {k: flatten(v) for k, v in start.items()} + start['feat'] = self.rssm.get_feat(start) + start['action'] = tf.zeros_like(policy(start['feat']).mode()) + seq = {k: [v] for k, v in start.items()} + for _ in range(horizon): + action = policy(tf.stop_gradient(seq['feat'][-1])).sample() + state = self.rssm.img_step({k: v[-1] for k, v in seq.items()}, action) + feat = self.rssm.get_feat(state) + for key, value in {**state, 'action': action, 'feat': feat}.items(): + seq[key].append(value) + seq = {k: tf.stack(v, 0) for k, v in seq.items()} + if 'discount' in self.heads: + disc = self.heads['discount'](seq['feat']).mean() + if is_terminal is not None: + # Override discount prediction for the first step with the true + # discount factor from the replay buffer. + true_first = 1.0 - flatten(is_terminal).astype(disc.dtype) + true_first *= self.config.discount + disc = tf.concat([true_first[None], disc[1:]], 0) + else: + disc = self.config.discount * tf.ones(seq['feat'].shape[:-1]) + seq['discount'] = disc + # Shift discount factors because they imply whether the following state + # will be valid, not whether the current state is valid. 
+ seq['weight'] = tf.math.cumprod( + tf.concat([tf.ones_like(disc[:1]), disc[:-1]], 0), 0) + return seq + + @tf.function + def preprocess(self, obs): + dtype = prec.global_policy().compute_dtype + obs = obs.copy() + for key, value in obs.items(): + if key.startswith('log_'): + continue + if value.dtype == tf.int32: + value = value.astype(dtype) + if value.dtype == tf.uint8: + value = value.astype(dtype) / 255.0 - 0.5 + obs[key] = value + obs['reward'] = { + 'identity': tf.identity, + 'sign': tf.sign, + 'tanh': tf.tanh, + }[self.config.clip_rewards](obs['reward']) + if 'discount' not in obs: + obs['discount'] = 1.0 - obs['is_terminal'].astype(dtype) + obs['discount'] *= self.config.discount + return obs + + @tf.function + def video_pred(self, data, key): + decoder = self.heads['decoder'] + truth = data[key][:6] + 0.5 + embed = self.encoder(data) + states, _ = self.rssm.observe( + embed[:6, :5], data['action'][:6, :5], data['is_first'][:6, :5]) + recon = decoder(self.rssm.get_feat(states))[key].mode()[:6] + init = {k: v[:, -1] for k, v in states.items()} + prior = self.rssm.imagine(data['action'][:6, 5:], init) + openl = decoder(self.rssm.get_feat(prior))[key].mode() + model = tf.concat([recon[:, :5] + 0.5, openl + 0.5], 1) + error = (model - truth + 1) / 2 + video = tf.concat([truth, model, error], 2) + B, T, H, W, C = video.shape + return video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C)) + + +class ActorCritic(common.Module): + def __init__(self, config, act_space, tfstep): + self.config = config + self.act_space = act_space + self.tfstep = tfstep + discrete = hasattr(act_space, 'n') + if self.config.actor.dist == 'auto': + self.config = self.config.update({ + 'actor.dist': 'onehot' if discrete else 'trunc_normal'}) + if self.config.actor_grad == 'auto': + self.config = self.config.update({ + 'actor_grad': 'reinforce' if discrete else 'dynamics'}) + self.actor = common.MLP(act_space.shape[0], **self.config.actor) + self.critic = common.MLP([], **self.config.critic) + if self.config.slow_target: + self._target_critic = common.MLP([], **self.config.critic) + self._updates = tf.Variable(0, tf.int64) + else: + self._target_critic = self.critic + self.actor_opt = common.Optimizer('actor', **self.config.actor_opt) + self.critic_opt = common.Optimizer('critic', **self.config.critic_opt) + self.rewnorm = common.StreamNorm(**self.config.reward_norm) + + def train(self, world_model, start, is_terminal, reward_fn): + metrics = {} + hor = self.config.imag_horizon + # The weights are is_terminal flags for the imagination start states. + # Technically, they should multiply the losses from the second trajectory + # step onwards, which is the first imagined step. However, we are not + # training the action that led into the first step anyway, so we can use + # them to scale the whole sequence. + with tf.GradientTape() as actor_tape: + seq = world_model.imagine(self.actor, start, is_terminal, hor) + reward = reward_fn(seq) + seq['reward'], mets1 = self.rewnorm(reward) + mets1 = {f'reward_{k}': v for k, v in mets1.items()} + target, mets2 = self.target(seq) + actor_loss, mets3 = self.actor_loss(seq, target) + with tf.GradientTape() as critic_tape: + critic_loss, mets4 = self.critic_loss(seq, target) + metrics.update(self.actor_opt(actor_tape, actor_loss, self.actor)) + metrics.update(self.critic_opt(critic_tape, critic_loss, self.critic)) + metrics.update(**mets1, **mets2, **mets3, **mets4) + self.update_slow_target() # Variables exist after first forward pass. 
+ return metrics + + def actor_loss(self, seq, target): + # Actions: 0 [a1] [a2] a3 + # ^ | ^ | ^ | + # / v / v / v + # States: [z0]->[z1]-> z2 -> z3 + # Targets: t0 [t1] [t2] + # Baselines: [v0] [v1] v2 v3 + # Entropies: [e1] [e2] + # Weights: [ 1] [w1] w2 w3 + # Loss: l1 l2 + metrics = {} + # Two states are lost at the end of the trajectory, one for the boostrap + # value prediction and one because the corresponding action does not lead + # anywhere anymore. One target is lost at the start of the trajectory + # because the initial state comes from the replay buffer. + policy = self.actor(tf.stop_gradient(seq['feat'][:-2])) + if self.config.actor_grad == 'dynamics': + objective = target[1:] + elif self.config.actor_grad == 'reinforce': + baseline = self._target_critic(seq['feat'][:-2]).mode() + advantage = tf.stop_gradient(target[1:] - baseline) + objective = policy.log_prob(seq['action'][1:-1]) * advantage + elif self.config.actor_grad == 'both': + baseline = self._target_critic(seq['feat'][:-2]).mode() + advantage = tf.stop_gradient(target[1:] - baseline) + objective = policy.log_prob(seq['action'][1:-1]) * advantage + mix = common.schedule(self.config.actor_grad_mix, self.tfstep) + objective = mix * target[1:] + (1 - mix) * objective + metrics['actor_grad_mix'] = mix + else: + raise NotImplementedError(self.config.actor_grad) + ent = policy.entropy() + ent_scale = common.schedule(self.config.actor_ent, self.tfstep) + objective += ent_scale * ent + weight = tf.stop_gradient(seq['weight']) + actor_loss = -(weight[:-2] * objective).mean() + metrics['actor_ent'] = ent.mean() + metrics['actor_ent_scale'] = ent_scale + return actor_loss, metrics + + def critic_loss(self, seq, target): + # States: [z0] [z1] [z2] z3 + # Rewards: [r0] [r1] [r2] r3 + # Values: [v0] [v1] [v2] v3 + # Weights: [ 1] [w1] [w2] w3 + # Targets: [t0] [t1] [t2] + # Loss: l0 l1 l2 + dist = self.critic(seq['feat'][:-1]) + target = tf.stop_gradient(target) + weight = tf.stop_gradient(seq['weight']) + critic_loss = -(dist.log_prob(target) * weight[:-1]).mean() + metrics = {'critic': dist.mode().mean()} + return critic_loss, metrics + + def target(self, seq): + # States: [z0] [z1] [z2] [z3] + # Rewards: [r0] [r1] [r2] r3 + # Values: [v0] [v1] [v2] [v3] + # Discount: [d0] [d1] [d2] d3 + # Targets: t0 t1 t2 + reward = tf.cast(seq['reward'], tf.float32) + disc = tf.cast(seq['discount'], tf.float32) + value = self._target_critic(seq['feat']).mode() + # Skipping last time step because it is used for bootstrapping. 
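# Illustrative note (assuming common.lambda_return implements the standard
# TD(lambda) recursion): the call below computes, backwards in time,
#   V^lambda_t = r_t + d_t * ((1 - lambda) * v_{t+1} + lambda * V^lambda_{t+1}),
# where d_t is the predicted discount, bootstrapped from the critic value at
# the final imagined step.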
+ target = common.lambda_return( + reward[:-1], value[:-1], disc[:-1], + bootstrap=value[-1], + lambda_=self.config.discount_lambda, + axis=0) + metrics = {'critic_slow': value.mean(), 'critic_target': target.mean()} + return target, metrics + + def update_slow_target(self): + if self.config.slow_target: + if self._updates % self.config.slow_target_update == 0: + mix = 1.0 if self._updates == 0 else float( + self.config.slow_target_fraction) + for s, d in zip(self.critic.variables, self._target_critic.variables): + d.assign(mix * s + (1 - mix) * d) + self._updates.assign_add(1) diff --git a/offlinedv2/dreamerv2/api.py b/offlinedv2/dreamerv2/api.py new file mode 100644 index 0000000000000000000000000000000000000000..a9c3e4e834a938cb381c5b31b16680b9d22d3194 --- /dev/null +++ b/offlinedv2/dreamerv2/api.py @@ -0,0 +1,127 @@ +import collections +import logging +import os +import pathlib +import re +import sys +import warnings + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +logging.getLogger().setLevel('ERROR') +warnings.filterwarnings('ignore', '.*box bound precision lowered.*') + +sys.path.append(str(pathlib.Path(__file__).parent)) +sys.path.append(str(pathlib.Path(__file__).parent.parent)) + +import numpy as np +import ruamel.yaml as yaml + +from dreamerv2 import agent +from dreamerv2 import common + +from dreamerv2.common import Config +from dreamerv2.common import GymWrapper +from dreamerv2.common import RenderImage +from dreamerv2.common import TerminalOutput +from dreamerv2.common import JSONLOutput +from dreamerv2.common import TensorBoardOutput + +configs = yaml.safe_load( + (pathlib.Path(__file__).parent / 'configs.yaml').read_text()) +defaults = common.Config(configs.pop('defaults')) + + +def train(env, config, outputs=None): + logdir = pathlib.Path(config.logdir).expanduser() + logdir.mkdir(parents=True, exist_ok=True) + config.save(logdir / 'config.yaml') + print(config, '\n') + print('Logdir', logdir) + + outputs = outputs or [ + common.TerminalOutput(), + common.JSONLOutput(config.logdir), + common.TensorBoardOutput(config.logdir), + ] + replay = common.Replay(logdir / 'train_episodes', **config.replay) + step = common.Counter(replay.stats['total_steps']) + logger = common.Logger(step, outputs, multiplier=config.action_repeat) + metrics = collections.defaultdict(list) + + should_train = common.Every(config.train_every) + should_log = common.Every(config.log_every) + should_video = common.Every(config.log_every) + should_expl = common.Until(config.expl_until) + + def per_episode(ep): + length = len(ep['reward']) - 1 + score = float(ep['reward'].astype(np.float64).sum()) + print(f'Episode has {length} steps and return {score:.1f}.') + logger.scalar('return', score) + logger.scalar('length', length) + for key, value in ep.items(): + if re.match(config.log_keys_sum, key): + logger.scalar(f'sum_{key}', ep[key].sum()) + if re.match(config.log_keys_mean, key): + logger.scalar(f'mean_{key}', ep[key].mean()) + if re.match(config.log_keys_max, key): + logger.scalar(f'max_{key}', ep[key].max(0).mean()) + if should_video(step): + for key in config.log_keys_video: + logger.video(f'policy_{key}', ep[key]) + logger.add(replay.stats) + logger.write() + + env = common.GymWrapper(env) + env = common.ResizeImage(env) + if hasattr(env.act_space['action'], 'n'): + env = common.OneHotAction(env) + else: + env = common.NormalizeAction(env) + env = common.TimeLimit(env, config.time_limit) + + driver = common.Driver([env]) + driver.on_episode(per_episode) + driver.on_step(lambda tran, worker: 
step.increment()) + driver.on_step(replay.add_step) + driver.on_reset(replay.add_step) + + prefill = max(0, config.prefill - replay.stats['total_steps']) + if prefill: + print(f'Prefill dataset ({prefill} steps).') + random_agent = common.RandomAgent(env.act_space) + driver(random_agent, steps=prefill, episodes=1) + driver.reset() + + print('Create agent.') + agnt = agent.Agent(config, env.obs_space, env.act_space, step) + dataset = iter(replay.dataset(**config.dataset)) + train_agent = common.CarryOverState(agnt.train) + train_agent(next(dataset)) + if (logdir / 'variables.pkl').exists(): + agnt.load(logdir / 'variables.pkl') + else: + print('Pretrain agent.') + for _ in range(config.pretrain): + train_agent(next(dataset)) + policy = lambda *args: agnt.policy( + *args, mode='explore' if should_expl(step) else 'train') + + def train_step(tran, worker): + if should_train(step): + for _ in range(config.train_steps): + mets = train_agent(next(dataset)) + [metrics[key].append(value) for key, value in mets.items()] + if should_log(step): + for name, values in metrics.items(): + logger.scalar(name, np.array(values, np.float64).mean()) + metrics[name].clear() + logger.add(agnt.report(next(dataset))) + logger.write(fps=True) + + driver.on_step(train_step) + + while step < config.steps: + logger.write() + driver(policy, steps=config.eval_every) + agnt.save(logdir / 'variables.pkl') diff --git a/offlinedv2/dreamerv2/common/__init__.py b/offlinedv2/dreamerv2/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7c6f81d4602aa5e72082c5ec92ea8d30e539d44b --- /dev/null +++ b/offlinedv2/dreamerv2/common/__init__.py @@ -0,0 +1,17 @@ +# General tools. +from .config import * +from .counter import * +from .flags import * +from .logger import * +from .when import * + +# RL tools. +from .other import * +from .driver import * +from .envs import * +from .replay import * + +# TensorFlow tools. +from .tfutils import * +from .dists import * +from .nets import * diff --git a/offlinedv2/dreamerv2/common/config.py b/offlinedv2/dreamerv2/common/config.py new file mode 100644 index 0000000000000000000000000000000000000000..7a484125efb67edae123cdc58bcdd37bb3d3fac5 --- /dev/null +++ b/offlinedv2/dreamerv2/common/config.py @@ -0,0 +1,188 @@ +import json +import pathlib +import re + + +class Config(dict): + SEP = '.' + IS_PATTERN = re.compile(r'.*[^A-Za-z0-9_.-].*') + + def __init__(self, *args, **kwargs): + mapping = dict(*args, **kwargs) + mapping = self._flatten(mapping) + mapping = self._ensure_keys(mapping) + mapping = self._ensure_values(mapping) + self._flat = mapping + self._nested = self._nest(mapping) + # Need to assign the values to the base class dictionary so that + # conversion to dict does not lose the content. 
+ super().__init__(self._nested) + + @property + def flat(self): + return self._flat.copy() + + def save(self, filename): + filename = pathlib.Path(filename) + if filename.suffix == '.json': + filename.write_text(json.dumps(dict(self))) + elif filename.suffix in ('.yml', '.yaml'): + import ruamel.yaml as yaml + with filename.open('w') as f: + yaml.safe_dump(dict(self), f) + else: + raise NotImplementedError(filename.suffix) + + @classmethod + def load(cls, filename): + filename = pathlib.Path(filename) + if filename.suffix == '.json': + return cls(json.loads(filename.read_text())) + elif filename.suffix in ('.yml', '.yaml'): + import ruamel.yaml as yaml + return cls(yaml.safe_load(filename.read_text())) + else: + raise NotImplementedError(filename.suffix) + + def parse_flags(self, argv=None, known_only=False, help_exists=None): + from . import flags + return flags.Flags(self).parse(argv, known_only, help_exists) + + def __contains__(self, name): + try: + self[name] + return True + except KeyError: + return False + + def __getattr__(self, name): + if name.startswith('_'): + return super().__getattr__(name) + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __getitem__(self, name): + result = self._nested + for part in name.split(self.SEP): + result = result[part] + if isinstance(result, dict): + result = type(self)(result) + return result + + def __setattr__(self, key, value): + if key.startswith('_'): + return super().__setattr__(key, value) + message = f"Tried to set key '{key}' on immutable config. Use update()." + raise AttributeError(message) + + def __setitem__(self, key, value): + if key.startswith('_'): + return super().__setitem__(key, value) + message = f"Tried to set key '{key}' on immutable config. Use update()." + raise AttributeError(message) + + def __reduce__(self): + return (type(self), (dict(self),)) + + def __str__(self): + lines = ['\nConfig:'] + keys, vals, typs = [], [], [] + for key, val in self.flat.items(): + keys.append(key + ':') + vals.append(self._format_value(val)) + typs.append(self._format_type(val)) + max_key = max(len(k) for k in keys) if keys else 0 + max_val = max(len(v) for v in vals) if vals else 0 + for key, val, typ in zip(keys, vals, typs): + key = key.ljust(max_key) + val = val.ljust(max_val) + lines.append(f'{key} {val} ({typ})') + return '\n'.join(lines) + + def update(self, *args, **kwargs): + result = self._flat.copy() + inputs = self._flatten(dict(*args, **kwargs)) + for key, new in inputs.items(): + if self.IS_PATTERN.match(key): + pattern = re.compile(key) + keys = {k for k in result if pattern.match(k)} + else: + keys = [key] + if not keys: + raise KeyError(f'Unknown key or pattern {key}.') + for key in keys: + old = result[key] + try: + if isinstance(old, int) and isinstance(new, float): + if float(int(new)) != new: + message = f"Cannot convert fractional float {new} to int." 
+ raise ValueError(message) + result[key] = type(old)(new) + except (ValueError, TypeError): + raise TypeError( + f"Cannot convert '{new}' to type '{type(old).__name__}' " + + f"of value '{old}' for key '{key}'.") + return type(self)(result) + + def _flatten(self, mapping): + result = {} + for key, value in mapping.items(): + if isinstance(value, dict): + for k, v in self._flatten(value).items(): + if self.IS_PATTERN.match(key) or self.IS_PATTERN.match(k): + combined = f'{key}\\{self.SEP}{k}' + else: + combined = f'{key}{self.SEP}{k}' + result[combined] = v + else: + result[key] = value + return result + + def _nest(self, mapping): + result = {} + for key, value in mapping.items(): + parts = key.split(self.SEP) + node = result + for part in parts[:-1]: + if part not in node: + node[part] = {} + node = node[part] + node[parts[-1]] = value + return result + + def _ensure_keys(self, mapping): + for key in mapping: + assert not self.IS_PATTERN.match(key), key + return mapping + + def _ensure_values(self, mapping): + result = json.loads(json.dumps(mapping)) + for key, value in result.items(): + if isinstance(value, list): + value = tuple(value) + if isinstance(value, tuple): + if len(value) == 0: + message = 'Empty lists are disallowed because their type is unclear.' + raise TypeError(message) + if not isinstance(value[0], (str, float, int, bool)): + message = 'Lists can only contain strings, floats, ints, bools' + message += f' but not {type(value[0])}' + raise TypeError(message) + if not all(isinstance(x, type(value[0])) for x in value[1:]): + message = 'Elements of a list must all be of the same type.' + raise TypeError(message) + result[key] = value + return result + + def _format_value(self, value): + if isinstance(value, (list, tuple)): + return '[' + ', '.join(self._format_value(x) for x in value) + ']' + return str(value) + + def _format_type(self, value): + if isinstance(value, (list, tuple)): + assert len(value) > 0, value + return self._format_type(value[0]) + 's' + return str(type(value).__name__) diff --git a/offlinedv2/dreamerv2/common/counter.py b/offlinedv2/dreamerv2/common/counter.py new file mode 100644 index 0000000000000000000000000000000000000000..ba5507df807f011fe0d3aa1248c00b412b02dad2 --- /dev/null +++ b/offlinedv2/dreamerv2/common/counter.py @@ -0,0 +1,28 @@ +import functools + + +@functools.total_ordering +class Counter: + def __init__(self, initial=0): + self.value = initial + + def __int__(self): + return int(self.value) + + def __eq__(self, other): + return int(self) == other + + def __ne__(self, other): + return int(self) != other + + def __lt__(self, other): + return int(self) < other + + def __add__(self, other): + return int(self) + other + + def __mod__(self, other): + return int(self) % other + + def increment(self, amount=1): + self.value += amount diff --git a/offlinedv2/dreamerv2/common/dists.py b/offlinedv2/dreamerv2/common/dists.py new file mode 100644 index 0000000000000000000000000000000000000000..10a501329314a1a05b1179cd7f7b36fbb4a319cf --- /dev/null +++ b/offlinedv2/dreamerv2/common/dists.py @@ -0,0 +1,120 @@ +import tensorflow as tf +import tensorflow_probability as tfp +from tensorflow_probability import distributions as tfd + +# Patch to ignore seed to avoid synchronization across GPUs. 
+_orig_random_categorical = tf.random.categorical + + +def random_categorical(*args, **kwargs): + kwargs['seed'] = None + return _orig_random_categorical(*args, **kwargs) + + +tf.random.categorical = random_categorical + +# Patch to ignore seed to avoid synchronization across GPUs. +_orig_random_normal = tf.random.normal + + +def random_normal(*args, **kwargs): + kwargs['seed'] = None + return _orig_random_normal(*args, **kwargs) + + +tf.random.normal = random_normal + + +class SampleDist: + + def __init__(self, dist, samples=100): + self._dist = dist + self._samples = samples + + @property + def name(self): + return 'SampleDist' + + def __getattr__(self, name): + return getattr(self._dist, name) + + def mean(self): + samples = self._dist.sample(self._samples) + return samples.mean(0) + + def mode(self): + sample = self._dist.sample(self._samples) + logprob = self._dist.log_prob(sample) + return tf.gather(sample, tf.argmax(logprob))[0] + + def entropy(self): + sample = self._dist.sample(self._samples) + logprob = self.log_prob(sample) + return -logprob.mean(0) + + +class OneHotDist(tfd.OneHotCategorical): + + def __init__(self, logits=None, probs=None, dtype=None): + self._sample_dtype = dtype or tf.float32 + super().__init__(logits=logits, probs=probs) + + def mode(self): + return tf.cast(super().mode(), self._sample_dtype) + + def sample(self, sample_shape=(), seed=None): + # Straight through biased gradient estimator. + sample = tf.cast(super().sample(sample_shape, seed), self._sample_dtype) + probs = self._pad(super().probs_parameter(), sample.shape) + sample += tf.cast(probs - tf.stop_gradient(probs), self._sample_dtype) + return sample + + def _pad(self, tensor, shape): + tensor = super().probs_parameter() + while len(tensor.shape) < len(shape): + tensor = tensor[None] + return tensor + + +class TruncNormalDist(tfd.TruncatedNormal): + + def __init__(self, loc, scale, low, high, clip=1e-6, mult=1): + super().__init__(loc, scale, low, high) + self._clip = clip + self._mult = mult + + def sample(self, *args, **kwargs): + event = super().sample(*args, **kwargs) + if self._clip: + clipped = tf.clip_by_value( + event, self.low + self._clip, self.high - self._clip) + event = event - tf.stop_gradient(event) + tf.stop_gradient(clipped) + if self._mult: + event *= self._mult + return event + + +class TanhBijector(tfp.bijectors.Bijector): + + def __init__(self, validate_args=False, name='tanh'): + super().__init__( + forward_min_event_ndims=0, + validate_args=validate_args, + name=name) + + def _forward(self, x): + return tf.nn.tanh(x) + + def _inverse(self, y): + dtype = y.dtype + y = tf.cast(y, tf.float32) + y = tf.where( + tf.less_equal(tf.abs(y), 1.), + tf.clip_by_value(y, -0.99999997, 0.99999997), y) + y = tf.atanh(y) + y = tf.cast(y, dtype) + return y + + def _forward_log_det_jacobian(self, x): + log2 = tf.math.log(tf.constant(2.0, dtype=x.dtype)) + return 2.0 * (log2 - x - tf.nn.softplus(-2.0 * x)) diff --git a/offlinedv2/dreamerv2/common/driver.py b/offlinedv2/dreamerv2/common/driver.py new file mode 100644 index 0000000000000000000000000000000000000000..91819f2a294e182857747ebc8b74b51ac8a16075 --- /dev/null +++ b/offlinedv2/dreamerv2/common/driver.py @@ -0,0 +1,138 @@ +import numpy as np + + +class Driver: + + def __init__(self, envs, **kwargs): + self._envs = envs + self._kwargs = kwargs + self._on_steps = [] + self._on_resets = [] + self._on_episodes = [] + self._act_spaces = [env.act_space for env in envs] + self.reset() + + def on_step(self, callback): + 
self._on_steps.append(callback) + + def on_reset(self, callback): + self._on_resets.append(callback) + + def on_episode(self, callback): + self._on_episodes.append(callback) + + def reset(self): + self._obs = [None] * len(self._envs) + self._eps = [None] * len(self._envs) + self._state = None + + def __call__(self, policy, steps=0, episodes=0): + step, episode = 0, 0 + while step < steps or episode < episodes: + obs = { + i: self._envs[i].reset() + for i, ob in enumerate(self._obs) if ob is None or ob['is_last']} + for i, ob in obs.items(): + self._obs[i] = ob() if callable(ob) else ob + act = {k: np.zeros(v.shape) for k, v in self._act_spaces[i].items()} + tran = {k: self._convert(v) for k, v in {**ob, **act}.items()} + [fn(tran, worker=i, **self._kwargs) for fn in self._on_resets] + self._eps[i] = [tran] + obs = {k: np.stack([o[k] for o in self._obs]) for k in self._obs[0]} + actions, self._state = policy(obs, self._state, **self._kwargs) + actions = [ + {k: np.array(actions[k][i]) for k in actions} + for i in range(len(self._envs))] + assert len(actions) == len(self._envs) + obs = [e.step(a) for e, a in zip(self._envs, actions)] + obs = [ob() if callable(ob) else ob for ob in obs] + for i, (act, ob) in enumerate(zip(actions, obs)): + tran = {k: self._convert(v) for k, v in {**ob, **act}.items()} + [fn(tran, worker=i, **self._kwargs) for fn in self._on_steps] + self._eps[i].append(tran) + step += 1 + if ob['is_last']: + ep = self._eps[i] + ep = {k: self._convert([t[k] for t in ep]) for k in ep[0]} + [fn(ep, **self._kwargs) for fn in self._on_episodes] + episode += 1 + self._obs = obs + + def _convert(self, value): + value = np.array(value) + if np.issubdtype(value.dtype, np.floating): + return value.astype(np.float32) + elif np.issubdtype(value.dtype, np.signedinteger): + return value.astype(np.int32) + elif np.issubdtype(value.dtype, np.uint8): + return value.astype(np.uint8) + return value + + +class MultiEnvDriver: + + def __init__(self, envs, modes, **kwargs): + self._envs = envs + self._kwargs = kwargs + self._on_steps = [] + self._on_resets = [] + self._on_episodes = [] + self._act_spaces = [env.act_space for env in envs] + self.reset() + self.modes = modes + + def on_step(self, callback): + self._on_steps.append(callback) + + def on_reset(self, callback): + self._on_resets.append(callback) + + def on_episode(self, callback): + self._on_episodes.append(callback) + + def reset(self): + self._obs = [None] * len(self._envs) + self._eps = [None] * len(self._envs) + self._state = None + + def __call__(self, policy, steps=0, episodes=0): + step, episode = 0, 0 + while step < steps or episode < episodes: + obs = { + i: self._envs[i].reset() + for i, ob in enumerate(self._obs) if ob is None or ob['is_last']} + for i, ob in obs.items(): + self._obs[i] = ob() if callable(ob) else ob + act = {k: np.zeros(v.shape) for k, v in self._act_spaces[i].items()} + tran = {k: self._convert(v) for k, v in {**ob, **act}.items()} + [fn(tran, worker=i, **self._kwargs) for fn in self._on_resets] + self._eps[i] = [tran] + obs = {k: np.stack([o[k] for o in self._obs]) for k in self._obs[0]} + actions, self._state = policy(obs, self._state, **self._kwargs) + actions = [ + {k: np.array(actions[k][i]) for k in actions} + for i in range(len(self._envs))] + assert len(actions) == len(self._envs) + obs = [e.step(a) for e, a in zip(self._envs, actions)] + obs = [ob() if callable(ob) else ob for ob in obs] + for i, (act, ob) in enumerate(zip(actions, obs)): + tran = {k: self._convert(v) for k, v in {**ob, 
**act}.items()} + [fn(tran, worker=i, **self._kwargs) for fn in self._on_steps] + self._eps[i].append(tran) + step += 1 + if ob['is_last']: + ep = self._eps[i] + ep = {k: self._convert([t[k] for t in ep]) for k in ep[0]} + [fn(ep, self.modes[i], **self._kwargs) for fn in self._on_episodes] + episode += 1 + self._obs = obs + + def _convert(self, value): + value = np.array(value) + if np.issubdtype(value.dtype, np.floating): + return value.astype(np.float32) + elif np.issubdtype(value.dtype, np.signedinteger): + return value.astype(np.int32) + elif np.issubdtype(value.dtype, np.uint8): + return value.astype(np.uint8) + return value diff --git a/offlinedv2/dreamerv2/common/envs.py b/offlinedv2/dreamerv2/common/envs.py new file mode 100644 index 0000000000000000000000000000000000000000..e01845fd0a90557e23797eaaeb2a5d1e9f4fbaa6 --- /dev/null +++ b/offlinedv2/dreamerv2/common/envs.py @@ -0,0 +1,815 @@ +import atexit +import os +import sys +import threading +import traceback + +import cloudpickle +import gym +import numpy as np + + +class GymWrapper: + + def __init__(self, env, obs_key='image', act_key='action'): + self._env = env + self._obs_is_dict = hasattr(self._env.observation_space, 'spaces') + self._act_is_dict = hasattr(self._env.action_space, 'spaces') + self._obs_key = obs_key + self._act_key = act_key + + def __getattr__(self, name): + if name.startswith('__'): + raise AttributeError(name) + try: + return getattr(self._env, name) + except AttributeError: + raise ValueError(name) + + @property + def obs_space(self): + if self._obs_is_dict: + spaces = self._env.observation_space.spaces.copy() + else: + spaces = {self._obs_key: self._env.observation_space} + return { + **spaces, + 'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32), + 'is_first': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_last': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_terminal': gym.spaces.Box(0, 1, (), dtype=np.bool), + } + + @property + def act_space(self): + if self._act_is_dict: + return self._env.action_space.spaces.copy() + else: + return {self._act_key: self._env.action_space} + + def step(self, action): + if not self._act_is_dict: + action = action[self._act_key] + obs, reward, done, info = self._env.step(action) + if not self._obs_is_dict: + obs = {self._obs_key: obs} + obs['reward'] = float(reward) + obs['is_first'] = False + obs['is_last'] = done + obs['is_terminal'] = info.get('is_terminal', done) + return obs + + def reset(self): + obs = self._env.reset() + if not self._obs_is_dict: + obs = {self._obs_key: obs} + obs['reward'] = 0.0 + obs['is_first'] = True + obs['is_last'] = False + obs['is_terminal'] = False + return obs + + +class DMC: + def __init__(self, name, action_repeat=1, size=(64, 64), camera=None, **kwargs): + os.environ['MUJOCO_GL'] = 'egl' + domain, task = name.split('_', 1) + if domain == 'cup': # Only domain with multiple words. 
+ domain = 'ball_in_cup' + if domain == 'manip': + from dm_control import manipulation + self._env = manipulation.load(task + '_vision') + elif domain == 'locom': + from dm_control.locomotion.examples import basic_rodent_2020 + self._env = getattr(basic_rodent_2020, task)() + else: + from dm_control import suite + self._env = suite.load(domain, task, **kwargs) + self._action_repeat = action_repeat + self._size = size + if camera in (-1, None): + camera = dict( + quadruped_walk=2, quadruped_run=2, quadruped_escape=2, + quadruped_fetch=2, locom_rodent_maze_forage=1, + locom_rodent_two_touch=1, + ).get(name, 0) + self._camera = camera + self._ignored_keys = ['orientations', 'height', 'velocity', 'pixels'] + for key, value in self._env.observation_spec().items(): + if value.shape == (0,): + print(f"Ignoring empty observation key '{key}'.") + self._ignored_keys.append(key) + + @property + def obs_space(self): + spaces = { + 'image': gym.spaces.Box(0, 255, self._size + (3,), dtype=np.uint8), + 'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32), + 'is_first': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_last': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_terminal': gym.spaces.Box(0, 1, (), dtype=np.bool), + } + for key, value in self._env.observation_spec().items(): + if key in self._ignored_keys: + continue + if value.dtype == np.float64: + spaces[key] = gym.spaces.Box(-np.inf, np.inf, value.shape, np.float32) + elif value.dtype == np.uint8: + spaces[key] = gym.spaces.Box(0, 255, value.shape, np.uint8) + else: + raise NotImplementedError(value.dtype) + return spaces + + @property + def act_space(self): + spec = self._env.action_spec() + action = gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32) + return {'action': action} + + def step(self, action): + assert np.isfinite(action['action']).all(), action['action'] + reward = 0.0 + for _ in range(self._action_repeat): + time_step = self._env.step(action['action']) + reward += time_step.reward or 0.0 + if time_step.last(): + break + assert time_step.discount in (0, 1) + obs = { + 'reward': reward, + 'is_first': False, + 'is_last': time_step.last(), + 'is_terminal': time_step.discount == 0, + 'image': self._env.physics.render(*self._size, camera_id=self._camera), + } + obs.update({ + k: v for k, v in dict(time_step.observation).items() + if k not in self._ignored_keys}) + return obs + + def reset(self): + time_step = self._env.reset() + obs = { + 'reward': 0.0, + 'is_first': True, + 'is_last': False, + 'is_terminal': False, + 'image': self._env.physics.render(*self._size, camera_id=self._camera), + } + obs.update({ + k: v for k, v in dict(time_step.observation).items() + if k not in self._ignored_keys}) + return obs + + +class Atari: + LOCK = threading.Lock() + + def __init__( + self, name, action_repeat=4, size=(84, 84), grayscale=True, noops=30, + life_done=False, sticky=True, all_actions=False): + assert size[0] == size[1] + import gym.wrappers + import gym.envs.atari + if name == 'james_bond': + name = 'jamesbond' + with self.LOCK: + env = gym.envs.atari.AtariEnv( + game=name, obs_type='image', frameskip=1, + repeat_action_probability=0.25 if sticky else 0.0, + full_action_space=all_actions) + # Avoid unnecessary rendering in inner env. + env._get_obs = lambda: None + # Tell wrapper that the inner env has no action repeat. 
+ env.spec = gym.envs.registration.EnvSpec('NoFrameskip-v0') + self._env = gym.wrappers.AtariPreprocessing( + env, noops, action_repeat, size[0], life_done, grayscale) + self._size = size + self._grayscale = grayscale + + @property + def obs_space(self): + shape = self._size + (1 if self._grayscale else 3,) + return { + 'image': gym.spaces.Box(0, 255, shape, np.uint8), + 'ram': gym.spaces.Box(0, 255, (128,), np.uint8), + 'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32), + 'is_first': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_last': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_terminal': gym.spaces.Box(0, 1, (), dtype=np.bool), + } + + @property + def act_space(self): + return {'action': self._env.action_space} + + def step(self, action): + image, reward, done, info = self._env.step(action['action']) + if self._grayscale: + image = image[..., None] + return { + 'image': image, + 'ram': self._env.env._get_ram(), + 'reward': reward, + 'is_first': False, + 'is_last': done, + 'is_terminal': done, + } + + def reset(self): + with self.LOCK: + image = self._env.reset() + if self._grayscale: + image = image[..., None] + return { + 'image': image, + 'ram': self._env.env._get_ram(), + 'reward': 0.0, + 'is_first': True, + 'is_last': False, + 'is_terminal': False, + } + + def close(self): + return self._env.close() + + +class Crafter: + + def __init__(self, outdir=None, reward=True, seed=None): + import crafter + self._env = crafter.Env(reward=reward, seed=seed) + self._env = crafter.Recorder( + self._env, outdir, + save_stats=True, + save_video=False, + save_episode=False, + ) + self._achievements = crafter.constants.achievements.copy() + + @property + def obs_space(self): + spaces = { + 'image': self._env.observation_space, + 'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32), + 'is_first': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_last': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_terminal': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'log_reward': gym.spaces.Box(-np.inf, np.inf, (), np.float32), + } + spaces.update({ + f'log_achievement_{k}': gym.spaces.Box(0, 2 ** 31 - 1, (), np.int32) + for k in self._achievements}) + return spaces + + @property + def act_space(self): + return {'action': self._env.action_space} + + def step(self, action): + image, reward, done, info = self._env.step(action['action']) + obs = { + 'image': image, + 'reward': reward, + 'is_first': False, + 'is_last': done, + 'is_terminal': info['discount'] == 0, + 'log_reward': info['reward'], + } + obs.update({ + f'log_achievement_{k}': v + for k, v in info['achievements'].items()}) + return obs + + def reset(self): + obs = { + 'image': self._env.reset(), + 'reward': 0.0, + 'is_first': True, + 'is_last': False, + 'is_terminal': False, + 'log_reward': 0.0, + } + obs.update({ + f'log_achievement_{k}': 0 + for k in self._achievements}) + return obs + + +class Dummy: + + def __init__(self): + pass + + @property + def obs_space(self): + return { + 'image': gym.spaces.Box(0, 255, (64, 64, 3), dtype=np.uint8), + 'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32), + 'is_first': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_last': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_terminal': gym.spaces.Box(0, 1, (), dtype=np.bool), + } + + @property + def act_space(self): + return {'action': gym.spaces.Box(-1, 1, (6,), dtype=np.float32)} + + def step(self, action): + return { + 'image': np.zeros((64, 64, 3)), + 'reward': 0.0, + 'is_first': False, + 'is_last': False, + 'is_terminal': False, 
+ } + + def reset(self): + return { + 'image': np.zeros((64, 64, 3)), + 'reward': 0.0, + 'is_first': True, + 'is_last': False, + 'is_terminal': False, + } + + +class TimeLimit: + + def __init__(self, env, duration): + self._env = env + self._duration = duration + self._step = None + + def __getattr__(self, name): + if name.startswith('__'): + raise AttributeError(name) + try: + return getattr(self._env, name) + except AttributeError: + raise ValueError(name) + + def step(self, action): + assert self._step is not None, 'Must reset environment.' + obs = self._env.step(action) + self._step += 1 + if self._duration and self._step >= self._duration: + obs['is_last'] = True + self._step = None + return obs + + def reset(self): + self._step = 0 + return self._env.reset() + + +class NormalizeAction: + + def __init__(self, env, key='action'): + self._env = env + self._key = key + space = env.act_space[key] + self._mask = np.isfinite(space.low) & np.isfinite(space.high) + self._low = np.where(self._mask, space.low, -1) + self._high = np.where(self._mask, space.high, 1) + + def __getattr__(self, name): + if name.startswith('__'): + raise AttributeError(name) + try: + return getattr(self._env, name) + except AttributeError: + raise ValueError(name) + + @property + def act_space(self): + low = np.where(self._mask, -np.ones_like(self._low), self._low) + high = np.where(self._mask, np.ones_like(self._low), self._high) + space = gym.spaces.Box(low, high, dtype=np.float32) + return {**self._env.act_space, self._key: space} + + def step(self, action): + orig = (action[self._key] + 1) / 2 * (self._high - self._low) + self._low + orig = np.where(self._mask, orig, action[self._key]) + return self._env.step({**action, self._key: orig}) + + +class OneHotAction: + + def __init__(self, env, key='action'): + assert hasattr(env.act_space[key], 'n') + self._env = env + self._key = key + self._random = np.random.RandomState() + + def __getattr__(self, name): + if name.startswith('__'): + raise AttributeError(name) + try: + return getattr(self._env, name) + except AttributeError: + raise ValueError(name) + + @property + def act_space(self): + shape = (self._env.act_space[self._key].n,) + space = gym.spaces.Box(low=0, high=1, shape=shape, dtype=np.float32) + space.sample = self._sample_action + space.n = shape[0] + return {**self._env.act_space, self._key: space} + + def step(self, action): + index = np.argmax(action[self._key]).astype(int) + reference = np.zeros_like(action[self._key]) + reference[index] = 1 + if not np.allclose(reference, action[self._key]): + raise ValueError(f'Invalid one-hot action:\n{action}') + return self._env.step({**action, self._key: index}) + + def reset(self): + return self._env.reset() + + def _sample_action(self): + actions = self._env.act_space.n + index = self._random.randint(0, actions) + reference = np.zeros(actions, dtype=np.float32) + reference[index] = 1.0 + return reference + + +class ResizeImage: + + def __init__(self, env, size=(64, 64)): + self._env = env + self._size = size + self._keys = [ + k for k, v in env.obs_space.items() + if len(v.shape) > 1 and v.shape[:2] != size] + print(f'Resizing keys {",".join(self._keys)} to {self._size}.') + if self._keys: + from PIL import Image + self._Image = Image + + def __getattr__(self, name): + if name.startswith('__'): + raise AttributeError(name) + try: + return getattr(self._env, name) + except AttributeError: + raise ValueError(name) + + @property + def obs_space(self): + spaces = self._env.obs_space + for key in self._keys: + shape 
= self._size + spaces[key].shape[2:] + spaces[key] = gym.spaces.Box(0, 255, shape, np.uint8) + return spaces + + def step(self, action): + obs = self._env.step(action) + for key in self._keys: + obs[key] = self._resize(obs[key]) + return obs + + def reset(self): + obs = self._env.reset() + for key in self._keys: + obs[key] = self._resize(obs[key]) + return obs + + def _resize(self, image): + image = self._Image.fromarray(image) + image = image.resize(self._size, self._Image.NEAREST) + image = np.array(image) + return image + + +class RenderImage: + + def __init__(self, env, key='image'): + self._env = env + self._key = key + self._shape = self._env.render().shape + + def __getattr__(self, name): + if name.startswith('__'): + raise AttributeError(name) + try: + return getattr(self._env, name) + except AttributeError: + raise ValueError(name) + + @property + def obs_space(self): + spaces = self._env.obs_space + spaces[self._key] = gym.spaces.Box(0, 255, self._shape, np.uint8) + return spaces + + def step(self, action): + obs = self._env.step(action) + obs[self._key] = self._env.render('rgb_array') + return obs + + def reset(self): + obs = self._env.reset() + obs[self._key] = self._env.render('rgb_array') + return obs + + +class Async: + # Message types for communication via the pipe. + _ACCESS = 1 + _CALL = 2 + _RESULT = 3 + _CLOSE = 4 + _EXCEPTION = 5 + + def __init__(self, constructor, strategy='thread'): + self._pickled_ctor = cloudpickle.dumps(constructor) + if strategy == 'process': + import multiprocessing as mp + context = mp.get_context('spawn') + elif strategy == 'thread': + import multiprocessing.dummy as context + else: + raise NotImplementedError(strategy) + self._strategy = strategy + self._conn, conn = context.Pipe() + self._process = context.Process(target=self._worker, args=(conn,)) + atexit.register(self.close) + self._process.start() + self._receive() # Ready. + self._obs_space = None + self._act_space = None + + def access(self, name): + self._conn.send((self._ACCESS, name)) + return self._receive + + def call(self, name, *args, **kwargs): + payload = name, args, kwargs + self._conn.send((self._CALL, payload)) + return self._receive + + def close(self): + try: + self._conn.send((self._CLOSE, None)) + self._conn.close() + except IOError: + pass # The connection was already closed. + self._process.join(5) + + @property + def obs_space(self): + if not self._obs_space: + self._obs_space = self.access('obs_space')() + return self._obs_space + + @property + def act_space(self): + if not self._act_space: + self._act_space = self.access('act_space')() + return self._act_space + + def step(self, action, blocking=False): + promise = self.call('step', action) + if blocking: + return promise() + else: + return promise + + def reset(self, blocking=False): + promise = self.call('reset') + if blocking: + return promise() + else: + return promise + + def _receive(self): + try: + message, payload = self._conn.recv() + except (OSError, EOFError): + raise RuntimeError('Lost connection to environment worker.') + # Re-raise exceptions in the main process. + if message == self._EXCEPTION: + stacktrace = payload + raise Exception(stacktrace) + if message == self._RESULT: + return payload + raise KeyError('Received message of unexpected type {}'.format(message)) + + def _worker(self, conn): + try: + ctor = cloudpickle.loads(self._pickled_ctor) + env = ctor() + conn.send((self._RESULT, None)) # Ready. + while True: + try: + # Only block for short times to have keyboard exceptions be raised. 
+ if not conn.poll(0.1): + continue + message, payload = conn.recv() + except (EOFError, KeyboardInterrupt): + break + if message == self._ACCESS: + name = payload + result = getattr(env, name) + conn.send((self._RESULT, result)) + continue + if message == self._CALL: + name, args, kwargs = payload + result = getattr(env, name)(*args, **kwargs) + conn.send((self._RESULT, result)) + continue + if message == self._CLOSE: + break + raise KeyError('Received message of unknown type {}'.format(message)) + except Exception: + stacktrace = ''.join(traceback.format_exception(*sys.exc_info())) + print('Error in environment process: {}'.format(stacktrace)) + conn.send((self._EXCEPTION, stacktrace)) + finally: + try: + conn.close() + except IOError: + pass # The connection was already closed. + + +class DMCMultitask: + def __init__(self, name, action_repeat=1, size=(64, 64), camera=None): + os.environ['MUJOCO_GL'] = 'egl' + domain, task, xml = name.split('_', 2) + + import envs.fb_mtenv_dmc as fb_mtenv_dmc + self._env = fb_mtenv_dmc.load( + domain_name=domain, + task_name=task, + task_kwargs={'xml_file_id': xml}, + ) + + self._action_repeat = action_repeat + self._size = size + if camera in (-1, None): + camera = dict( + quadruped_walk=2, quadruped_run=2, quadruped_escape=2, + quadruped_fetch=2, locom_rodent_maze_forage=1, + locom_rodent_two_touch=1, + ).get(name, 0) + self._camera = camera + self._ignored_keys = ['orientations', 'height', 'velocity', 'pixels'] + for key, value in self._env.observation_spec().items(): + if value.shape == (0,): + print(f"Ignoring empty observation key '{key}'.") + self._ignored_keys.append(key) + + @property + def obs_space(self): + spaces = { + 'image': gym.spaces.Box(0, 255, self._size + (3,), dtype=np.uint8), + 'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32), + 'is_first': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_last': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_terminal': gym.spaces.Box(0, 1, (), dtype=np.bool), + } + for key, value in self._env.observation_spec().items(): + if key in self._ignored_keys: + continue + if value.dtype == np.float64: + spaces[key] = gym.spaces.Box(-np.inf, np.inf, value.shape, np.float32) + elif value.dtype == np.uint8: + spaces[key] = gym.spaces.Box(0, 255, value.shape, np.uint8) + else: + raise NotImplementedError(value.dtype) + return spaces + + @property + def act_space(self): + spec = self._env.action_spec() + action = gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32) + return {'action': action} + + def step(self, action): + assert np.isfinite(action['action']).all(), action['action'] + reward = 0.0 + for _ in range(self._action_repeat): + time_step = self._env.step(action['action']) + reward += time_step.reward or 0.0 + if time_step.last(): + break + assert time_step.discount in (0, 1) + obs = { + 'reward': reward, + 'is_first': False, + 'is_last': time_step.last(), + 'is_terminal': time_step.discount == 0, + 'image': self._env.physics.render(*self._size, camera_id=self._camera), + } + obs.update({ + k: v for k, v in dict(time_step.observation).items() + if k not in self._ignored_keys}) + return obs + + def reset(self): + time_step = self._env.reset() + obs = { + 'reward': 0.0, + 'is_first': True, + 'is_last': False, + 'is_terminal': False, + 'image': self._env.physics.render(*self._size, camera_id=self._camera), + } + obs.update({ + k: v for k, v in dict(time_step.observation).items() + if k not in self._ignored_keys}) + return obs + + +class DistractingDMC: + def __init__(self, name, 
action_repeat=1, size=(64, 64), camera=None, **kwargs): + os.environ['MUJOCO_GL'] = 'egl' + domain, task, difficulty = name.split('_', 2) + + from envs.distracting_control import suite as dsuite + self._env = dsuite.load( + domain_name=domain, + task_name=task, + difficulty=difficulty, + **kwargs + ) + + self._action_repeat = action_repeat + self._size = size + if camera in (-1, None): + camera = dict( + quadruped_walk=2, quadruped_run=2, quadruped_escape=2, + quadruped_fetch=2, locom_rodent_maze_forage=1, + locom_rodent_two_touch=1, + ).get(name, 0) + self._camera = camera + self._ignored_keys = [] + for key, value in self._env.observation_spec().items(): + if value.shape == (0,): + print(f"Ignoring empty observation key '{key}'.") + self._ignored_keys.append(key) + + @property + def obs_space(self): + spaces = { + 'image': gym.spaces.Box(0, 255, self._size + (3,), dtype=np.uint8), + 'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32), + 'is_first': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_last': gym.spaces.Box(0, 1, (), dtype=np.bool), + 'is_terminal': gym.spaces.Box(0, 1, (), dtype=np.bool), + } + for key, value in self._env.observation_spec().items(): + if key in self._ignored_keys: + continue + if value.dtype == np.float64: + spaces[key] = gym.spaces.Box(-np.inf, np.inf, value.shape, np.float32) + elif value.dtype == np.uint8: + spaces[key] = gym.spaces.Box(0, 255, value.shape, np.uint8) + else: + raise NotImplementedError(value.dtype) + return spaces + + @property + def act_space(self): + spec = self._env.action_spec() + action = gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32) + return {'action': action} + + def step(self, action): + assert np.isfinite(action['action']).all(), action['action'] + reward = 0.0 + for _ in range(self._action_repeat): + time_step = self._env.step(action['action']) + reward += time_step.reward or 0.0 + if time_step.last(): + break + assert time_step.discount in (0, 1) + obs = { + 'reward': reward, + 'is_first': False, + 'is_last': time_step.last(), + 'is_terminal': time_step.discount == 0, + 'image': self._env.physics.render(*self._size, camera_id=self._camera), + } + obs.update({ + k: v for k, v in dict(time_step.observation).items() + if k not in self._ignored_keys}) + return obs + + def reset(self): + time_step = self._env.reset() + obs = { + 'reward': 0.0, + 'is_first': True, + 'is_last': False, + 'is_terminal': False, + 'image': self._env.physics.render(*self._size, camera_id=self._camera), + } + obs.update({ + k: v for k, v in dict(time_step.observation).items() + if k not in self._ignored_keys}) + return obs diff --git a/offlinedv2/dreamerv2/common/flags.py b/offlinedv2/dreamerv2/common/flags.py new file mode 100644 index 0000000000000000000000000000000000000000..746c63456d9e7bb7b01cac8fdd5b59dd8437757c --- /dev/null +++ b/offlinedv2/dreamerv2/common/flags.py @@ -0,0 +1,97 @@ +import re +import sys + + +class Flags: + + def __init__(self, *args, **kwargs): + from .config import Config + self._config = Config(*args, **kwargs) + + def parse(self, argv=None, known_only=False, help_exists=None): + if help_exists is None: + help_exists = not known_only + if argv is None: + argv = sys.argv[1:] + if '--help' in argv: + print('\nHelp:') + lines = str(self._config).split('\n')[2:] + print('\n'.join('--' + re.sub(r'[:,\[\]]', '', x) for x in lines)) + help_exists and sys.exit() + parsed = {} + remaining = [] + key = None + vals = None + for arg in argv: + if arg.startswith('--'): + if key: + self._submit_entry(key, vals, parsed, 
remaining) + if '=' in arg: + key, val = arg.split('=', 1) + vals = [val] + else: + key, vals = arg, [] + else: + if key: + vals.append(arg) + else: + remaining.append(arg) + self._submit_entry(key, vals, parsed, remaining) + parsed = self._config.update(parsed) + if known_only: + return parsed, remaining + else: + for flag in remaining: + if flag.startswith('--'): + raise ValueError(f"Flag '{flag}' did not match any config keys.") + assert not remaining, remaining + return parsed + + def _submit_entry(self, key, vals, parsed, remaining): + if not key and not vals: + return + if not key: + vals = ', '.join(f"'{x}'" for x in vals) + raise ValueError(f"Values {vals} were not preceeded by any flag.") + name = key[len('--'):] + if '=' in name: + remaining.extend([key] + vals) + return + if self._config.IS_PATTERN.match(name): + pattern = re.compile(name) + keys = {k for k in self._config.flat if pattern.match(k)} + elif name in self._config: + keys = [name] + else: + keys = [] + if not keys: + remaining.extend([key] + vals) + return + if not vals: + raise ValueError(f"Flag '{key}' was not followed by any values.") + for key in keys: + parsed[key] = self._parse_flag_value(self._config[key], vals, key) + + def _parse_flag_value(self, default, value, key): + value = value if isinstance(value, (tuple, list)) else (value,) + if isinstance(default, (tuple, list)): + if len(value) == 1 and ',' in value[0]: + value = value[0].split(',') + return tuple(self._parse_flag_value(default[0], [x], key) for x in value) + assert len(value) == 1, value + value = str(value[0]) + if default is None: + return value + if isinstance(default, bool): + try: + return bool(['False', 'True'].index(value)) + except ValueError: + message = f"Expected bool but got '{value}' for key '{key}'." + raise TypeError(message) + if isinstance(default, int): + value = float(value) # Allow scientific notation for integers. + if float(int(value)) != value: + message = f"Expected int but got float '{value}' for key '{key}'." 
+ raise TypeError(message) + return int(value) + return type(default)(value) diff --git a/offlinedv2/dreamerv2/common/logger.py b/offlinedv2/dreamerv2/common/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f335645c9ac55b03fb61de8d89302f9256e301 --- /dev/null +++ b/offlinedv2/dreamerv2/common/logger.py @@ -0,0 +1,162 @@ +import json +import os +import pathlib +import time + +import numpy as np + + +class Logger: + + def __init__(self, step, outputs, multiplier=1): + self._step = step + self._outputs = outputs + self._multiplier = multiplier + self._last_step = None + self._last_time = None + self._metrics = [] + + def add(self, mapping, prefix=None): + step = int(self._step) * self._multiplier + for name, value in dict(mapping).items(): + name = f'{prefix}_{name}' if prefix else name + value = np.array(value) + if len(value.shape) not in (0, 2, 3, 4): + raise ValueError( + f"Shape {value.shape} for name '{name}' cannot be " + "interpreted as scalar, image, or video.") + self._metrics.append((step, name, value)) + + def scalar(self, name, value): + self.add({name: value}) + + def image(self, name, value): + self.add({name: value}) + + def video(self, name, value): + self.add({name: value}) + + def write(self, fps=False): + fps and self.scalar('fps', self._compute_fps()) + if not self._metrics: + return + for output in self._outputs: + output(self._metrics) + self._metrics.clear() + + def _compute_fps(self): + step = int(self._step) * self._multiplier + if self._last_step is None: + self._last_time = time.time() + self._last_step = step + return 0 + steps = step - self._last_step + duration = time.time() - self._last_time + self._last_time += duration + self._last_step = step + return steps / duration + + +class TerminalOutput: + + def __call__(self, summaries): + step = max(s for s, _, _, in summaries) + scalars = {k: float(v) for _, k, v in summaries if len(v.shape) == 0} + formatted = {k: self._format_value(v) for k, v in scalars.items()} + print(f'[{step}]', ' / '.join(f'{k} {v}' for k, v in formatted.items())) + + def _format_value(self, value): + if value == 0: + return '0' + elif 0.01 < abs(value) < 10000: + value = f'{value:.2f}' + value = value.rstrip('0') + value = value.rstrip('0') + value = value.rstrip('.') + return value + else: + value = f'{value:.1e}' + value = value.replace('.0e', 'e') + value = value.replace('+0', '') + value = value.replace('+', '') + value = value.replace('-0', '-') + return value + + +class JSONLOutput: + + def __init__(self, logdir): + self._logdir = pathlib.Path(logdir).expanduser() + + def __call__(self, summaries): + scalars = {k: float(v) for _, k, v in summaries if len(v.shape) == 0} + step = max(s for s, _, _, in summaries) + with (self._logdir / 'metrics.jsonl').open('a') as f: + f.write(json.dumps({'step': step, **scalars}) + '\n') + + +class TensorBoardOutput: + + def __init__(self, logdir, fps=20): + # The TensorFlow summary writer supports file protocols like gs://. We use + # os.path over pathlib here to preserve those prefixes. 
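+ # pathlib would normalize 'gs://bucket/dir' to 'gs:/bucket/dir' and thereby
+ # break remote log directories, so the raw string is expanded with os.path.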
+ self._logdir = os.path.expanduser(logdir) + self._writer = None + self._fps = fps + + def __call__(self, summaries): + import tensorflow as tf + self._ensure_writer() + self._writer.set_as_default() + for step, name, value in summaries: + if len(value.shape) == 0: + tf.summary.scalar('scalars/' + name, value, step) + elif len(value.shape) == 2: + tf.summary.image(name, value, step) + elif len(value.shape) == 3: + tf.summary.image(name, value, step) + elif len(value.shape) == 4: + self._video_summary(name, value, step) + self._writer.flush() + + def _ensure_writer(self): + if not self._writer: + import tensorflow as tf + self._writer = tf.summary.create_file_writer( + self._logdir, max_queue=1000) + + def _video_summary(self, name, video, step): + import tensorflow as tf + import tensorflow.compat.v1 as tf1 + name = name if isinstance(name, str) else name.decode('utf-8') + if np.issubdtype(video.dtype, np.floating): + video = np.clip(255 * video, 0, 255).astype(np.uint8) + try: + T, H, W, C = video.shape + summary = tf1.Summary() + image = tf1.Summary.Image(height=H, width=W, colorspace=C) + image.encoded_image_string = encode_gif(video, self._fps) + summary.value.add(tag=name, image=image) + tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step) + except (IOError, OSError) as e: + print('GIF summaries require ffmpeg in $PATH.', e) + tf.summary.image(name, video, step) + + +def encode_gif(frames, fps): + from subprocess import Popen, PIPE + h, w, c = frames[0].shape + pxfmt = {1: 'gray', 3: 'rgb24'}[c] + cmd = ' '.join([ + 'ffmpeg -y -f rawvideo -vcodec rawvideo', + f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex', + '[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse', + f'-r {fps:.02f} -f gif -']) + proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE) + for image in frames: + proc.stdin.write(image.tobytes()) + out, err = proc.communicate() + if proc.returncode: + raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')])) + del proc + return out diff --git a/offlinedv2/dreamerv2/common/nets.py b/offlinedv2/dreamerv2/common/nets.py new file mode 100644 index 0000000000000000000000000000000000000000..421199c10f5c12dc1aa0e32ad4e080434f38ddca --- /dev/null +++ b/offlinedv2/dreamerv2/common/nets.py @@ -0,0 +1,417 @@ +import re + +import numpy as np +import tensorflow as tf +from tensorflow.keras import layers as tfkl +from tensorflow_probability import distributions as tfd +from tensorflow.keras.mixed_precision import experimental as prec + +from .. 
import common + + +class EnsembleRSSM(common.Module): + + def __init__( + self, ensemble=5, stoch=30, deter=200, hidden=200, discrete=False, + act='elu', norm='none', std_act='softplus', min_std=0.1): + super().__init__() + self._ensemble = ensemble + self._stoch = stoch + self._deter = deter + self._hidden = hidden + self._discrete = discrete + self._act = get_act(act) + self._norm = norm + self._std_act = std_act + self._min_std = min_std + self._cell = GRUCell(self._deter, norm=True) + self._cast = lambda x: tf.cast(x, prec.global_policy().compute_dtype) + + def initial(self, batch_size): + dtype = prec.global_policy().compute_dtype + if self._discrete: + state = dict( + logit=tf.zeros([batch_size, self._stoch, self._discrete], dtype), + stoch=tf.zeros([batch_size, self._stoch, self._discrete], dtype), + deter=self._cell.get_initial_state(None, batch_size, dtype)) + else: + state = dict( + mean=tf.zeros([batch_size, self._stoch], dtype), + std=tf.zeros([batch_size, self._stoch], dtype), + stoch=tf.zeros([batch_size, self._stoch], dtype), + deter=self._cell.get_initial_state(None, batch_size, dtype)) + return state + + @tf.function + def observe(self, embed, action, is_first, state=None): + swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape)))) + if state is None: + state = self.initial(tf.shape(action)[0]) + post, prior = common.static_scan( + lambda prev, inputs: self.obs_step(prev[0], *inputs), + (swap(action), swap(embed), swap(is_first)), (state, state)) + post = {k: swap(v) for k, v in post.items()} + prior = {k: swap(v) for k, v in prior.items()} + return post, prior + + @tf.function + def imagine(self, action, state=None): + swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape)))) + if state is None: + state = self.initial(tf.shape(action)[0]) + assert isinstance(state, dict), state + action = swap(action) + prior = common.static_scan(self.img_step, action, state) + prior = {k: swap(v) for k, v in prior.items()} + return prior + + def get_feat(self, state): + stoch = self._cast(state['stoch']) + if self._discrete: + shape = stoch.shape[:-2] + [self._stoch * self._discrete] + stoch = tf.reshape(stoch, shape) + return tf.concat([stoch, state['deter']], -1) + + def get_dist(self, state, ensemble=False): + if ensemble: + state = self._suff_stats_ensemble(state['deter']) + if self._discrete: + logit = state['logit'] + logit = tf.cast(logit, tf.float32) + dist = tfd.Independent(common.OneHotDist(logit), 1) + else: + mean, std = state['mean'], state['std'] + mean = tf.cast(mean, tf.float32) + std = tf.cast(std, tf.float32) + dist = tfd.MultivariateNormalDiag(mean, std) + return dist + + @tf.function + def obs_step(self, prev_state, prev_action, embed, is_first, sample=True): + # if is_first.any(): + prev_state, prev_action = tf.nest.map_structure( + lambda x: tf.einsum( + 'b,b...->b...', 1.0 - is_first.astype(x.dtype), x), + (prev_state, prev_action)) + prior = self.img_step(prev_state, prev_action, sample) + x = tf.concat([prior['deter'], embed], -1) + x = self.get('obs_out', tfkl.Dense, self._hidden)(x) + x = self.get('obs_out_norm', NormLayer, self._norm)(x) + x = self._act(x) + stats = self._suff_stats_layer('obs_dist', x) + dist = self.get_dist(stats) + stoch = dist.sample() if sample else dist.mode() + post = {'stoch': stoch, 'deter': prior['deter'], **stats} + return post, prior + + @tf.function + def img_step(self, prev_state, prev_action, sample=True): + prev_stoch = self._cast(prev_state['stoch']) + prev_action = self._cast(prev_action) + if 
self._discrete: + shape = prev_stoch.shape[:-2] + [self._stoch * self._discrete] + prev_stoch = tf.reshape(prev_stoch, shape) + x = tf.concat([prev_stoch, prev_action], -1) + x = self.get('img_in', tfkl.Dense, self._hidden)(x) + x = self.get('img_in_norm', NormLayer, self._norm)(x) + x = self._act(x) + deter = prev_state['deter'] + deter, _ = self._cell(x, [deter]) + stats = self._suff_stats_ensemble(deter) + index = tf.random.uniform((), 0, self._ensemble, tf.int32) + stats = {k: v[index] for k, v in stats.items()} + dist = self.get_dist(stats) + stoch = dist.sample() if sample else dist.mode() + prior = {'stoch': stoch, 'deter': deter, **stats} + return prior + + def _suff_stats_ensemble(self, inp): + bs = list(inp.shape[:-1]) + inp = inp.reshape([-1, inp.shape[-1]]) + stats = [] + for k in range(self._ensemble): + x = self.get(f'img_out_{k}', tfkl.Dense, self._hidden)(inp) + x = self.get(f'img_out_norm_{k}', NormLayer, self._norm)(x) + x = self._act(x) + stats.append(self._suff_stats_layer(f'img_dist_{k}', x)) + stats = { + k: tf.stack([x[k] for x in stats], 0) + for k, v in stats[0].items()} + stats = { + k: v.reshape([v.shape[0]] + bs + list(v.shape[2:])) + for k, v in stats.items()} + return stats + + def _suff_stats_layer(self, name, x): + if self._discrete: + x = self.get(name, tfkl.Dense, self._stoch * self._discrete, None)(x) + logit = tf.reshape(x, x.shape[:-1] + [self._stoch, self._discrete]) + return {'logit': logit} + else: + x = self.get(name, tfkl.Dense, 2 * self._stoch, None)(x) + mean, std = tf.split(x, 2, -1) + std = { + 'softplus': lambda: tf.nn.softplus(std), + 'sigmoid': lambda: tf.nn.sigmoid(std), + 'sigmoid2': lambda: 2 * tf.nn.sigmoid(std / 2), + }[self._std_act]() + std = std + self._min_std + return {'mean': mean, 'std': std} + + def kl_loss(self, post, prior, forward, balance, free, free_avg): + kld = tfd.kl_divergence + sg = lambda x: tf.nest.map_structure(tf.stop_gradient, x) + lhs, rhs = (prior, post) if forward else (post, prior) + mix = balance if forward else (1 - balance) + if balance == 0.5: + value = kld(self.get_dist(lhs), self.get_dist(rhs)) + loss = tf.maximum(value, free).mean() + else: + value_lhs = value = kld(self.get_dist(lhs), self.get_dist(sg(rhs))) + value_rhs = kld(self.get_dist(sg(lhs)), self.get_dist(rhs)) + if free_avg: + loss_lhs = tf.maximum(value_lhs.mean(), free) + loss_rhs = tf.maximum(value_rhs.mean(), free) + else: + loss_lhs = tf.maximum(value_lhs, free).mean() + loss_rhs = tf.maximum(value_rhs, free).mean() + loss = mix * loss_lhs + (1 - mix) * loss_rhs + return loss, value + + +class Encoder(common.Module): + + def __init__( + self, shapes, cnn_keys=r'.*', mlp_keys=r'.*', act='elu', norm='none', + cnn_depth=48, cnn_kernels=(4, 4, 4, 4), mlp_layers=[400, 400, 400, 400]): + self.shapes = shapes + self.cnn_keys = [ + k for k, v in shapes.items() if re.match(cnn_keys, k) and len(v) == 3] + self.mlp_keys = [ + k for k, v in shapes.items() if re.match(mlp_keys, k) and len(v) == 1] + print('Encoder CNN inputs:', list(self.cnn_keys)) + print('Encoder MLP inputs:', list(self.mlp_keys)) + self._act = get_act(act) + self._norm = norm + self._cnn_depth = cnn_depth + self._cnn_kernels = cnn_kernels + self._mlp_layers = mlp_layers + + @tf.function + def __call__(self, data): + key, shape = list(self.shapes.items())[0] + batch_dims = data[key].shape[:-len(shape)] + data = { + k: tf.reshape(v, (-1,) + tuple(v.shape)[len(batch_dims):]) + for k, v in data.items()} + outputs = [] + if self.cnn_keys: + outputs.append(self._cnn({k: data[k] for k in 
self.cnn_keys})) + if self.mlp_keys: + outputs.append(self._mlp({k: data[k] for k in self.mlp_keys})) + output = tf.concat(outputs, -1) + return output.reshape(batch_dims + output.shape[1:]) + + def _cnn(self, data): + x = tf.concat(list(data.values()), -1) + x = x.astype(prec.global_policy().compute_dtype) + for i, kernel in enumerate(self._cnn_kernels): + depth = 2 ** i * self._cnn_depth + x = self.get(f'conv{i}', tfkl.Conv2D, depth, kernel, 2)(x) + x = self.get(f'convnorm{i}', NormLayer, self._norm)(x) + x = self._act(x) + return x.reshape(tuple(x.shape[:-3]) + (-1,)) + + def _mlp(self, data): + x = tf.concat(list(data.values()), -1) + x = x.astype(prec.global_policy().compute_dtype) + for i, width in enumerate(self._mlp_layers): + x = self.get(f'dense{i}', tfkl.Dense, width)(x) + x = self.get(f'densenorm{i}', NormLayer, self._norm)(x) + x = self._act(x) + return x + + +class Decoder(common.Module): + + def __init__( + self, shapes, cnn_keys=r'.*', mlp_keys=r'.*', act='elu', norm='none', + cnn_depth=48, cnn_kernels=(4, 4, 4, 4), mlp_layers=[400, 400, 400, 400]): + self._shapes = shapes + self.cnn_keys = [ + k for k, v in shapes.items() if re.match(cnn_keys, k) and len(v) == 3] + self.mlp_keys = [ + k for k, v in shapes.items() if re.match(mlp_keys, k) and len(v) == 1] + print('Decoder CNN outputs:', list(self.cnn_keys)) + print('Decoder MLP outputs:', list(self.mlp_keys)) + self._act = get_act(act) + self._norm = norm + self._cnn_depth = cnn_depth + self._cnn_kernels = cnn_kernels + self._mlp_layers = mlp_layers + + def __call__(self, features): + features = tf.cast(features, prec.global_policy().compute_dtype) + outputs = {} + if self.cnn_keys: + outputs.update(self._cnn(features)) + if self.mlp_keys: + outputs.update(self._mlp(features)) + return outputs + + def _cnn(self, features): + channels = {k: self._shapes[k][-1] for k in self.cnn_keys} + ConvT = tfkl.Conv2DTranspose + x = self.get('convin', tfkl.Dense, 32 * self._cnn_depth)(features) + x = tf.reshape(x, [-1, 1, 1, 32 * self._cnn_depth]) + for i, kernel in enumerate(self._cnn_kernels): + depth = 2 ** (len(self._cnn_kernels) - i - 2) * self._cnn_depth + act, norm = self._act, self._norm + if i == len(self._cnn_kernels) - 1: + depth, act, norm = sum(channels.values()), tf.identity, 'none' + x = self.get(f'conv{i}', ConvT, depth, kernel, 2)(x) + x = self.get(f'convnorm{i}', NormLayer, norm)(x) + x = act(x) + x = x.reshape(features.shape[:-1] + x.shape[1:]) + means = tf.split(x, list(channels.values()), -1) + dists = { + key: tfd.Independent(tfd.Normal(mean, 1), 3) + for (key, shape), mean in zip(channels.items(), means)} + return dists + + def _mlp(self, features): + shapes = {k: self._shapes[k] for k in self.mlp_keys} + x = features + for i, width in enumerate(self._mlp_layers): + x = self.get(f'dense{i}', tfkl.Dense, width)(x) + x = self.get(f'densenorm{i}', NormLayer, self._norm)(x) + x = self._act(x) + dists = {} + for key, shape in shapes.items(): + dists[key] = self.get(f'dense_{key}', DistLayer, shape)(x) + return dists + + +class MLP(common.Module): + + def __init__(self, shape, layers, units, act='elu', norm='none', **out): + self._shape = (shape,) if isinstance(shape, int) else shape + self._layers = layers + self._units = units + self._norm = norm + self._act = get_act(act) + self._out = out + + def __call__(self, features): + x = tf.cast(features, prec.global_policy().compute_dtype) + x = x.reshape([-1, x.shape[-1]]) + for index in range(self._layers): + x = self.get(f'dense{index}', tfkl.Dense, self._units)(x) + x = 
self.get(f'norm{index}', NormLayer, self._norm)(x) + x = self._act(x) + x = x.reshape(features.shape[:-1] + [x.shape[-1]]) + return self.get('out', DistLayer, self._shape, **self._out)(x) + + +class GRUCell(tf.keras.layers.AbstractRNNCell): + + def __init__(self, size, norm=False, act='tanh', update_bias=-1, **kwargs): + super().__init__() + self._size = size + self._act = get_act(act) + self._norm = norm + self._update_bias = update_bias + self._layer = tfkl.Dense(3 * size, use_bias=norm is not None, **kwargs) + if norm: + self._norm = tfkl.LayerNormalization(dtype=tf.float32) + + @property + def state_size(self): + return self._size + + @tf.function + def call(self, inputs, state): + state = state[0] # Keras wraps the state in a list. + parts = self._layer(tf.concat([inputs, state], -1)) + if self._norm: + dtype = parts.dtype + parts = tf.cast(parts, tf.float32) + parts = self._norm(parts) + parts = tf.cast(parts, dtype) + reset, cand, update = tf.split(parts, 3, -1) + reset = tf.nn.sigmoid(reset) + cand = self._act(reset * cand) + update = tf.nn.sigmoid(update + self._update_bias) + output = update * cand + (1 - update) * state + return output, [output] + + +class DistLayer(common.Module): + + def __init__( + self, shape, dist='mse', min_std=0.1, init_std=0.0): + self._shape = shape + self._dist = dist + self._min_std = min_std + self._init_std = init_std + + def __call__(self, inputs): + out = self.get('out', tfkl.Dense, np.prod(self._shape))(inputs) + out = tf.reshape(out, tf.concat([tf.shape(inputs)[:-1], self._shape], 0)) + out = tf.cast(out, tf.float32) + if self._dist in ('normal', 'tanh_normal', 'trunc_normal'): + std = self.get('std', tfkl.Dense, np.prod(self._shape))(inputs) + std = tf.reshape(std, tf.concat([tf.shape(inputs)[:-1], self._shape], 0)) + std = tf.cast(std, tf.float32) + if self._dist == 'mse': + dist = tfd.Normal(out, 1.0) + return tfd.Independent(dist, len(self._shape)) + if self._dist == 'normal': + dist = tfd.Normal(out, std) + return tfd.Independent(dist, len(self._shape)) + if self._dist == 'binary': + dist = tfd.Bernoulli(out) + return tfd.Independent(dist, len(self._shape)) + if self._dist == 'tanh_normal': + mean = 5 * tf.tanh(out / 5) + std = tf.nn.softplus(std + self._init_std) + self._min_std + dist = tfd.Normal(mean, std) + dist = tfd.TransformedDistribution(dist, common.TanhBijector()) + dist = tfd.Independent(dist, len(self._shape)) + return common.SampleDist(dist) + if self._dist == 'trunc_normal': + std = 2 * tf.nn.sigmoid((std + self._init_std) / 2) + self._min_std + dist = common.TruncNormalDist(tf.tanh(out), std, -1, 1) + return tfd.Independent(dist, 1) + if self._dist == 'onehot': + return common.OneHotDist(out) + raise NotImplementedError(self._dist) + + +class NormLayer(common.Module): + + def __init__(self, name): + if name == 'none': + self._layer = None + elif name == 'layer': + self._layer = tfkl.LayerNormalization() + else: + raise NotImplementedError(name) + + def __call__(self, features): + if not self._layer: + return features + return self._layer(features) + + +def get_act(name): + if name == 'none': + return tf.identity + if name == 'mish': + return lambda x: x * tf.math.tanh(tf.nn.softplus(x)) + elif hasattr(tf.nn, name): + return getattr(tf.nn, name) + elif hasattr(tf, name): + return getattr(tf, name) + else: + raise NotImplementedError(name) diff --git a/offlinedv2/dreamerv2/common/other.py b/offlinedv2/dreamerv2/common/other.py new file mode 100644 index 
0000000000000000000000000000000000000000..c7be688c60c83b7aa440976d76ed6912dabce83b --- /dev/null +++ b/offlinedv2/dreamerv2/common/other.py @@ -0,0 +1,199 @@ +import collections +import contextlib +import re +import time + +import numpy as np +import tensorflow as tf +from tensorflow_probability import distributions as tfd + +from . import dists +from . import tfutils + + +class RandomAgent: + def __init__(self, act_space, logprob=False): + self.act_space = act_space['action'] + self.logprob = logprob + if hasattr(self.act_space, 'n'): + self._dist = dists.OneHotDist(tf.zeros(self.act_space.n)) + else: + dist = tfd.Uniform(self.act_space.low, self.act_space.high) + self._dist = tfd.Independent(dist, 1) + + def __call__(self, obs, state=None, mode=None): + action = self._dist.sample(len(obs['is_first'])) + output = {'action': action} + if self.logprob: + output['logprob'] = self._dist.log_prob(action) + return output, None + + +def static_scan(fn, inputs, start, reverse=False): + last = start + outputs = [[] for _ in tf.nest.flatten(start)] + indices = range(tf.nest.flatten(inputs)[0].shape[0]) + if reverse: + indices = reversed(indices) + for index in indices: + inp = tf.nest.map_structure(lambda x: x[index], inputs) + last = fn(last, inp) + [o.append(l) for o, l in zip(outputs, tf.nest.flatten(last))] + if reverse: + outputs = [list(reversed(x)) for x in outputs] + outputs = [tf.stack(x, 0) for x in outputs] + return tf.nest.pack_sequence_as(start, outputs) + + +def schedule(string, step): + try: + return float(string) + except ValueError: + step = tf.cast(step, tf.float32) + match = re.match(r'linear\((.+),(.+),(.+)\)', string) + if match: + initial, final, duration = [float(group) for group in match.groups()] + mix = tf.clip_by_value(step / duration, 0, 1) + return (1 - mix) * initial + mix * final + match = re.match(r'warmup\((.+),(.+)\)', string) + if match: + warmup, value = [float(group) for group in match.groups()] + scale = tf.clip_by_value(step / warmup, 0, 1) + return scale * value + match = re.match(r'exp\((.+),(.+),(.+)\)', string) + if match: + initial, final, halflife = [float(group) for group in match.groups()] + return (initial - final) * 0.5 ** (step / halflife) + final + match = re.match(r'horizon\((.+),(.+),(.+)\)', string) + if match: + initial, final, duration = [float(group) for group in match.groups()] + mix = tf.clip_by_value(step / duration, 0, 1) + horizon = (1 - mix) * initial + mix * final + return 1 - 1 / horizon + raise NotImplementedError(string) + + +def lambda_return( + reward, value, pcont, bootstrap, lambda_, axis): + # Setting lambda=1 gives a discounted Monte Carlo return. + # Setting lambda=0 gives a fixed 1-step return. 
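+ # Concretely, the returns below follow the reverse-time recursion
+ #   R_t = r_t + pcont_t * ((1 - lambda) * V(s_{t+1}) + lambda * R_{t+1}),
+ # seeded with `bootstrap` (zeros if no bootstrap value is provided).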
+ assert reward.shape.ndims == value.shape.ndims, (reward.shape, value.shape) + if isinstance(pcont, (int, float)): + pcont = pcont * tf.ones_like(reward) + dims = list(range(reward.shape.ndims)) + dims = [axis] + dims[1:axis] + [0] + dims[axis + 1:] + if axis != 0: + reward = tf.transpose(reward, dims) + value = tf.transpose(value, dims) + pcont = tf.transpose(pcont, dims) + if bootstrap is None: + bootstrap = tf.zeros_like(value[-1]) + next_values = tf.concat([value[1:], bootstrap[None]], 0) + inputs = reward + pcont * next_values * (1 - lambda_) + returns = static_scan( + lambda agg, cur: cur[0] + cur[1] * lambda_ * agg, + (inputs, pcont), bootstrap, reverse=True) + if axis != 0: + returns = tf.transpose(returns, dims) + return returns + + +def action_noise(action, amount, act_space): + if amount == 0: + return action + amount = tf.cast(amount, action.dtype) + if hasattr(act_space, 'n'): + probs = amount / action.shape[-1] + (1 - amount) * action + return dists.OneHotDist(probs=probs).sample() + else: + return tf.clip_by_value(tfd.Normal(action, amount).sample(), -1, 1) + + +class StreamNorm(tfutils.Module): + def __init__(self, shape=(), momentum=0.99, scale=1.0, eps=1e-8): + # Momentum of 0 normalizes only based on the current batch. + # Momentum of 1 disables normalization. + self._shape = tuple(shape) + self._momentum = momentum + self._scale = scale + self._eps = eps + self.mag = tf.Variable(tf.ones(shape, tf.float64), False) + + def __call__(self, inputs): + metrics = {} + self.update(inputs) + metrics['mean'] = inputs.mean() + metrics['std'] = inputs.std() + outputs = self.transform(inputs) + metrics['normed_mean'] = outputs.mean() + metrics['normed_std'] = outputs.std() + return outputs, metrics + + def reset(self): + self.mag.assign(tf.ones_like(self.mag)) + + def update(self, inputs): + batch = inputs.reshape((-1,) + self._shape) + mag = tf.abs(batch).mean(0).astype(tf.float64) + self.mag.assign(self._momentum * self.mag + (1 - self._momentum) * mag) + + def transform(self, inputs): + values = inputs.reshape((-1,) + self._shape) + values /= self.mag.astype(inputs.dtype)[None] + self._eps + values *= self._scale + return values.reshape(inputs.shape) + + +class Timer: + def __init__(self): + self._indurs = collections.defaultdict(list) + self._outdurs = collections.defaultdict(list) + self._start_times = {} + self._end_times = {} + + @contextlib.contextmanager + def section(self, name): + self.start(name) + yield + self.end(name) + + def wrap(self, function, name): + def wrapped(*args, **kwargs): + with self.section(name): + return function(*args, **kwargs) + + return wrapped + + def start(self, name): + now = time.time() + self._start_times[name] = now + if name in self._end_times: + last = self._end_times[name] + self._outdurs[name].append(now - last) + + def end(self, name): + now = time.time() + self._end_times[name] = now + self._indurs[name].append(now - self._start_times[name]) + + def result(self): + metrics = {} + for key in self._indurs: + indurs = self._indurs[key] + outdurs = self._outdurs[key] + metrics[f'timer_count_{key}'] = len(indurs) + metrics[f'timer_inside_{key}'] = np.sum(indurs) + metrics[f'timer_outside_{key}'] = np.sum(outdurs) + indurs.clear() + outdurs.clear() + return metrics + + +class CarryOverState: + def __init__(self, fn): + self._fn = fn + self._state = None + + def __call__(self, *args): + self._state, out = self._fn(*args, self._state) + return out diff --git a/offlinedv2/dreamerv2/common/plot.py b/offlinedv2/dreamerv2/common/plot.py new file 
mode 100644 index 0000000000000000000000000000000000000000..dce0c933ca05b3e0f50324dfe02d200dbae03209 --- /dev/null +++ b/offlinedv2/dreamerv2/common/plot.py @@ -0,0 +1,570 @@ +import argparse +import collections +import functools +import itertools +import json +import multiprocessing as mp +import os +import pathlib +import re +import subprocess +import warnings + +os.environ['NO_AT_BRIDGE'] = '1' # Hide X org false warning. + +import matplotlib + +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.ticker as ticker +import numpy as np +import pandas as pd + +np.set_string_function(lambda x: f'') + +Run = collections.namedtuple('Run', 'task method seed xs ys') + +PALETTES = dict( + discrete=( + '#377eb8', '#4daf4a', '#984ea3', '#e41a1c', '#ff7f00', '#a65628', + '#f781bf', '#888888', '#a6cee3', '#b2df8a', '#cab2d6', '#fb9a99', + ), + contrast=( + '#0022ff', '#33aa00', '#ff0011', '#ddaa00', '#cc44dd', '#0088aa', + '#001177', '#117700', '#990022', '#885500', '#553366', '#006666', + ), + gradient=( + '#fde725', '#a0da39', '#4ac16d', '#1fa187', '#277f8e', '#365c8d', + '#46327e', '#440154', + ), + baselines=( + '#222222', '#666666', '#aaaaaa', '#cccccc', + ), +) + +LEGEND = dict( + fontsize='medium', numpoints=1, labelspacing=0, columnspacing=1.2, + handlelength=1.5, handletextpad=0.5, loc='lower center') + +DEFAULT_BASELINES = ['d4pg', 'rainbow_sticky', 'human_gamer', 'impala'] + + +def find_keys(args): + filenames = [] + for indir in args.indir: + task = next(indir.iterdir()) # First only. + for method in task.iterdir(): + seed = next(indir.iterdir()) # First only. + filenames += list(seed.glob('**/*.jsonl')) + keys = set() + for filename in filenames: + keys |= set(load_jsonl(filename).columns) + print(f'Keys ({len(keys)}):', ', '.join(keys), flush=True) + + +def load_runs(args): + total, toload = [], [] + for indir in args.indir: + filenames = list(indir.glob('**/*.jsonl')) + total += filenames + for filename in filenames: + task, method, seed = filename.relative_to(indir).parts[:-1] + if not any(p.search(task) for p in args.tasks): + continue + if not any(p.search(method) for p in args.methods): + continue + toload.append((filename, indir)) + print(f'Loading {len(toload)} of {len(total)} runs...') + jobs = [functools.partial(load_run, f, i, args) for f, i in toload] + # Disable async data loading: + # runs = [j() for j in jobs] + with mp.Pool(10) as pool: + promises = [pool.apply_async(j) for j in jobs] + runs = [p.get() for p in promises] + runs = [r for r in runs if r is not None] + return runs + + +def load_run(filename, indir, args): + task, method, seed = filename.relative_to(indir).parts[:-1] + prefix = f'indir{args.indir.index(indir) + 1}_' + if task == 'atari_jamesbond': + task = 'atari_james_bond' + seed = prefix + seed + if args.prefix: + method = prefix + method + df = load_jsonl(filename) + if df is None: + print('Skipping empty run') + return + try: + df = df[[args.xaxis, args.yaxis]].dropna() + if args.maxval: + df = df.replace([+np.inf], +args.maxval) + df = df.replace([-np.inf], -args.maxval) + df[args.yaxis] = df[args.yaxis].clip(-args.maxval, +args.maxval) + except KeyError: + return + xs = df[args.xaxis].to_numpy() + if args.xmult != 1: + xs = xs.astype(np.float32) * args.xmult + ys = df[args.yaxis].to_numpy() + bins = { + 'atari': 1e6, + 'dmc': 1e4, + 'crafter': 1e4, + }.get(task.split('_')[0], 1e5) if args.bins == -1 else args.bins + if bins: + borders = np.arange(0, xs.max() + 1e-8, bins) + xs, ys = bin_scores(xs, ys, borders) + if not len(xs): + 
print('Skipping empty run', task, method, seed) + return + return Run(task, method, seed, xs, ys) + + +def load_baselines(patterns, prefix=False): + runs = [] + directory = pathlib.Path(__file__).parent.parent / 'scores' + for filename in directory.glob('**/*_baselines.json'): + for task, methods in json.loads(filename.read_text()).items(): + for method, score in methods.items(): + if prefix: + method = f'baseline_{method}' + if not any(p.search(method) for p in patterns): + continue + runs.append(Run(task, method, None, None, score)) + return runs + + +def stats(runs, baselines): + tasks = sorted(set(r.task for r in runs)) + methods = sorted(set(r.method for r in runs)) + seeds = sorted(set(r.seed for r in runs)) + baseline = sorted(set(r.method for r in baselines)) + print('Loaded', len(runs), 'runs.') + print(f'Tasks ({len(tasks)}):', ', '.join(tasks)) + print(f'Methods ({len(methods)}):', ', '.join(methods)) + print(f'Seeds ({len(seeds)}):', ', '.join(seeds)) + print(f'Baselines ({len(baseline)}):', ', '.join(baseline)) + + +def order_methods(runs, baselines, args): + methods = [] + for pattern in args.methods: + for method in sorted(set(r.method for r in runs)): + if pattern.search(method): + if method not in methods: + methods.append(method) + if method not in args.colors: + index = len(args.colors) % len(args.palette) + args.colors[method] = args.palette[index] + non_baseline_colors = len(args.colors) + for pattern in args.baselines: + for method in sorted(set(r.method for r in baselines)): + if pattern.search(method): + if method not in methods: + methods.append(method) + if method not in args.colors: + index = len(args.colors) - non_baseline_colors + index = index % len(PALETTES['baselines']) + args.colors[method] = PALETTES['baselines'][index] + return methods + + +def figure(runs, methods, args): + tasks = sorted(set(r.task for r in runs if r.xs is not None)) + rows = int(np.ceil((len(tasks) + len(args.add)) / args.cols)) + figsize = args.size[0] * args.cols, args.size[1] * rows + fig, axes = plt.subplots(rows, args.cols, figsize=figsize, squeeze=False) + for task, ax in zip(tasks, axes.flatten()): + relevant = [r for r in runs if r.task == task] + plot(task, ax, relevant, methods, args) + for name, ax in zip(args.add, axes.flatten()[len(tasks):]): + ax.set_facecolor((0.9, 0.9, 0.9)) + if name == 'median': + plot_combined( + 'combined_median', ax, runs, methods, args, + agg=lambda x: np.nanmedian(x, -1)) + elif name == 'mean': + plot_combined( + 'combined_mean', ax, runs, methods, args, + agg=lambda x: np.nanmean(x, -1)) + elif name == 'gamer_median': + plot_combined( + 'combined_gamer_median', ax, runs, methods, args, + lo='random', hi='human_gamer', + agg=lambda x: np.nanmedian(x, -1)) + elif name == 'gamer_mean': + plot_combined( + 'combined_gamer_mean', ax, runs, methods, args, + lo='random', hi='human_gamer', + agg=lambda x: np.nanmean(x, -1)) + elif name == 'record_mean': + plot_combined( + 'combined_record_mean', ax, runs, methods, args, + lo='random', hi='record', + agg=lambda x: np.nanmean(x, -1)) + elif name == 'clip_record_mean': + plot_combined( + 'combined_clipped_record_mean', ax, runs, methods, args, + lo='random', hi='record', clip=True, + agg=lambda x: np.nanmean(x, -1)) + elif name == 'seeds': + plot_combined( + 'combined_seeds', ax, runs, methods, args, + agg=lambda x: np.isfinite(x).sum(-1)) + elif name == 'human_above': + plot_combined( + 'combined_above_human_gamer', ax, runs, methods, args, + agg=lambda y: (y >= 1.0).astype(float).sum(-1)) + elif name == 
'human_below': + plot_combined( + 'combined_below_human_gamer', ax, runs, methods, args, + agg=lambda y: (y <= 1.0).astype(float).sum(-1)) + else: + raise NotImplementedError(name) + if args.xlim: + for ax in axes[:-1].flatten(): + ax.xaxis.get_offset_text().set_visible(False) + if args.xlabel: + for ax in axes[-1]: + ax.set_xlabel(args.xlabel) + if args.ylabel: + for ax in axes[:, 0]: + ax.set_ylabel(args.ylabel) + for ax in axes.flatten()[len(tasks) + len(args.add):]: + ax.axis('off') + legend(fig, args.labels, ncol=args.legendcols, **LEGEND) + return fig + + +def plot(task, ax, runs, methods, args): + assert runs + try: + title = task.split('_', 1)[1].replace('_', ' ').title() + except IndexError: + title = task.title() + ax.set_title(title) + xlim = [+np.inf, -np.inf] + for index, method in enumerate(methods): + relevant = [r for r in runs if r.method == method] + if not relevant: + continue + if any(r.xs is None for r in relevant): + baseline(index, method, ax, relevant, args) + else: + if args.agg == 'none': + xs, ys = curve_lines(index, task, method, ax, relevant, args) + else: + xs, ys = curve_area(index, task, method, ax, relevant, args) + if len(xs) == len(ys) == 0: + print(f'Skipping empty: {task} {method}') + continue + xlim = [min(xlim[0], np.nanmin(xs)), max(xlim[1], np.nanmax(xs))] + ax.ticklabel_format(axis='x', style='sci', scilimits=(0, 0)) + steps = [1, 2, 2.5, 5, 10] + ax.xaxis.set_major_locator(ticker.MaxNLocator(args.xticks, steps=steps)) + ax.yaxis.set_major_locator(ticker.MaxNLocator(args.yticks, steps=steps)) + if np.isfinite(xlim).all(): + ax.set_xlim(args.xlim or xlim) + if args.xlim: + ticks = sorted({*ax.get_xticks(), *args.xlim}) + ticks = [x for x in ticks if args.xlim[0] <= x <= args.xlim[1]] + ax.set_xticks(ticks) + if args.ylim: + ax.set_ylim(args.ylim) + if args.ylimticks: + ticks = sorted({*ax.get_yticks(), *args.ylim}) + ticks = [x for x in ticks if args.ylim[0] <= x <= args.ylim[1]] + ax.set_yticks(ticks) + + +def plot_combined( + name, ax, runs, methods, args, agg, lo=None, hi=None, clip=False): + tasks = sorted(set(run.task for run in runs if run.xs is not None)) + seeds = list(set(run.seed for run in runs)) + runs = [r for r in runs if r.task in tasks] # Discard unused baselines. + # Bin all runs onto the same X steps. + borders = sorted( + [r.xs for r in runs if r.xs is not None], + key=lambda x: np.nanmax(x))[-1] + for index, run in enumerate(runs): + if run.xs is None: + continue + xs, ys = bin_scores(run.xs, run.ys, borders, fill='last') + runs[index] = run._replace(xs=xs, ys=ys) + # Per-task normalization by low and high baseline. 
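+ # Scores are rescaled per task as (score - lo) / (hi - lo), where lo and hi
+ # are baseline scores (e.g. 'random' and 'human_gamer'); with clip=True the
+ # normalized scores are additionally capped at 1.0.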
+ if lo or hi: + mins = collections.defaultdict(list) + maxs = collections.defaultdict(list) + [mins[r.task].append(r.ys) for r in load_baselines([re.compile(lo)])] + [maxs[r.task].append(r.ys) for r in load_baselines([re.compile(hi)])] + mins = {task: min(ys) for task, ys in mins.items() if task in tasks} + maxs = {task: max(ys) for task, ys in maxs.items() if task in tasks} + missing_baselines = [] + for task in tasks: + if task not in mins or task not in maxs: + missing_baselines.append(task) + if set(missing_baselines) == set(tasks): + print(f'No baselines found to normalize any tasks in {name} plot.') + else: + for task in missing_baselines: + print(f'No baselines found to normalize {task} in {name} plot.') + for index, run in enumerate(runs): + if run.task not in mins or run.task not in maxs: + continue + ys = (run.ys - mins[run.task]) / (maxs[run.task] - mins[run.task]) + if clip: + ys = np.minimum(ys, 1.0) + runs[index] = run._replace(ys=ys) + # Aggregate across tasks but not methods or seeds. + combined = [] + for method, seed in itertools.product(methods, seeds): + relevant = [r for r in runs if r.method == method and r.seed == seed] + if not relevant: + continue + if relevant[0].xs is None: + xs, ys = None, np.array([r.ys for r in relevant]) + else: + xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in relevant])) + with warnings.catch_warnings(): # Ignore empty slice warnings. + warnings.simplefilter('ignore', category=RuntimeWarning) + combined.append(Run('combined', method, seed, xs, agg(ys))) + plot(name, ax, combined, methods, args) + + +def curve_lines(index, task, method, ax, runs, args): + zorder = 10000 - 10 * index - 1 + for run in runs: + color = args.colors[method] + ax.plot(run.xs, run.ys, label=method, color=color, zorder=zorder) + xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in runs])) + return xs, ys + + +def curve_area(index, task, method, ax, runs, args): + xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in runs])) + with warnings.catch_warnings(): # NaN buckets remain NaN. 
+ warnings.simplefilter('ignore', category=RuntimeWarning) + if args.agg == 'std1': + mean, std = np.nanmean(ys, -1), np.nanstd(ys, -1) + lo, mi, hi = mean - std, mean, mean + std + elif args.agg == 'per0': + lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (0, 50, 100)] + elif args.agg == 'per5': + lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (5, 50, 95)] + elif args.agg == 'per25': + lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (25, 50, 75)] + else: + raise NotImplementedError(args.agg) + color = args.colors[method] + kw = dict(color=color, zorder=1000 - 10 * index, alpha=0.1, linewidths=0) + mask = ~np.isnan(mi) + xs, lo, mi, hi = xs[mask], lo[mask], mi[mask], hi[mask] + ax.fill_between(xs, lo, hi, **kw) + ax.plot(xs, mi, label=method, color=color, zorder=10000 - 10 * index - 1) + return xs, mi + + +def baseline(index, method, ax, runs, args): + assert all(run.xs is None for run in runs) + ys = np.array([run.ys for run in runs]) + mean, std = ys.mean(), ys.std() + color = args.colors[method] + kw = dict(color=color, zorder=500 - 20 * index - 1, alpha=0.1, linewidths=0) + ax.fill_between([-np.inf, np.inf], [mean - std] * 2, [mean + std] * 2, **kw) + kw = dict(ls='--', color=color, zorder=5000 - 10 * index - 1) + ax.axhline(mean, label=method, **kw) + + +def legend(fig, mapping=None, **kwargs): + entries = {} + for ax in fig.axes: + for handle, label in zip(*ax.get_legend_handles_labels()): + if mapping and label in mapping: + label = mapping[label] + entries[label] = handle + leg = fig.legend(entries.values(), entries.keys(), **kwargs) + leg.get_frame().set_edgecolor('white') + extent = leg.get_window_extent(fig.canvas.get_renderer()) + extent = extent.transformed(fig.transFigure.inverted()) + yloc, xloc = kwargs['loc'].split() + y0 = dict(lower=extent.y1, center=0, upper=0)[yloc] + y1 = dict(lower=1, center=1, upper=extent.y0)[yloc] + x0 = dict(left=extent.x1, center=0, right=0)[xloc] + x1 = dict(left=1, center=1, right=extent.x0)[xloc] + fig.tight_layout(rect=[x0, y0, x1, y1], h_pad=0.5, w_pad=0.5) + + +def save(fig, args): + args.outdir.mkdir(parents=True, exist_ok=True) + filename = args.outdir / 'curves.png' + fig.savefig(filename, dpi=args.dpi) + print('Saved to', filename) + filename = args.outdir / 'curves.pdf' + fig.savefig(filename) + try: + subprocess.call(['pdfcrop', str(filename), str(filename)]) + except FileNotFoundError: + print('Install texlive-extra-utils to crop PDF outputs.') + + +def bin_scores(xs, ys, borders, reducer=np.nanmean, fill='nan'): + order = np.argsort(xs) + xs, ys = xs[order], ys[order] + binned = [] + with warnings.catch_warnings(): # Empty buckets become NaN. 
+ warnings.simplefilter('ignore', category=RuntimeWarning) + for start, stop in zip(borders[:-1], borders[1:]): + left = (xs <= start).sum() + right = (xs <= stop).sum() + if left < right: + value = reducer(ys[left:right]) + elif binned: + value = {'nan': np.nan, 'last': binned[-1]}[fill] + else: + value = np.nan + binned.append(value) + return borders[1:], np.array(binned) + + +def stack_scores(multiple_xs, multiple_ys, fill='last'): + longest_xs = sorted(multiple_xs, key=lambda x: len(x))[-1] + multiple_padded_ys = [] + for xs, ys in zip(multiple_xs, multiple_ys): + assert (longest_xs[:len(xs)] == xs).all(), (list(xs), list(longest_xs)) + value = {'nan': np.nan, 'last': ys[-1]}[fill] + padding = [value] * (len(longest_xs) - len(xs)) + padded_ys = np.concatenate([ys, padding]) + multiple_padded_ys.append(padded_ys) + stacked_ys = np.stack(multiple_padded_ys, -1) + return longest_xs, stacked_ys + + +def load_jsonl(filename): + try: + with filename.open() as f: + lines = list(f.readlines()) + records = [] + for index, line in enumerate(lines): + try: + records.append(json.loads(line)) + except Exception: + if index == len(lines) - 1: + continue # Silently skip last line if it is incomplete. + raise ValueError( + f'Skipping invalid JSON line ({index + 1}/{len(lines) + 1}) in' + f'{filename}: {line}') + return pd.DataFrame(records) + except ValueError as e: + print('Invalid', filename, e) + return None + + +def save_runs(runs, filename): + filename.parent.mkdir(parents=True, exist_ok=True) + records = [] + for run in runs: + if run.xs is None: + continue + records.append(dict( + task=run.task, method=run.method, seed=run.seed, + xs=run.xs.tolist(), ys=run.ys.tolist())) + runs = json.dumps(records) + filename.write_text(runs) + print('Saved', filename) + + +def main(args): + find_keys(args) + runs = load_runs(args) + save_runs(runs, args.outdir / 'runs.json') + baselines = load_baselines(args.baselines, args.prefix) + stats(runs, baselines) + methods = order_methods(runs, baselines, args) + if not runs: + print('Noting to plot.') + return + # Adjust options based on loaded runs. 
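+ # An 'auto' entry in --add expands to the Atari aggregates (gamer/record
+ # means and medians) when Atari tasks are present, to plain mean/median for
+ # other suites, and to nothing when only a single task was loaded.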
+ tasks = set(r.task for r in runs) + if 'auto' in args.add: + index = args.add.index('auto') + del args.add[index] + atari = any(run.task.startswith('atari_') for run in runs) + if len(tasks) < 2: + pass + elif atari: + args.add[index:index] = [ + 'gamer_median', 'gamer_mean', 'record_mean', 'clip_record_mean', + ] + else: + args.add[index:index] = ['mean', 'median'] + args.cols = min(args.cols, len(tasks) + len(args.add)) + args.legendcols = min(args.legendcols, args.cols) + print('Plotting...') + fig = figure(runs + baselines, methods, args) + save(fig, args) + + +def parse_args(): + boolean = lambda x: bool(['False', 'True'].index(x)) + parser = argparse.ArgumentParser() + parser.add_argument('--indir', nargs='+', type=pathlib.Path, required=True) + parser.add_argument('--indir-prefix', type=pathlib.Path) + parser.add_argument('--outdir', type=pathlib.Path, required=True) + parser.add_argument('--subdir', type=boolean, default=True) + parser.add_argument('--xaxis', type=str, default='step') + parser.add_argument('--yaxis', type=str, default='eval_return') + parser.add_argument('--tasks', nargs='+', default=[r'.*']) + parser.add_argument('--methods', nargs='+', default=[r'.*']) + parser.add_argument('--baselines', nargs='+', default=DEFAULT_BASELINES) + parser.add_argument('--prefix', type=boolean, default=False) + parser.add_argument('--bins', type=float, default=-1) + parser.add_argument('--agg', type=str, default='std1') + parser.add_argument('--size', nargs=2, type=float, default=[2.5, 2.3]) + parser.add_argument('--dpi', type=int, default=80) + parser.add_argument('--cols', type=int, default=6) + parser.add_argument('--xlim', nargs=2, type=float, default=None) + parser.add_argument('--ylim', nargs=2, type=float, default=None) + parser.add_argument('--ylimticks', type=boolean, default=True) + parser.add_argument('--xlabel', type=str, default=None) + parser.add_argument('--ylabel', type=str, default=None) + parser.add_argument('--xticks', type=int, default=6) + parser.add_argument('--yticks', type=int, default=5) + parser.add_argument('--xmult', type=float, default=1) + parser.add_argument('--labels', nargs='+', default=None) + parser.add_argument('--palette', nargs='+', default=['contrast']) + parser.add_argument('--legendcols', type=int, default=4) + parser.add_argument('--colors', nargs='+', default={}) + parser.add_argument('--maxval', type=float, default=0) + parser.add_argument('--add', nargs='+', type=str, default=['auto', 'seeds']) + args = parser.parse_args() + if args.subdir: + args.outdir /= args.indir[0].stem + if args.indir_prefix: + args.indir = [args.indir_prefix / indir for indir in args.indir] + args.indir = [d.expanduser() for d in args.indir] + args.outdir = args.outdir.expanduser() + if args.labels: + assert len(args.labels) % 2 == 0 + args.labels = {k: v for k, v in zip(args.labels[:-1], args.labels[1:])} + if args.colors: + assert len(args.colors) % 2 == 0 + args.colors = {k: v for k, v in zip(args.colors[:-1], args.colors[1:])} + args.tasks = [re.compile(p) for p in args.tasks] + args.methods = [re.compile(p) for p in args.methods] + args.baselines = [re.compile(p) for p in args.baselines] + if 'return' not in args.yaxis: + args.baselines = [] + if args.prefix is None: + args.prefix = len(args.indir) > 1 + if len(args.palette) == 1 and args.palette[0] in PALETTES: + args.palette = 10 * PALETTES[args.palette[0]] + if len(args.add) == 1 and args.add[0] == 'none': + args.add = [] + return args + + +if __name__ == '__main__': + main(parse_args()) diff --git 
a/offlinedv2/dreamerv2/common/replay.py b/offlinedv2/dreamerv2/common/replay.py new file mode 100644 index 0000000000000000000000000000000000000000..86b6b08cbba6f3e786361a84111bb4a4596f0018 --- /dev/null +++ b/offlinedv2/dreamerv2/common/replay.py @@ -0,0 +1,321 @@ +import collections +import datetime +import io +import pathlib +import uuid +import random +from sklearn.model_selection import train_test_split + +import numpy as np +import tensorflow as tf + + +class Replay: + def __init__( + self, directory, capacity=0, ongoing=False, minlen=1, maxlen=0, + prioritize_ends=False, disable_save=False): + self._directory = pathlib.Path(directory).expanduser() + self._directory.mkdir(parents=True, exist_ok=True) + self._capacity = capacity + self._ongoing = ongoing + self._minlen = minlen + self._maxlen = maxlen + self._prioritize_ends = prioritize_ends + self._random = np.random.RandomState() + # filename -> key -> value_sequence + self._complete_eps = load_episodes(self._directory, capacity, minlen) + # worker -> key -> value_sequence + self._ongoing_eps = collections.defaultdict( + lambda: collections.defaultdict(list)) + self._total_episodes, self._total_steps = count_episodes(directory) + self._loaded_episodes = len(self._complete_eps) + self._loaded_steps = sum(eplen(x) for x in self._complete_eps.values()) + self._disable_save = disable_save + + @property + def stats(self): + return { + 'total_steps': self._total_steps, + 'total_episodes': self._total_episodes, + 'loaded_steps': self._loaded_steps, + 'loaded_episodes': self._loaded_episodes, + } + + def add_step(self, transition, worker=0): + episode = self._ongoing_eps[worker] + for key, value in transition.items(): + episode[key].append(value) + if transition['is_last']: + self.add_episode(episode) + episode.clear() + + def add_episode(self, episode): + length = eplen(episode) + if length < self._minlen: + print(f'Skipping short episode of length {length}.') + return + self._total_steps += length + self._loaded_steps += length + self._total_episodes += 1 + self._loaded_episodes += 1 + episode = {key: convert(value) for key, value in episode.items()} + filename = save_episode(self._directory, episode, disable=self._disable_save) + self._complete_eps[str(filename)] = episode + self._enforce_limit() + + def dataset(self, batch, length): + example = next(iter(self._generate_chunks(length))) + dataset = tf.data.Dataset.from_generator( + lambda: self._generate_chunks(length), + {k: v.dtype for k, v in example.items()}, + {k: v.shape for k, v in example.items()}) + dataset = dataset.batch(batch, drop_remainder=True) + dataset = dataset.prefetch(5) + return dataset + + def _generate_chunks(self, length): + sequence = self._sample_sequence() + while True: + chunk = collections.defaultdict(list) + added = 0 + while added < length: + needed = length - added + adding = {k: v[:needed] for k, v in sequence.items()} + sequence = {k: v[needed:] for k, v in sequence.items()} + for key, value in adding.items(): + chunk[key].append(value) + added += len(adding['action']) + if len(sequence['action']) < 1: + sequence = self._sample_sequence() + chunk = {k: np.concatenate(v) for k, v in chunk.items()} + yield chunk + + def _sample_sequence(self): + episodes = list(self._complete_eps.values()) + if self._ongoing: + episodes += [ + x for x in self._ongoing_eps.values() + if eplen(x) >= self._minlen] + episode = self._random.choice(episodes) + total = len(episode['action']) + length = total + if self._maxlen: + length = min(length, self._maxlen) + # Randomize 
length to avoid all chunks ending at the same time in case the + # episodes are all of the same length. + length -= np.random.randint(self._minlen) + length = max(self._minlen, length) + upper = total - length + 1 + if self._prioritize_ends: + upper += self._minlen + index = min(self._random.randint(upper), total - length) + sequence = { + k: convert(v[index: index + length]) + for k, v in episode.items() if not k.startswith('log_')} + sequence['is_first'] = np.zeros(len(sequence['action']), np.bool) + sequence['is_first'][0] = True + if self._maxlen: + assert self._minlen <= len(sequence['action']) <= self._maxlen + return sequence + + def _enforce_limit(self): + if not self._capacity: + return + while self._loaded_episodes > 1 and self._loaded_steps > self._capacity: + # Relying on Python preserving the insertion order of dicts. + oldest, episode = next(iter(self._complete_eps.items())) + self._loaded_steps -= eplen(episode) + self._loaded_episodes -= 1 + del self._complete_eps[oldest] + + +def count_episodes(directory): + filenames = list(directory.glob('*.npz')) + num_episodes = len(filenames) + num_steps = sum(int(str(n).split('-')[-1][:-4]) - 1 for n in filenames) + return num_episodes, num_steps + + +def save_episode(directory, episode, disable=False): + timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S') + identifier = str(uuid.uuid4().hex) + length = eplen(episode) + filename = directory / f'{timestamp}-{identifier}-{length}.npz' + if not disable: + with io.BytesIO() as f1: + np.savez_compressed(f1, **episode) + f1.seek(0) + with filename.open('wb') as f2: + f2.write(f1.read()) + return filename + + +def load_episodes(directory, capacity=None, minlen=1, keep_temporal_order=False): + # The returned directory from filenames to episodes is guaranteed to be in + # temporally sorted order. + filenames = sorted(directory.glob('*.npz')) + if not keep_temporal_order: + print('Shuffling order of offline trajectories!') + random.Random(0).shuffle(filenames) + if capacity: + num_steps = 0 + num_episodes = 0 + for filename in reversed(filenames): + length = int(str(filename).split('-')[-1][:-4]) + num_steps += length + num_episodes += 1 + if num_steps >= capacity: + break + filenames = filenames[-num_episodes:] + episodes = {} + for filename in filenames: + try: + with filename.open('rb') as f: + episode = np.load(f) + episode = {k: episode[k] for k in episode.keys()} + # Conversion for older versions of npz files. + if 'is_terminal' not in episode: + episode['is_terminal'] = episode['discount'] == 0. 
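+ # In the old format a discount of zero marked terminal transitions, so
+ # is_terminal is reconstructed above from the stored 'discount' values.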
+ except Exception as e: + print(f'Could not load episode {str(filename)}: {e}') + continue + episodes[str(filename)] = episode + return episodes + + +def convert(value): + value = np.array(value) + if np.issubdtype(value.dtype, np.floating): + return value.astype(np.float32) + elif np.issubdtype(value.dtype, np.signedinteger): + return value.astype(np.int32) + elif np.issubdtype(value.dtype, np.uint8): + return value.astype(np.uint8) + return value + + +def eplen(episode): + return len(episode['action']) - 1 + + +class OfflineReplay: + def __init__( + self, directory, capacity=0, ongoing=False, minlen=1, maxlen=0, + prioritize_ends=False, split_val=False): + self._capacity = capacity + self._minlen = minlen + self._maxlen = maxlen + self._prioritize_ends = prioritize_ends + self._random = np.random.RandomState() + self._total_episodes = 0 + self._total_steps = 0 + self._loaded_episodes = 0 + self._loaded_steps = 0 + self._complete_eps = {} + + if type(directory) is not list: + directory = [directory] + + for d in directory: + path = pathlib.Path(d).expanduser() + complete_eps = load_episodes(path, capacity, minlen) + self._complete_eps.update(complete_eps) + t_eps, t_steps = count_episodes(path) + self._total_episodes += t_eps + self._total_steps += t_steps + self._loaded_episodes += len(complete_eps) + self._loaded_steps += sum(eplen(x) for x in complete_eps.values()) + + print('Loaded from offline directory(ies)!') + print(self.stats) + + self.episodes = list(self._complete_eps.values()) + + if split_val: + self.episodes, self.val_episodes = train_test_split(self.episodes, test_size=0.2) + print( + f'Split into {len(self.episodes)} training episodes and {len(self.val_episodes)} validation episodes.') + + @property + def stats(self): + return { + 'total_steps': self._total_steps, + 'total_episodes': self._total_episodes, + 'loaded_steps': self._loaded_steps, + 'loaded_episodes': self._loaded_episodes, + } + + def dataset(self, batch, length, validation=False): + example = next(iter(self._generate_chunks(length, validation=validation))) + dataset = tf.data.Dataset.from_generator( + lambda: self._generate_chunks(length, validation=validation), + {k: v.dtype for k, v in example.items()}, + {k: v.shape for k, v in example.items()}) + dataset = dataset.batch(batch, drop_remainder=True) + dataset = dataset.prefetch(5) + return dataset + + def _generate_chunks(self, length, validation): + sequence = self._sample_sequence(validation=validation) + while True: + chunk = collections.defaultdict(list) + added = 0 + while added < length: + needed = length - added + adding = {k: v[:needed] for k, v in sequence.items()} + sequence = {k: v[needed:] for k, v in sequence.items()} + for key, value in adding.items(): + chunk[key].append(value) + added += len(adding['action']) + if len(sequence['action']) < 1: + sequence = self._sample_sequence(validation=validation) + chunk = {k: np.concatenate(v) for k, v in chunk.items()} + yield chunk + + def _sample_sequence(self, validation): + if validation: + episode = self._random.choice(self.val_episodes) + else: + episode = self._random.choice(self.episodes) + total = len(episode['action']) + length = total + if self._maxlen: + length = min(length, self._maxlen) + # Randomize length to avoid all chunks ending at the same time in case the + # episodes are all of the same length. 
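+            # With the default replay settings (minlen == maxlen) the max(minlen, ...) clamp below restores length to minlen, so this randomization only takes effect when maxlen > minlen.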
+ length -= np.random.randint(self._minlen) + length = max(self._minlen, length) + upper = total - length + 1 + if self._prioritize_ends: + upper += self._minlen + index = min(self._random.randint(upper), total - length) + sequence = { + k: convert(v[index: index + length]) + for k, v in episode.items() if not k.startswith('log_')} + sequence['is_first'] = np.zeros(len(sequence['action']), np.bool) + sequence['is_first'][0] = True + if self._maxlen: + assert self._minlen <= len(sequence['action']) <= self._maxlen + return sequence + + # Generate chunks out of the validation episodes. (im loss is infinite...?) + def validation_chunks(self, batch, length): + chunks = [] + for episode in self.val_episodes: + episode_chunks = [] + sequence = {k: convert(v) for k, v in episode.items() if not k.startswith('log_')} + sequence['is_first'] = np.zeros(len(sequence['action']), np.bool) + + while len(sequence['action']) >= length: + chunk = collections.defaultdict(list) + adding = {k: v[:length] for k, v in sequence.items()} + adding['is_first'][0] = True + sequence = {k: v[length:] for k, v in sequence.items()} + for key, value in adding.items(): + chunk[key].append(value) + chunk = {k: np.concatenate(v) for k, v in chunk.items()} + episode_chunks.append(chunk) + episode_chunks = {k: tf.stack([x[k] for x in episode_chunks]) for k in episode_chunks[0].keys()} + chunks.append(episode_chunks) + + return chunks diff --git a/offlinedv2/dreamerv2/common/tfutils.py b/offlinedv2/dreamerv2/common/tfutils.py new file mode 100644 index 0000000000000000000000000000000000000000..7a17f339ecdc68f23ec8362f92b4d16410a38d13 --- /dev/null +++ b/offlinedv2/dreamerv2/common/tfutils.py @@ -0,0 +1,149 @@ +import pathlib +import pickle +import re + +import numpy as np +import tensorflow as tf +from tensorflow.keras import mixed_precision as prec + +try: + from tensorflow.python.distribute import values +except Exception: + from google3.third_party.tensorflow.python.distribute import values + +tf.tensor = tf.convert_to_tensor +for base in (tf.Tensor, tf.Variable, values.PerReplica): + base.mean = tf.math.reduce_mean + base.std = tf.math.reduce_std + base.var = tf.math.reduce_variance + base.sum = tf.math.reduce_sum + base.any = tf.math.reduce_any + base.all = tf.math.reduce_all + base.min = tf.math.reduce_min + base.max = tf.math.reduce_max + base.abs = tf.math.abs + base.logsumexp = tf.math.reduce_logsumexp + base.transpose = tf.transpose + base.reshape = tf.reshape + base.astype = tf.cast + + +# values.PerReplica.dtype = property(lambda self: self.values[0].dtype) + +# tf.TensorHandle.__repr__ = lambda x: '' +# tf.TensorHandle.__str__ = lambda x: '' +# np.set_printoptions(threshold=5, edgeitems=0) + + +class Module(tf.Module): + def save(self, filename): + values = tf.nest.map_structure(lambda x: x.numpy(), self.variables) + amount = len(tf.nest.flatten(values)) + count = int(sum(np.prod(x.shape) for x in tf.nest.flatten(values))) + print(f'Save checkpoint with {amount} tensors and {count} parameters.') + with pathlib.Path(filename).open('wb') as f: + pickle.dump(values, f) + + def load(self, filename): + with pathlib.Path(filename).open('rb') as f: + values = pickle.load(f) + amount = len(tf.nest.flatten(values)) + count = int(sum(np.prod(x.shape) for x in tf.nest.flatten(values))) + print(f'Load checkpoint with {amount} tensors and {count} parameters.') + tf.nest.map_structure(lambda x, y: x.assign(y), self.variables, values) + + def get(self, name, ctor, *args, **kwargs): + # Create or get layer by name to avoid 
mentioning it in the constructor. + if not hasattr(self, '_modules'): + self._modules = {} + if name not in self._modules: + self._modules[name] = ctor(*args, **kwargs) + return self._modules[name] + + +class Optimizer(tf.Module): + def __init__( + self, name, lr, eps=1e-4, clip=None, wd=None, + opt='adam', wd_pattern=r'.*'): + assert 0 <= wd < 1 + assert not clip or 1 <= clip + self._name = name + self._clip = clip + self._wd = wd + self._wd_pattern = wd_pattern + self._opt = { + 'adam': lambda: tf.optimizers.Adam(lr, epsilon=eps), + 'nadam': lambda: tf.optimizers.Nadam(lr, epsilon=eps), + 'adamax': lambda: tf.optimizers.Adamax(lr, epsilon=eps), + 'sgd': lambda: tf.optimizers.SGD(lr), + 'momentum': lambda: tf.optimizers.SGD(lr, 0.9), + }[opt]() + self._mixed = (prec.global_policy().compute_dtype == tf.float16) + if self._mixed: + self._opt = prec.LossScaleOptimizer(self._opt, dynamic=True) + self._once = True + + @property + def variables(self): + return self._opt.variables() + + def __call__(self, tape, loss, modules): + assert loss.dtype is tf.float32, (self._name, loss.dtype) + assert len(loss.shape) == 0, (self._name, loss.shape) + metrics = {} + + # Find variables. + modules = modules if hasattr(modules, '__len__') else (modules,) + varibs = tf.nest.flatten([module.variables for module in modules]) + count = sum(np.prod(x.shape) for x in varibs) + if self._once: + print(f'Found {count} {self._name} parameters.') + self._once = False + + # Check loss. + tf.debugging.check_numerics(loss, self._name + '_loss') + metrics[f'{self._name}_loss'] = loss + + # Compute scaled gradient. + if self._mixed: + with tape: + loss = self._opt.get_scaled_loss(loss) + grads = tape.gradient(loss, varibs) + if self._mixed: + grads = self._opt.get_unscaled_gradients(grads) + if self._mixed: + metrics[f'{self._name}_loss_scale'] = self._opt.loss_scale + + # Distributed sync. + context = tf.distribute.get_replica_context() + if context: + grads = context.all_reduce('mean', grads) + + # Gradient clipping. + norm = tf.linalg.global_norm(grads) + if not self._mixed: + tf.debugging.check_numerics(norm, self._name + '_norm') + if self._clip: + grads, _ = tf.clip_by_global_norm(grads, self._clip, norm) + metrics[f'{self._name}_grad_norm'] = norm + + # Weight decay. + if self._wd: + self._apply_weight_decay(varibs) + + # Apply gradients. 
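+            # Gradients were already averaged across replicas above, so tell the optimizer not to aggregate them a second time.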
+ self._opt.apply_gradients( + zip(grads, varibs), + experimental_aggregate_gradients=False) + + return metrics + + def _apply_weight_decay(self, varibs): + nontrivial = (self._wd_pattern != r'.*') + if nontrivial: + print('Applied weight decay to variables:') + for var in varibs: + if re.search(self._wd_pattern, self._name + '/' + var.name): + if nontrivial: + print('- ' + self._name + '/' + var.name) + var.assign((1 - self._wd) * var) diff --git a/offlinedv2/dreamerv2/common/when.py b/offlinedv2/dreamerv2/common/when.py new file mode 100644 index 0000000000000000000000000000000000000000..961908836e07f54452e30d1d6e4276ca0e448778 --- /dev/null +++ b/offlinedv2/dreamerv2/common/when.py @@ -0,0 +1,41 @@ +class Every: + + def __init__(self, every): + self._every = every + self._last = None + + def __call__(self, step): + step = int(step) + if not self._every: + return False + if self._last is None: + self._last = step + return True + if step >= self._last + self._every: + self._last += self._every + return True + return False + + +class Once: + + def __init__(self): + self._once = True + + def __call__(self): + if self._once: + self._once = False + return True + return False + + +class Until: + + def __init__(self, until): + self._until = until + + def __call__(self, step): + step = int(step) + if not self._until: + return True + return step < self._until diff --git a/offlinedv2/dreamerv2/configs.yaml b/offlinedv2/dreamerv2/configs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61c0f3697627a2d345600cc3aaaca3950042c641 --- /dev/null +++ b/offlinedv2/dreamerv2/configs.yaml @@ -0,0 +1,176 @@ +defaults: + + # Train Script + logdir: /dev/null + seed: 0 + task: dmc_walker_walk + envs: 1 + envs_parallel: none + render_size: [64, 64] + dmc_camera: -1 + atari_grayscale: True + time_limit: 0 + action_repeat: 1 + # steps: 1e7 + steps: 2e5 + log_every: 1e4 + eval_every: 1e5 + eval_eps: 1 + prefill: 10000 + pretrain: 1 + train_every: 5 + train_steps: 1 + expl_until: 0 + replay: {capacity: 2e6, ongoing: False, minlen: 50, maxlen: 50, prioritize_ends: True} + dataset: {batch: 16, length: 50} + log_keys_video: ['image'] + log_keys_sum: '^$' + log_keys_mean: '^$' + log_keys_max: '^$' + precision: 16 + jit: True + + offline_dir: [none] + offline_model_train_steps: 25001 + offline_model_loaddir: none + offline_lmbd: 5.0 + offline_penalty_type: none + offline_model_save_every: 5000 + offline_split_val: False + offline_tune_lmbd: False + offline_lmbd_cons: 1.5 + offline_model_dataset: {batch: 64, length: 50} + offline_train_dataset: {batch: 64, length: 50} + + # Agent + clip_rewards: tanh + expl_behavior: greedy + expl_noise: 0.0 + eval_noise: 0.0 + eval_state_mean: False + + # World Model + grad_heads: [decoder, reward, discount] + pred_discount: True + rssm: {ensemble: 7, hidden: 1024, deter: 1024, stoch: 32, discrete: 32, act: elu, norm: none, std_act: sigmoid2, min_std: 0.1} + encoder: {mlp_keys: '.*', cnn_keys: '.*', act: elu, norm: none, cnn_depth: 48, cnn_kernels: [4, 4, 4, 4], mlp_layers: [400, 400, 400, 400]} + decoder: {mlp_keys: '.*', cnn_keys: '.*', act: elu, norm: none, cnn_depth: 48, cnn_kernels: [5, 5, 6, 6], mlp_layers: [400, 400, 400, 400]} + reward_head: {layers: 4, units: 400, act: elu, norm: none, dist: mse} + discount_head: {layers: 4, units: 400, act: elu, norm: none, dist: binary} + loss_scales: {kl: 1.0, reward: 1.0, discount: 1.0, proprio: 1.0} + kl: {free: 0.0, forward: False, balance: 0.8, free_avg: True} + model_opt: {opt: adam, lr: 1e-4, eps: 1e-5, clip: 
100, wd: 1e-6} + + # Actor Critic + actor: {layers: 4, units: 400, act: elu, norm: none, dist: auto, min_std: 0.1} + critic: {layers: 4, units: 400, act: elu, norm: none, dist: mse} + actor_opt: {opt: adam, lr: 8e-5, eps: 1e-5, clip: 100, wd: 1e-6} + critic_opt: {opt: adam, lr: 2e-4, eps: 1e-5, clip: 100, wd: 1e-6} + discount: 0.99 + discount_lambda: 0.95 + imag_horizon: 5 + actor_grad: auto + actor_grad_mix: 0.1 + actor_ent: 2e-3 + slow_target: True + slow_target_update: 100 + slow_target_fraction: 1 + slow_baseline: True + reward_norm: {momentum: 1.0, scale: 1.0, eps: 1e-8} + + # Exploration + expl_intr_scale: 1.0 + expl_extr_scale: 0.0 + expl_opt: {opt: adam, lr: 3e-4, eps: 1e-5, clip: 100, wd: 1e-6} + expl_head: {layers: 4, units: 400, act: elu, norm: none, dist: mse} + expl_reward_norm: {momentum: 1.0, scale: 1.0, eps: 1e-8} + disag_target: stoch + disag_log: False + disag_models: 10 + disag_offset: 1 + disag_action_cond: True + expl_model_loss: kl + +atari: + + task: atari_pong + encoder: {mlp_keys: '$^', cnn_keys: 'image'} + decoder: {mlp_keys: '$^', cnn_keys: 'image'} + time_limit: 27000 + action_repeat: 4 + steps: 5e7 + eval_every: 2.5e5 + log_every: 1e4 + prefill: 50000 + train_every: 16 + clip_rewards: tanh + rssm: {hidden: 600, deter: 600} + model_opt.lr: 2e-4 + actor_opt.lr: 4e-5 + critic_opt.lr: 1e-4 + actor_ent: 1e-3 + discount: 0.999 + loss_scales.kl: 0.1 + loss_scales.discount: 5.0 + +crafter: + + task: crafter_reward + encoder: {mlp_keys: '$^', cnn_keys: 'image'} + decoder: {mlp_keys: '$^', cnn_keys: 'image'} + log_keys_max: '^log_achievement_.*' + log_keys_sum: '^log_reward$' + discount: 0.999 + .*\.norm: layer + +dmc_vision: + + task: dmc_walker_walk + encoder: {mlp_keys: '$^', cnn_keys: 'image'} + decoder: {mlp_keys: '$^', cnn_keys: 'image'} + action_repeat: 2 + eval_every: 1e4 + prefill: 1000 + pretrain: 100 + clip_rewards: identity + pred_discount: False + replay.prioritize_ends: False + grad_heads: [decoder, reward] + rssm: {hidden: 200, deter: 200} + model_opt.lr: 3e-4 + actor_opt.lr: 8e-5 + critic_opt.lr: 8e-5 + actor_ent: 1e-4 + kl.free: 1.0 + +dmc_proprio: + + task: dmc_walker_walk + encoder: {mlp_keys: '.*', cnn_keys: '$^'} + decoder: {mlp_keys: '.*', cnn_keys: '$^'} + action_repeat: 2 + eval_every: 1e4 + prefill: 1000 + pretrain: 100 + clip_rewards: identity + pred_discount: False + replay.prioritize_ends: False + grad_heads: [decoder, reward] + rssm: {hidden: 200, deter: 200} + model_opt.lr: 3e-4 + actor_opt.lr: 8e-5 + critic_opt.lr: 8e-5 + actor_ent: 1e-4 + kl.free: 1.0 + +debug: + + jit: False + time_limit: 100 + eval_every: 300 + log_every: 300 + prefill: 100 + pretrain: 1 + train_steps: 1 + replay: {minlen: 10, maxlen: 30} + dataset: {batch: 10, length: 10} diff --git a/offlinedv2/dreamerv2/expl.py b/offlinedv2/dreamerv2/expl.py new file mode 100644 index 0000000000000000000000000000000000000000..a17d79f85f843bd81d61257b807f33d6d7a88c95 --- /dev/null +++ b/offlinedv2/dreamerv2/expl.py @@ -0,0 +1,124 @@ +import tensorflow as tf +from tensorflow_probability import distributions as tfd + +from dreamerv2 import agent +from dreamerv2 import common + + +class Random(common.Module): + + def __init__(self, config, act_space, wm, tfstep, reward): + self.config = config + self.act_space = self.act_space + + def actor(self, feat): + shape = feat.shape[:-1] + self.act_space.shape + if self.config.actor.dist == 'onehot': + return common.OneHotDist(tf.zeros(shape)) + else: + dist = tfd.Uniform(-tf.ones(shape), tf.ones(shape)) + return tfd.Independent(dist, 1) + + def 
train(self, start, context, data): + return None, {} + + +class Plan2Explore(common.Module): + + def __init__(self, config, act_space, wm, tfstep, reward): + self.config = config + self.reward = reward + self.wm = wm + self.ac = agent.ActorCritic(config, act_space, tfstep) + self.actor = self.ac.actor + stoch_size = config.rssm.stoch + if config.rssm.discrete: + stoch_size *= config.rssm.discrete + size = { + 'embed': 32 * config.encoder.cnn_depth, + 'stoch': stoch_size, + 'deter': config.rssm.deter, + 'feat': config.rssm.stoch + config.rssm.deter, + }[self.config.disag_target] + self._networks = [ + common.MLP(size, **config.expl_head) + for _ in range(config.disag_models)] + self.opt = common.Optimizer('expl', **config.expl_opt) + self.extr_rewnorm = common.StreamNorm(**self.config.expl_reward_norm) + self.intr_rewnorm = common.StreamNorm(**self.config.expl_reward_norm) + + def train(self, start, context, data): + metrics = {} + stoch = start['stoch'] + if self.config.rssm.discrete: + stoch = tf.reshape( + stoch, stoch.shape[:-2] + (stoch.shape[-2] * stoch.shape[-1])) + target = { + 'embed': context['embed'], + 'stoch': stoch, + 'deter': start['deter'], + 'feat': context['feat'], + }[self.config.disag_target] + inputs = context['feat'] + if self.config.disag_action_cond: + action = tf.cast(data['action'], inputs.dtype) + inputs = tf.concat([inputs, action], -1) + metrics.update(self._train_ensemble(inputs, target)) + metrics.update(self.ac.train( + self.wm, start, data['is_terminal'], self._intr_reward)) + return None, metrics + + def _intr_reward(self, seq): + inputs = seq['feat'] + if self.config.disag_action_cond: + action = tf.cast(seq['action'], inputs.dtype) + inputs = tf.concat([inputs, action], -1) + preds = [head(inputs).mode() for head in self._networks] + disag = tf.tensor(preds).std(0).mean(-1) + if self.config.disag_log: + disag = tf.math.log(disag) + reward = self.config.expl_intr_scale * self.intr_rewnorm(disag)[0] + if self.config.expl_extr_scale: + reward += self.config.expl_extr_scale * self.extr_rewnorm( + self.reward(seq))[0] + return reward + + def _train_ensemble(self, inputs, targets): + if self.config.disag_offset: + targets = targets[:, self.config.disag_offset:] + inputs = inputs[:, :-self.config.disag_offset] + targets = tf.stop_gradient(targets) + inputs = tf.stop_gradient(inputs) + with tf.GradientTape() as tape: + preds = [head(inputs) for head in self._networks] + loss = -sum([pred.log_prob(targets).mean() for pred in preds]) + metrics = self.opt(tape, loss, self._networks) + return metrics + + +class ModelLoss(common.Module): + + def __init__(self, config, act_space, wm, tfstep, reward): + self.config = config + self.reward = reward + self.wm = wm + self.ac = agent.ActorCritic(config, act_space, tfstep) + self.actor = self.ac.actor + self.head = common.MLP([], **self.config.expl_head) + self.opt = common.Optimizer('expl', **self.config.expl_opt) + + def train(self, start, context, data): + metrics = {} + target = tf.cast(context[self.config.expl_model_loss], tf.float32) + with tf.GradientTape() as tape: + loss = -self.head(context['feat']).log_prob(target).mean() + metrics.update(self.opt(tape, loss, self.head)) + metrics.update(self.ac.train( + self.wm, start, data['is_terminal'], self._intr_reward)) + return None, metrics + + def _intr_reward(self, seq): + reward = self.config.expl_intr_scale * self.head(seq['feat']).mode() + if self.config.expl_extr_scale: + reward += self.config.expl_extr_scale * self.reward(seq) + return reward diff --git 
a/offlinedv2/dreamerv2/train.py b/offlinedv2/dreamerv2/train.py new file mode 100644 index 0000000000000000000000000000000000000000..caeca1ffb12bab2429e73f582ff1abc7ceec45e3 --- /dev/null +++ b/offlinedv2/dreamerv2/train.py @@ -0,0 +1,201 @@ +import collections +import functools +import logging +import os +import pathlib +import re +import sys +import warnings + +try: + import rich.traceback + + rich.traceback.install() +except ImportError: + pass + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +logging.getLogger().setLevel('ERROR') +warnings.filterwarnings('ignore', '.*box bound precision lowered.*') + +sys.path.append(str(pathlib.Path(__file__).parent)) +sys.path.append(str(pathlib.Path(__file__).parent.parent)) + +import numpy as np +import ruamel.yaml as yaml + +from dreamerv2 import agent +from dreamerv2 import common + + +def main(): + configs = yaml.safe_load((pathlib.Path(sys.argv[0]).parent / 'configs.yaml').read_text()) + parsed, remaining = common.Flags(configs=['defaults']).parse(known_only=True) + config = common.Config(configs['defaults']) + for name in parsed.configs: + config = config.update(configs[name]) + config = common.Flags(config).parse(remaining) + + logdir = pathlib.Path(config.logdir).expanduser() + logdir.mkdir(parents=True, exist_ok=True) + config.save(logdir / 'config.yaml') + print(config, '\n') + print('Logdir', logdir) + + import tensorflow as tf + tf.config.experimental_run_functions_eagerly(not config.jit) + message = 'No GPU found. To actually train on CPU remove this assert.' + assert tf.config.experimental.list_physical_devices('GPU'), message + for gpu in tf.config.experimental.list_physical_devices('GPU'): + tf.config.experimental.set_memory_growth(gpu, True) + assert config.precision in (16, 32), config.precision + if config.precision == 16: + from tensorflow.keras.mixed_precision import experimental as prec + prec.set_policy(prec.Policy('mixed_float16')) + + train_replay = common.Replay(logdir / 'train_episodes', **config.replay) + eval_replay = common.Replay(logdir / 'eval_episodes', **dict( + capacity=config.replay.capacity // 10, + minlen=config.dataset.length, + maxlen=config.dataset.length)) + step = common.Counter(train_replay.stats['total_steps']) + outputs = [ + common.TerminalOutput(), + common.JSONLOutput(logdir), + common.TensorBoardOutput(logdir), + ] + logger = common.Logger(step, outputs, multiplier=config.action_repeat) + metrics = collections.defaultdict(list) + + should_train = common.Every(config.train_every) + should_log = common.Every(config.log_every) + should_video_train = common.Every(config.eval_every) + should_video_eval = common.Every(config.eval_every) + should_expl = common.Until(config.expl_until) + + def make_env(mode): + suite, task = config.task.split('_', 1) + if suite == 'dmc': + env = common.DMC( + task, config.action_repeat, config.render_size, config.dmc_camera) + env = common.NormalizeAction(env) + elif suite == 'dmcmt': + env = common.DMCMultitask( + task, config.action_repeat, config.render_size, config.dmc_camera) + env = common.NormalizeAction(env) + elif suite == 'atari': + env = common.Atari( + task, config.action_repeat, config.render_size, + config.atari_grayscale) + env = common.OneHotAction(env) + elif suite == 'crafter': + assert config.action_repeat == 1 + outdir = logdir / 'crafter' if mode == 'train' else None + reward = bool(['noreward', 'reward'].index(task)) or mode == 'eval' + env = common.Crafter(outdir, reward) + env = common.OneHotAction(env) + else: + raise NotImplementedError(suite) + env = 
common.TimeLimit(env, config.time_limit) + return env + + def per_episode(ep, mode): + length = len(ep['reward']) - 1 + score = float(ep['reward'].astype(np.float64).sum()) + print(f'{mode.title()} episode has {length} steps and return {score:.1f}.') + logger.scalar(f'{mode}_return', score) + logger.scalar(f'{mode}_length', length) + for key, value in ep.items(): + if re.match(config.log_keys_sum, key): + logger.scalar(f'sum_{mode}_{key}', ep[key].sum()) + if re.match(config.log_keys_mean, key): + logger.scalar(f'mean_{mode}_{key}', ep[key].mean()) + if re.match(config.log_keys_max, key): + logger.scalar(f'max_{mode}_{key}', ep[key].max(0).mean()) + should = {'train': should_video_train, 'eval': should_video_eval}[mode] + if should(step): + for key in config.log_keys_video: + logger.video(f'{mode}_policy_{key}', ep[key]) + replay = dict(train=train_replay, eval=eval_replay)[mode] + logger.add(replay.stats, prefix=mode) + logger.write() + + print('Create envs.') + num_eval_envs = min(config.envs, config.eval_eps) + if config.envs_parallel == 'none': + train_envs = [make_env('train') for _ in range(config.envs)] + eval_envs = [make_env('eval') for _ in range(num_eval_envs)] + else: + make_async_env = lambda mode: common.Async( + functools.partial(make_env, mode), config.envs_parallel) + train_envs = [make_async_env('train') for _ in range(config.envs)] + eval_envs = [make_async_env('eval') for _ in range(num_eval_envs)] + act_space = train_envs[0].act_space + obs_space = train_envs[0].obs_space + train_driver = common.Driver(train_envs) + train_driver.on_episode(lambda ep: per_episode(ep, mode='train')) + train_driver.on_step(lambda tran, worker: step.increment()) + train_driver.on_step(train_replay.add_step) + train_driver.on_reset(train_replay.add_step) + eval_driver = common.Driver(eval_envs) + eval_driver.on_episode(lambda ep: per_episode(ep, mode='eval')) + eval_driver.on_episode(eval_replay.add_episode) + + prefill = max(0, config.prefill - train_replay.stats['total_steps']) + if prefill: + print(f'Prefill dataset ({prefill} steps).') + random_agent = common.RandomAgent(act_space) + train_driver(random_agent, steps=prefill, episodes=1) + eval_driver(random_agent, episodes=1) + train_driver.reset() + eval_driver.reset() + + print('Create agent.') + train_dataset = iter(train_replay.dataset(**config.dataset)) + report_dataset = iter(train_replay.dataset(**config.dataset)) + eval_dataset = iter(eval_replay.dataset(**config.dataset)) + agnt = agent.Agent(config, obs_space, act_space, step) + train_agent = common.CarryOverState(agnt.train) + train_agent(next(train_dataset)) + if (logdir / 'variables.pkl').exists(): + agnt.load(logdir / 'variables.pkl') + else: + print('Pretrain agent.') + for _ in range(config.pretrain): + train_agent(next(train_dataset)) + train_policy = lambda *args: agnt.policy( + *args, mode='explore' if should_expl(step) else 'train') + eval_policy = lambda *args: agnt.policy(*args, mode='eval') + + def train_step(tran, worker): + if should_train(step): + for _ in range(config.train_steps): + mets = train_agent(next(train_dataset)) + [metrics[key].append(value) for key, value in mets.items()] + if should_log(step): + for name, values in metrics.items(): + logger.scalar(name, np.array(values, np.float64).mean()) + metrics[name].clear() + logger.add(agnt.report(next(report_dataset)), prefix='train') + logger.write(fps=True) + + train_driver.on_step(train_step) + + while step < config.steps: + logger.write() + print('Start evaluation.') + 
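+        # One evaluation round: log the agent's report on a batch of eval data, then roll out the eval policy for eval_eps episodes.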
logger.add(agnt.report(next(eval_dataset)), prefix='eval') + eval_driver(eval_policy, episodes=config.eval_eps) + print('Start training.') + train_driver(train_policy, steps=config.eval_every) + agnt.save(logdir / f'variables.pkl') + + for env in train_envs + eval_envs: + try: + env.close() + except Exception: + pass + + +if __name__ == '__main__': + main() diff --git a/offlinedv2/train_offline.py b/offlinedv2/train_offline.py new file mode 100644 index 0000000000000000000000000000000000000000..56453fc4e7a705a99a0b8fcca4a580737fce68e7 --- /dev/null +++ b/offlinedv2/train_offline.py @@ -0,0 +1,214 @@ +import collections +import functools +import logging +import os +import pathlib +import re +import warnings + +try: + import rich.traceback + + rich.traceback.install() +except ImportError: + pass + +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +logging.getLogger().setLevel('ERROR') +warnings.filterwarnings('ignore', '.*box bound precision lowered.*') + +import numpy as np +import ruamel.yaml as yaml + +from dreamerv2 import agent +from dreamerv2 import common + + +def make_env(mode, config, logdir): + suite, task = config.task.split('_', 1) + if suite == 'dmc': + env = common.DMC( + task, config.action_repeat, config.render_size, config.dmc_camera) + env = common.NormalizeAction(env) + elif suite == 'dmcmt': + env = common.DMCMultitask( + task, config.action_repeat, config.render_size, config.dmc_camera) + env = common.NormalizeAction(env) + elif suite == 'atari': + env = common.Atari( + task, config.action_repeat, config.render_size, + config.atari_grayscale) + env = common.OneHotAction(env) + elif suite == 'crafter': + assert config.action_repeat == 1 + outdir = logdir / 'crafter' if mode == 'train' else None + reward = bool(['noreward', 'reward'].index(task)) or mode == 'eval' + env = common.Crafter(outdir, reward) + env = common.OneHotAction(env) + else: + raise NotImplementedError(suite) + env = common.TimeLimit(env, config.time_limit) + return env + + +def main(): + config_path = pathlib.Path('offlinedv2/dreamerv2') / 'configs.yaml' + configs = yaml.safe_load(config_path.read_text()) + parsed, remaining = common.Flags(configs=['defaults']).parse(known_only=True) + config = common.Config(configs['defaults']) + for name in parsed.configs: + config = config.update(configs[name]) + config = common.Flags(config).parse(remaining) + + logdir = pathlib.Path(config.logdir).expanduser() + logdir.mkdir(parents=True, exist_ok=True) + config.save(logdir / 'config.yaml') + print(config, '\n') + print('Logdir', logdir) + + import tensorflow as tf + tf.config.experimental_run_functions_eagerly(not config.jit) + message = 'No GPU found. To actually train on CPU remove this assert.' 
+ assert tf.config.experimental.list_physical_devices('GPU'), message + for gpu in tf.config.experimental.list_physical_devices('GPU'): + tf.config.experimental.set_memory_growth(gpu, True) + assert config.precision in (16, 32), config.precision + if config.precision == 16: + from tensorflow.keras.mixed_precision import experimental as prec + prec.set_policy(prec.Policy('mixed_float16')) + + print('Offline Datadir', config.offline_dir) + train_replay = common.OfflineReplay(list(config.offline_dir), split_val=config.offline_split_val, **config.replay) + eval_replay = common.Replay( + logdir / 'eval_episodes', + capacity=config.replay.capacity // 10, + minlen=config.dataset.length, + maxlen=config.dataset.length, + disable_save=True, + ) + step = common.Counter(0) + outputs = [ + common.TerminalOutput(), + common.JSONLOutput(logdir), + common.TensorBoardOutput(logdir), + ] + logger = common.Logger(step, outputs) + metrics = collections.defaultdict(list) + + should_video_eval = common.Every(config.eval_every) + + def per_episode(ep, mode): + length = len(ep['reward']) - 1 + score = float(ep['reward'].astype(np.float64).sum()) + print(f'{mode.title()} episode has {length} steps and return {score:.1f}.') + logger.scalar(f'{mode}_return', score) + logger.scalar(f'{mode}_length', length) + for key, value in ep.items(): + if re.match(config.log_keys_sum, key): + logger.scalar(f'sum_{mode}_{key}', ep[key].sum()) + if re.match(config.log_keys_mean, key): + logger.scalar(f'mean_{mode}_{key}', ep[key].mean()) + if re.match(config.log_keys_max, key): + logger.scalar(f'max_{mode}_{key}', ep[key].max(0).mean()) + # if should_video_eval(step): + # for key in config.log_keys_video: + # logger.video(f'{mode}_policy_{key}', ep[key]) + replay = dict(train=train_replay, eval=eval_replay)[mode] + logger.add(replay.stats, prefix=mode) + logger.write() + + print('Create envs.') + num_eval_envs = min(config.envs, config.eval_eps) + if config.envs_parallel == 'none': + eval_envs = [make_env('eval', config, logdir) for _ in range(num_eval_envs)] + else: + make_async_env = lambda mode: common.Async( + functools.partial(make_env, mode, config, logdir), config.envs_parallel) + eval_envs = [make_async_env('eval') for _ in range(num_eval_envs)] + act_space = eval_envs[0].act_space + obs_space = eval_envs[0].obs_space + eval_driver = common.Driver(eval_envs) + eval_driver.on_episode(lambda ep: per_episode(ep, mode='eval')) + eval_driver.on_episode(eval_replay.add_episode) + + print(f'Precollecting eval steps.') + random_agent = common.RandomAgent(act_space) + eval_driver(random_agent, episodes=1) + eval_driver.reset() + + print('Create agent.') + train_dataset = iter(train_replay.dataset(**config.offline_model_dataset)) + if config.offline_split_val: + val_dataset = iter(train_replay.dataset(validation=True, **config.offline_model_dataset)) + + agnt = agent.Agent(config, obs_space, act_space, step) + agnt.train(next(train_dataset)) + eval_policy = lambda *args: agnt.policy(*args, mode='eval') + + if config.offline_model_loaddir != 'none': + print('Loading model', config.offline_model_loaddir) + agnt.wm.load(config.offline_model_loaddir) + else: + print('Start model training.') + model_step = common.Counter(0) + model_logger = common.Logger(model_step, outputs) + while model_step < config.offline_model_train_steps: + if config.offline_split_val: + # Compute model validation loss as average over chunks of data + val_losses = [] + for _ in range(10): + val_loss, _, _, val_mets = agnt.wm.loss(next(val_dataset)) + 
val_losses.append(val_loss) + model_logger.scalar(f'validation_model_loss', np.array(val_losses, np.float64).mean()) + + for it in range(100): + model_step.increment() + mets = agnt.model_train(next(train_dataset)) + + for key, value in mets.items(): + metrics[key].append(value) + + for name, values in metrics.items(): + model_logger.scalar(name, np.array(values, np.float64).mean()) + metrics[name].clear() + model_logger.write() + + if model_step % config.offline_model_save_every == 0: + print('Saving model') + agnt.wm.save(logdir / f'model_{model_step.value}.pkl') + agnt.wm.save(logdir / f'final_model.pkl') + + # Begin training + metrics = collections.defaultdict(list) + + train_dataset = iter(train_replay.dataset(**config.offline_train_dataset)) + eval_dataset = iter(eval_replay.dataset(**config.offline_train_dataset)) + while step < config.steps: + logger.write() + print('Start evaluation.') + if should_video_eval(step): + logger.add(agnt.report(next(eval_dataset)), prefix='eval') + eval_driver(eval_policy, episodes=config.eval_eps) + + print('Start training.') + for it in range(200): + step.increment() + mets = agnt.agent_train(next(train_dataset)) + for key, value in mets.items(): + metrics[key].append(value) + for name, values in metrics.items(): + logger.scalar(name, np.array(values, np.float64).mean()) + metrics[name].clear() + + logger.write(fps=True) + agnt.save(logdir / 'variables.pkl') + + for env in eval_envs: + try: + env.close() + except Exception: + pass + + +if __name__ == '__main__': + main()
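A minimal usage sketch of the OfflineReplay pieces introduced above, assuming the dreamerv2 package is importable (e.g. run from inside offlinedv2/) and that the episode directory is a placeholder path holding *.npz episodes written by Replay; batch and length mirror the offline_model_dataset defaults.

from dreamerv2 import common

# Load every *.npz episode under the (placeholder) directory and hold out
# 20% of the episodes for validation, as train_offline.py does when
# offline_split_val is enabled.
replay = common.OfflineReplay(
    ['~/logdir/dmc_walker_walk/train_episodes'],
    capacity=0, minlen=50, maxlen=50, split_val=True)
print(replay.stats)  # total/loaded steps and episodes

# Fixed-length chunk streams: one over the training episodes and one over
# the held-out validation episodes.
train_dataset = iter(replay.dataset(batch=64, length=50))
val_dataset = iter(replay.dataset(batch=64, length=50, validation=True))

batch = next(train_dataset)  # dict of tensors shaped [64, 50, ...] per key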