conglu committed on
Commit
6e5cc8b
1 Parent(s): e0540b6

upload code

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. baselines/lompo/__init__.py +0 -0
  2. baselines/lompo/buffer.py +126 -0
  3. baselines/lompo/models.py +250 -0
  4. baselines/lompo/run.py +723 -0
  5. baselines/lompo/tools.py +243 -0
  6. baselines/lompo/wrappers.py +508 -0
  7. conversion_scripts/hdf5_to_npz.py +66 -0
  8. conversion_scripts/npz_to_hdf5.py +81 -0
  9. conversion_scripts/split_hdf5_shards.py +40 -0
  10. dockerfiles/Dockerfile-drqv2 +74 -0
  11. dockerfiles/Dockerfile-dv2 +58 -0
  12. drqbc/.DS_Store +0 -0
  13. drqbc/LICENSE +21 -0
  14. drqbc/bcagent.py +105 -0
  15. drqbc/cfgs/.DS_Store +0 -0
  16. drqbc/cfgs/algo/bc.yaml +20 -0
  17. drqbc/cfgs/algo/cql.yaml +27 -0
  18. drqbc/cfgs/algo/drqv2.yaml +23 -0
  19. drqbc/cfgs/algo/noaug_bc.yaml +10 -0
  20. drqbc/cfgs/algo/noaug_drqv2.yaml +10 -0
  21. drqbc/cfgs/config.yaml +63 -0
  22. drqbc/cfgs/task/offline_dmc.yaml +12 -0
  23. drqbc/cfgs/task/online/acrobot_swingup.yaml +5 -0
  24. drqbc/cfgs/task/online/cartpole_balance.yaml +5 -0
  25. drqbc/cfgs/task/online/cartpole_balance_sparse.yaml +5 -0
  26. drqbc/cfgs/task/online/cartpole_swingup.yaml +5 -0
  27. drqbc/cfgs/task/online/cartpole_swingup_sparse.yaml +5 -0
  28. drqbc/cfgs/task/online/cheetah_run.yaml +5 -0
  29. drqbc/cfgs/task/online/cup_catch.yaml +5 -0
  30. drqbc/cfgs/task/online/easy.yaml +2 -0
  31. drqbc/cfgs/task/online/finger_spin.yaml +5 -0
  32. drqbc/cfgs/task/online/finger_turn_easy.yaml +5 -0
  33. drqbc/cfgs/task/online/finger_turn_hard.yaml +5 -0
  34. drqbc/cfgs/task/online/hard.yaml +2 -0
  35. drqbc/cfgs/task/online/hopper_hop.yaml +5 -0
  36. drqbc/cfgs/task/online/hopper_stand.yaml +5 -0
  37. drqbc/cfgs/task/online/humanoid_run.yaml +7 -0
  38. drqbc/cfgs/task/online/humanoid_stand.yaml +7 -0
  39. drqbc/cfgs/task/online/humanoid_walk.yaml +7 -0
  40. drqbc/cfgs/task/online/medium.yaml +2 -0
  41. drqbc/cfgs/task/online/pendulum_swingup.yaml +5 -0
  42. drqbc/cfgs/task/online/quadruped_run.yaml +6 -0
  43. drqbc/cfgs/task/online/quadruped_walk.yaml +5 -0
  44. drqbc/cfgs/task/online/reach_duplo.yaml +5 -0
  45. drqbc/cfgs/task/online/reacher_easy.yaml +5 -0
  46. drqbc/cfgs/task/online/reacher_hard.yaml +5 -0
  47. drqbc/cfgs/task/online/walker_run.yaml +7 -0
  48. drqbc/cfgs/task/online/walker_stand.yaml +7 -0
  49. drqbc/cfgs/task/online/walker_walk.yaml +7 -0
  50. drqbc/conda_env.yml +245 -0
baselines/lompo/__init__.py ADDED
File without changes
baselines/lompo/buffer.py ADDED
@@ -0,0 +1,126 @@
+ import h5py
+ import numpy as np
+
+
+ class Buffer():
+     def __init__(self):
+         self._obs = None
+         self._actions = None
+         self._rewards = None
+         self._next_obs = None
+         self._terminals = None
+
+
+ class LatentReplayBuffer(object):
+     def __init__(self,
+                  real_size: int,
+                  latent_size: int,
+                  obs_dim: int,
+                  action_dim: int,
+                  immutable: bool = False,
+                  load_from: str = None,
+                  silent: bool = False,
+                  seed: int = 0):
+
+         self.immutable = immutable
+
+         self.buffers = dict()
+         self.sizes = {'real': real_size, 'latent': latent_size}
+         for key in ['real', 'latent']:
+             self.buffers[key] = Buffer()
+             self.buffers[key]._obs = np.full((self.sizes[key], obs_dim), float('nan'), dtype=np.float32)
+             self.buffers[key]._actions = np.full((self.sizes[key], action_dim), float('nan'), dtype=np.float32)
+             self.buffers[key]._rewards = np.full((self.sizes[key], 1), float('nan'), dtype=np.float32)
+             self.buffers[key]._next_obs = np.full((self.sizes[key], obs_dim), float('nan'), dtype=np.float32)
+             self.buffers[key]._terminals = np.full((self.sizes[key], 1), float('nan'), dtype=np.float32)
+
+         self._real_stored_steps = 0
+         self._real_write_location = 0
+
+         self._latent_stored_steps = 0
+         self._latent_write_location = 0
+
+         self._stored_steps = 0
+         self._random = np.random.RandomState(seed)
+
+     @property
+     def obs_dim(self):
+         # The storage arrays live on the per-type Buffer objects, not on self.
+         return self.buffers['real']._obs.shape[-1]
+
+     @property
+     def action_dim(self):
+         return self.buffers['real']._actions.shape[-1]
+
+     def __len__(self):
+         return self._stored_steps
+
+     def save(self, location: str):
+         f = h5py.File(location, 'w')
+         f.create_dataset('obs', data=self.buffers['real']._obs[:self._real_stored_steps], compression='lzf')
+         f.create_dataset('actions', data=self.buffers['real']._actions[:self._real_stored_steps], compression='lzf')
+         f.create_dataset('rewards', data=self.buffers['real']._rewards[:self._real_stored_steps], compression='lzf')
+         f.create_dataset('next_obs', data=self.buffers['real']._next_obs[:self._real_stored_steps], compression='lzf')
+         f.create_dataset('terminals', data=self.buffers['real']._terminals[:self._real_stored_steps], compression='lzf')
+         f.close()
+
+     def load(self, location: str):
+         with h5py.File(location, "r") as f:
+             obs = np.array(f['obs'])
+             self._real_stored_steps = obs.shape[0]
+             self._real_write_location = obs.shape[0] % self.sizes['real']
+
+             self.buffers['real']._obs[:self._real_stored_steps] = np.array(f['obs'])
+             self.buffers['real']._actions[:self._real_stored_steps] = np.array(f['actions'])
+             self.buffers['real']._rewards[:self._real_stored_steps] = np.array(f['rewards'])
+             self.buffers['real']._next_obs[:self._real_stored_steps] = np.array(f['next_obs'])
+             self.buffers['real']._terminals[:self._real_stored_steps] = np.array(f['terminals'])
+
+     def add_samples(self, obs_feats, actions, next_obs_feats, rewards, terminals, sample_type='latent'):
+         if sample_type == 'real':
+             for obsi, actsi, nobsi, rewi, termi in zip(obs_feats, actions, next_obs_feats, rewards, terminals):
+                 self.buffers['real']._obs[self._real_write_location] = obsi
+                 self.buffers['real']._actions[self._real_write_location] = actsi
+                 self.buffers['real']._next_obs[self._real_write_location] = nobsi
+                 self.buffers['real']._rewards[self._real_write_location] = rewi
+                 self.buffers['real']._terminals[self._real_write_location] = termi
+
+                 self._real_write_location = (self._real_write_location + 1) % self.sizes['real']
+                 self._real_stored_steps = min(self._real_stored_steps + 1, self.sizes['real'])
+
+         else:
+             for obsi, actsi, nobsi, rewi, termi in zip(obs_feats, actions, next_obs_feats, rewards, terminals):
+                 self.buffers['latent']._obs[self._latent_write_location] = obsi
+                 self.buffers['latent']._actions[self._latent_write_location] = actsi
+                 self.buffers['latent']._next_obs[self._latent_write_location] = nobsi
+                 self.buffers['latent']._rewards[self._latent_write_location] = rewi
+                 self.buffers['latent']._terminals[self._latent_write_location] = termi
+
+                 self._latent_write_location = (self._latent_write_location + 1) % self.sizes['latent']
+                 self._latent_stored_steps = min(self._latent_stored_steps + 1, self.sizes['latent'])
+
+         self._stored_steps = self._real_stored_steps + self._latent_stored_steps
+
+     def sample(self, batch_size, return_dict: bool = False):
+         real_idxs = self._random.choice(self._real_stored_steps, batch_size)
+         latent_idxs = self._random.choice(self._latent_stored_steps, batch_size)
+
+         obs = np.concatenate([self.buffers['real']._obs[real_idxs],
+                               self.buffers['latent']._obs[latent_idxs]], axis=0)
+         actions = np.concatenate([self.buffers['real']._actions[real_idxs],
+                                   self.buffers['latent']._actions[latent_idxs]], axis=0)
+         next_obs = np.concatenate([self.buffers['real']._next_obs[real_idxs],
+                                    self.buffers['latent']._next_obs[latent_idxs]], axis=0)
+         rewards = np.concatenate([self.buffers['real']._rewards[real_idxs],
+                                   self.buffers['latent']._rewards[latent_idxs]], axis=0)
+         terminals = np.concatenate([self.buffers['real']._terminals[real_idxs],
+                                     self.buffers['latent']._terminals[latent_idxs]], axis=0)
+
+         data = {
+             'obs': obs,
+             'actions': actions,
+             'next_obs': next_obs,
+             'rewards': rewards,
+             'terminals': terminals
+         }
+
+         return data
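A minimal usage sketch of LatentReplayBuffer (not part of the commit; sizes and dimensions below are illustrative assumptions), showing how a sampled batch mixes real and latent transitions:

import numpy as np
from baselines.lompo.buffer import LatentReplayBuffer

# 320 features would match a 64 + 256 stoch/deter latent; purely illustrative here.
buf = LatentReplayBuffer(real_size=1000, latent_size=1000, obs_dim=320, action_dim=6)
obs = np.random.randn(32, 320).astype(np.float32)
act = np.random.uniform(-1, 1, size=(32, 6)).astype(np.float32)
rew = np.random.randn(32, 1).astype(np.float32)
done = np.zeros((32, 1), dtype=np.float32)

buf.add_samples(obs, act, obs, rew, done, sample_type='real')    # encoded dataset steps
buf.add_samples(obs, act, obs, rew, done, sample_type='latent')  # imagined rollout steps
batch = buf.sample(batch_size=16)  # each key holds 16 real rows followed by 16 latent rows
print({k: v.shape for k, v in batch.items()})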
baselines/lompo/models.py ADDED
@@ -0,0 +1,250 @@
+ import numpy as np
+ import tensorflow as tf
+ from tensorflow.keras import layers as tfkl
+ from tensorflow_probability import distributions as tfd
+ from tensorflow.keras.mixed_precision import experimental as prec
+
+ from . import tools
+
+
+ class RSSME(tools.Module):
+     def __init__(self, stoch=30, deter=200, hidden=200, num_models=7, act=tf.nn.elu):
+         super().__init__()
+         self._activation = act
+         self._stoch_size = stoch
+         self._deter_size = deter
+         self._hidden_size = hidden
+         self._cell = tfkl.GRUCell(self._deter_size)
+         self._k = num_models
+
+     def initial(self, batch_size):
+         dtype = prec.global_policy().compute_dtype
+         return dict(
+             mean=tf.zeros([batch_size, self._stoch_size], dtype),
+             std=tf.zeros([batch_size, self._stoch_size], dtype),
+             stoch=tf.zeros([batch_size, self._stoch_size], dtype),
+             deter=self._cell.get_initial_state(None, batch_size, dtype))
+
+     def observe(self, embed, action, state=None):
+         if state is None:
+             state = self.initial(tf.shape(action)[0])
+         embed = tf.transpose(embed, [1, 0, 2])
+         action = tf.transpose(action, [1, 0, 2])
+         post, prior = tools.static_scan(lambda prev, inputs: self.obs_step(prev[0], *inputs),
+                                         (action, embed), (state, state))
+         post = {k: tf.transpose(v, [1, 0, 2]) for k, v in post.items()}
+         prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()}
+         return post, prior
+
+     def imagine(self, action, state=None):
+         if state is None:
+             state = self.initial(tf.shape(action)[0])
+         assert isinstance(state, dict), state
+         action = tf.transpose(action, [1, 0, 2])
+         prior = tools.static_scan(self.img_step, action, state)
+         prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()}
+         return prior
+
+     def get_feat(self, state):
+         return tf.concat([state['stoch'], state['deter']], -1)
+
+     def get_feat_size(self, state):
+         return self._stoch_size + self._deter_size
+
+     def get_dist(self, state):
+         return tfd.MultivariateNormalDiag(state['mean'], state['std'])
+
+     def obs_step(self, prev_state, prev_action, embed):
+         prior = self.img_step(prev_state, prev_action)
+         x = tf.concat([prior['deter'], embed], -1)
+         x = self.get('obs1', tfkl.Dense, self._hidden_size, self._activation)(x)
+         x = self.get('obs2', tfkl.Dense, 2 * self._stoch_size, None)(x)
+         mean, std = tf.split(x, 2, -1)
+         std = tf.nn.softplus(std) + 0.1
+         stoch = self.get_dist({'mean': mean, 'std': std}).sample()
+         post = {'mean': mean, 'std': std, 'stoch': stoch, 'deter': prior['deter']}
+         return post, prior
+
+     def img_step(self, prev_state, prev_action, k=None):
+         if k is None:
+             k = np.random.choice(self._k)
+         x = tf.concat([prev_state['stoch'], prev_action], -1)
+         x = self.get('img1', tfkl.Dense, self._hidden_size, self._activation)(x)
+         x, deter = self._cell(x, [prev_state['deter']])
+         deter = deter[0]  # Keras wraps the state in a list.
+         x = self.get('img2_{}'.format(k), tfkl.Dense, self._hidden_size, self._activation)(x)
+         x = self.get('img3_{}'.format(k), tfkl.Dense, 2 * self._stoch_size, None)(x)
+         mean, std = tf.split(x, 2, -1)
+         std = tf.nn.softplus(std) + 0.1
+         stoch = self.get_dist({'mean': mean, 'std': std}).sample()
+         prior = {'mean': mean, 'std': std, 'stoch': stoch, 'deter': deter}
+         return prior
+
+
+ class MultivariateNormalDiag(tools.Module):
+     def __init__(self, hidden_size, latent_size, scale=None):
+         super().__init__()
+         self.latent_size = latent_size
+         self.scale = scale
+         self.dense1 = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)
+         self.dense2 = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)
+         self.output_layer = tf.keras.layers.Dense(2 * latent_size if self.scale
+                                                   is None else latent_size)
+
+     def __call__(self, *inputs):
+         if len(inputs) > 1:
+             inputs = tf.concat(inputs, axis=-1)
+         else:
+             inputs, = inputs
+         out = self.dense1(inputs)
+         out = self.dense2(out)
+         out = self.output_layer(out)
+         loc = out[..., :self.latent_size]
+         if self.scale is None:
+             assert out.shape[-1] == 2 * self.latent_size
+             scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-5
+         else:
+             assert out.shape[-1].value == self.latent_size
+             scale_diag = tf.ones_like(loc) * self.scale
+         return loc, scale_diag
+
+
+ class ConstantMultivariateNormalDiag(tools.Module):
+     def __init__(self, latent_size, scale=None):
+         super().__init__()
+         self.latent_size = latent_size
+         self.scale = scale
+
+     def __call__(self, *inputs):
+         # first input should not have any dimensions after the batch_shape, step_type
+         batch_shape = tf.shape(inputs[0])  # input is only used to infer batch_shape
+         shape = tf.concat([batch_shape, [self.latent_size]], axis=0)
+         loc = tf.zeros(shape)
+         if self.scale is None:
+             scale_diag = tf.ones(shape)
+         else:
+             scale_diag = tf.ones(shape) * self.scale
+         return loc, scale_diag
+
+
+ class ConvEncoderLarge(tools.Module):
+     def __init__(self, depth=32, act=tf.nn.relu):
+         self._act = act
+         self._depth = depth
+
+     def __call__(self, obs):
+         kwargs = dict(strides=2, activation=self._act)
+         x = tf.reshape(obs['image'], (-1,) + tuple(obs['image'].shape[-3:]))
+         x = self.get('h1', tfkl.Conv2D, 1 * self._depth, 4, **kwargs)(x)
+         x = self.get('h2', tfkl.Conv2D, 2 * self._depth, 4, **kwargs)(x)
+         x = self.get('h3', tfkl.Conv2D, 4 * self._depth, 4, **kwargs)(x)
+         x = self.get('h4', tfkl.Conv2D, 8 * self._depth, 4, **kwargs)(x)
+         x = self.get('h5', tfkl.Conv2D, 8 * self._depth, 4, **kwargs)(x)
+         shape = tf.concat([tf.shape(obs['image'])[:-3], [32 * self._depth]], 0)
+         return tf.reshape(x, shape)
+
+
+ class ConvDecoderLarge(tools.Module):
+     def __init__(self, depth=32, act=tf.nn.relu, shape=(128, 128, 3)):
+         self._act = act
+         self._depth = depth
+         self._shape = shape
+
+     def __call__(self, features):
+         kwargs = dict(strides=2, activation=self._act)
+         x = self.get('h1', tfkl.Dense, 32 * self._depth, None)(features)
+         x = tf.reshape(x, [-1, 1, 1, 32 * self._depth])
+         x = self.get('h2', tfkl.Conv2DTranspose, 4 * self._depth, 5, **kwargs)(x)
+         x = self.get('h3', tfkl.Conv2DTranspose, 2 * self._depth, 5, **kwargs)(x)
+         x = self.get('h4', tfkl.Conv2DTranspose, 1 * self._depth, 5, **kwargs)(x)
+         x = self.get('h5', tfkl.Conv2DTranspose, 1 * self._depth, 6, **kwargs)(x)
+         x = self.get('h6', tfkl.Conv2DTranspose, self._shape[-1], 6, strides=2)(x)
+         mean = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0))
+         return tfd.Independent(tfd.Normal(mean, 1), len(self._shape))
+
+
+ class ConvEncoder(tools.Module):
+     def __init__(self, depth=32, act=tf.nn.relu):
+         self._act = act
+         self._depth = depth
+
+     def __call__(self, obs):
+         kwargs = dict(strides=2, activation=self._act)
+         x = tf.reshape(obs['image'], (-1,) + tuple(obs['image'].shape[-3:]))
+         x = self.get('h1', tfkl.Conv2D, 1 * self._depth, 4, **kwargs)(x)
+         x = self.get('h2', tfkl.Conv2D, 2 * self._depth, 4, **kwargs)(x)
+         x = self.get('h3', tfkl.Conv2D, 4 * self._depth, 4, **kwargs)(x)
+         x = self.get('h4', tfkl.Conv2D, 8 * self._depth, 4, **kwargs)(x)
+         shape = tf.concat([tf.shape(obs['image'])[:-3], [32 * self._depth]], 0)
+         return tf.reshape(x, shape)
+
+
+ class ConvDecoder(tools.Module):
+     def __init__(self, depth=32, act=tf.nn.relu, shape=(64, 64, 3)):
+         self._act = act
+         self._depth = depth
+         self._shape = shape
+
+     def __call__(self, features):
+         kwargs = dict(strides=2, activation=self._act)
+         x = self.get('h1', tfkl.Dense, 32 * self._depth, None)(features)
+         x = tf.reshape(x, [-1, 1, 1, 32 * self._depth])
+         x = self.get('h2', tfkl.Conv2DTranspose, 4 * self._depth, 5, **kwargs)(x)
+         x = self.get('h3', tfkl.Conv2DTranspose, 2 * self._depth, 5, **kwargs)(x)
+         x = self.get('h4', tfkl.Conv2DTranspose, 1 * self._depth, 6, **kwargs)(x)
+         x = self.get('h5', tfkl.Conv2DTranspose, self._shape[-1], 6, strides=2)(x)
+         mean = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0))
+         return tfd.Independent(tfd.Normal(mean, 1), len(self._shape))
+
+
+ class DenseDecoder(tools.Module):
+     def __init__(self, shape, layers, units, dist='normal', act=tf.nn.elu):
+         self._shape = shape
+         self._layers = layers
+         self._units = units
+         self._dist = dist
+         self._act = act
+
+     def __call__(self, features):
+         x = features
+         for index in range(self._layers):
+             x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)
+         x = self.get(f'hout', tfkl.Dense, np.prod(self._shape))(x)
+         x = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0))
+         if self._dist == 'normal':
+             return tfd.Independent(tfd.Normal(x, 1), len(self._shape))
+         if self._dist == 'binary':
+             return tfd.Independent(tfd.Bernoulli(x), len(self._shape))
+         raise NotImplementedError(self._dist)
+
+
+ class DenseNetwork(tools.Module):
+     def __init__(self, shape, layers, units, act=tf.nn.elu):
+         self._shape = shape
+         self._layers = layers
+         self._units = units
+         self._act = act
+
+     def __call__(self, features):
+         x = features
+         for index in range(self._layers):
+             x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)
+         x = self.get(f'hout', tfkl.Dense, self._shape)(x)
+         return x
+
+
+ class ActorNetwork(tools.Module):
+     def __init__(self, shape, layers, units, act=tf.nn.elu, mean_scale=1.0):
+         self._shape = shape
+         self._layers = layers
+         self._units = units
+         self._act = act
+         self._mean_scale = mean_scale
+
+     def __call__(self, features):
+         x = features
+         for index in range(self._layers):
+             x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)
+         x = self.get(f'hout', tfkl.Dense, self._shape)(x)
+         x = self._mean_scale * tf.tanh(x)
+         return x
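A minimal sketch of the ensemble RSSM interface (not part of the commit; shapes are illustrative and assume the TF 2.x / tensorflow-probability versions targeted by the Dockerfiles in this repo):

import tensorflow as tf
from baselines.lompo import models

rssm = models.RSSME(stoch=64, deter=256, hidden=256, num_models=7)
embed = tf.random.normal([8, 10, 1024])    # (batch, time, embedding from ConvEncoder)
action = tf.random.normal([8, 10, 6])      # (batch, time, action_dim)
post, prior = rssm.observe(embed, action)  # filtering vs. one-step-ahead latent states
feat = rssm.get_feat(post)                 # concat of stochastic and deterministic parts
print(feat.shape)                          # (8, 10, 320)
# img_step(prev_state, prev_action, k=i) rolls the i-th ensemble head forward;
# the disagreement between heads is what the reward penalty in run.py is built from.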
baselines/lompo/run.py ADDED
@@ -0,0 +1,723 @@
1
+ import argparse
2
+ from copy import deepcopy
3
+ import functools
4
+ import os
5
+ import pathlib
6
+
7
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
8
+
9
+ import numpy as np
10
+ import random
11
+ import tensorflow as tf
12
+ from tensorflow.keras.mixed_precision import experimental as prec
13
+
14
+ tf.get_logger().setLevel('ERROR')
15
+
16
+ from tensorflow_probability import distributions as tfd
17
+
18
+ from baselines.lompo import buffer
19
+ from baselines.lompo import models
20
+ from baselines.lompo import tools
21
+ from baselines.lompo import wrappers
22
+
23
+
24
+ def define_config():
25
+ config = tools.AttrDict()
26
+ # General.
27
+ config.logdir = pathlib.Path('.logdir')
28
+ config.loaddir = pathlib.Path('.logdir')
29
+ config.datadir = pathlib.Path('.datadir/walker')
30
+ config.seed = 0
31
+ config.log_every = 1000
32
+ config.save_every = 5000
33
+ config.log_scalars = True
34
+ config.log_images = True
35
+ config.gpu_growth = True
36
+
37
+ # Environment.
38
+ config.task = 'dmc_walker_walk'
39
+ config.envs = 1
40
+ config.parallel = 'none'
41
+ config.action_repeat = 2
42
+ config.time_limit = 1000
43
+ config.im_size = 64
44
+ config.eval_noise = 0.0
45
+ config.clip_rewards = 'none'
46
+ config.precision = 32
47
+
48
+ # Model.
49
+ config.deter_size = 256
50
+ config.stoch_size = 64
51
+ config.num_models = 7
52
+ config.num_units = 256
53
+ config.proprio = False
54
+ config.penalty_type = 'log_prob'
55
+ config.dense_act = 'elu'
56
+ config.cnn_act = 'relu'
57
+ config.cnn_depth = 32
58
+ config.pcont = False
59
+ config.kl_scale = 1.0
60
+ config.pcont_scale = 10.0
61
+ config.weight_decay = 0.0
62
+ config.weight_decay_pattern = r'.*'
63
+
64
+ # Training.
65
+ config.load_model = False
66
+ config.load_agent = False
67
+ config.load_buffer = False
68
+ config.train_steps = 100000
69
+ config.model_train_steps = 25000
70
+ config.model_batch_size = 64
71
+ config.model_batch_length = 50
72
+ config.agent_batch_size = 256
73
+ config.cql_samples = 16
74
+ config.start_training = 50000
75
+ config.agent_train_steps = 150000
76
+ config.agent_iters_per_step = 200
77
+ config.buffer_size = 2e6
78
+ config.model_lr = 6e-4
79
+ config.q_lr = 3e-4
80
+ config.actor_lr = 3e-4
81
+ config.grad_clip = 100.0
82
+ config.tau = 5e-3
83
+ config.target_update_interval = 1
84
+ config.dataset_balance = False
85
+
86
+ # Behavior.
87
+ config.lmbd = 5.0
88
+ config.alpha = 0.0
89
+ config.sample = True
90
+ config.discount = 0.99
91
+ config.disclam = 0.95
92
+ config.horizon = 5
93
+ config.done_treshold = 0.5
94
+ config.action_dist = 'tanh_normal'
95
+ config.action_init_std = 5.0
96
+ config.expl = 'additive_gaussian'
97
+ config.expl_amount = 0.2
98
+ config.expl_decay = 0.0
99
+ config.expl_min = 0.0
100
+ return config
101
+
102
+
103
+ class Lompo(tools.Module):
104
+
105
+ def __init__(self, config, datadir, actspace, writer):
106
+ self._c = config
107
+ self._actspace = actspace
108
+ self._actdim = actspace.n if hasattr(actspace, 'n') else actspace.shape[0]
109
+ episodes, steps = tools.count_episodes(datadir)
110
+ self.latent_buffer = buffer.LatentReplayBuffer(steps,
111
+ steps,
112
+ self._c.deter_size + self._c.stoch_size,
113
+ self._actdim)
114
+ self.lmbd = config.lmbd
115
+ self.alpha = config.alpha
116
+
117
+ self._writer = writer
118
+ tf.summary.experimental.set_step(0)
119
+ self._metrics = dict()
120
+
121
+ self._agent_step = 0
122
+ self._model_step = 0
123
+
124
+ self._random = np.random.RandomState(config.seed)
125
+ self._float = prec.global_policy().compute_dtype
126
+ self._dataset = iter(load_dataset(datadir, self._c))
127
+ self._episode_iterator = episode_iterator(datadir, self._c)
128
+
129
+ self._build_model()
130
+ for _ in range(10):
131
+ self._model_train_step(next(self._dataset), prefix='eval')
132
+
133
+ def __call__(self, obs, reset, state=None, training=True):
134
+ if state is not None and reset.any():
135
+ mask = tf.cast(1 - reset, self._float)[:, None]
136
+ state = tf.nest.map_structure(lambda x: x * mask, state)
137
+ action, state = self.policy(obs, state, training)
138
+ return action, state
139
+
140
+ def load(self, filename):
141
+ try:
142
+ self.load_model(filename)
143
+ except:
144
+ pass
145
+
146
+ try:
147
+ self.load_agent(filename)
148
+ except:
149
+ pass
150
+
151
+ def save(self, filename):
152
+ self.save_model(filename)
153
+ self.save_agentl(filename)
154
+
155
+ def load_model(self, filename):
156
+ self._encode.load(filename / 'encode.pkl')
157
+ self._dynamics.load(filename / 'dynamic.pkl')
158
+ self._decode.load(filename / 'decode.pkl')
159
+ self._reward.load(filename / 'reward.pkl')
160
+ if self._c.pcont:
161
+ self._pcont.load(filename / 'pcont.pkl')
162
+ if self._c.proprio:
163
+ self._proprio.load(filename / 'proprio.pkl')
164
+
165
+ def save_model(self, filename):
166
+ filename.mkdir(parents=True, exist_ok=True)
167
+ self._encode.save(filename / 'encode.pkl')
168
+ self._dynamics.save(filename / 'dynamic.pkl')
169
+ self._decode.save(filename / 'decode.pkl')
170
+ self._reward.save(filename / 'reward.pkl')
171
+ if self._c.pcont:
172
+ self._pcont.save(filename / 'pcont.pkl')
173
+ if self._c.proprio:
174
+ self._proprio.save(filename / 'proprio.pkl')
175
+
176
+ def load_agent(self, filename):
177
+ self._qf1.load(filename / 'qf1.pkl')
178
+ self._qf2.load(filename / 'qf2.pkl')
179
+ self._target_qf1.load(filename / 'target_qf1.pkl')
180
+ self._target_qf2.load(filename / 'target_qf2.pkl')
181
+ self._actor.load(filename / 'actor.pkl')
182
+
183
+ def save_agent(self, filename):
184
+ filename.mkdir(parents=True, exist_ok=True)
185
+ self._qf1.save(filename / 'qf1.pkl')
186
+ self._qf2.save(filename / 'qf2.pkl')
187
+ self._target_qf1.save(filename / 'target_qf1.pkl')
188
+ self._target_qf2.save(filename / 'target_qf2.pkl')
189
+ self._actor.save(filename / 'actor.pkl')
190
+
191
+ def policy(self, obs, state, training):
192
+ if state is None:
193
+ latent = self._dynamics.initial(len(obs['image']))
194
+ action = tf.zeros((len(obs['image']), self._actdim), self._float)
195
+ else:
196
+ latent, action = state
197
+
198
+ embed = self._encode(preprocess_raw(obs, self._c))
199
+ latent, _ = self._dynamics.obs_step(latent, action, embed)
200
+ feat = self._dynamics.get_feat(latent)
201
+ action = self._exploration(self._actor(feat), training)
202
+ state = (latent, action)
203
+ return action, state
204
+
205
+ def _build_model(self):
206
+ acts = dict(elu=tf.nn.elu, relu=tf.nn.relu,
207
+ swish=tf.nn.swish, leaky_relu=tf.nn.leaky_relu)
208
+ cnn_act = acts[self._c.cnn_act]
209
+ act = acts[self._c.dense_act]
210
+
211
+ # Create encoder based on environment observations
212
+ if self._c.proprio:
213
+ if self._c.im_size == 64:
214
+ self._encode = models.ConvEncoderProprio(self._c.cnn_depth, cnn_act)
215
+ else:
216
+ self._encode = models.ConvEncoderProprioLarge(self._c.cnn_depth, cnn_act)
217
+ else:
218
+ if self._c.im_size == 64:
219
+ self._encode = models.ConvEncoder(self._c.cnn_depth, cnn_act)
220
+ else:
221
+ self._encode = models.ConvEncoderLarge(self._c.cnn_depth, cnn_act)
222
+ # RSSM model with ensembles
223
+ self._dynamics = models.RSSME(self._c.stoch_size, self._c.deter_size,
224
+ self._c.deter_size, num_models=self._c.num_models)
225
+ # Create decoder based on image size
226
+ if self._c.im_size == 64:
227
+ self._decode = models.ConvDecoder(self._c.cnn_depth, cnn_act,
228
+ shape=(self._c.im_size, self._c.im_size, 3))
229
+ else:
230
+ self._decode = models.ConvDecoderLarge(self._c.cnn_depth, cnn_act,
231
+ shape=(self._c.im_size, self._c.im_size, 3))
232
+ if self._c.proprio:
233
+ self._proprio = models.DenseDecoder((self._propriodim,), 3, self._c.num_units, act=act)
234
+ if self._c.pcont:
235
+ self._pcont = models.DenseDecoder((), 3, self._c.num_units, 'binary', act=act)
236
+ self._reward = models.DenseDecoder((), 2, self._c.num_units, act=act)
237
+
238
+ model_modules = [self._encode, self._dynamics, self._decode, self._reward]
239
+ if self._c.proprio:
240
+ model_modules.append(self._proprio)
241
+ if self._c.pcont:
242
+ model_modules.append(self._pcont)
243
+
244
+ # Build actor-critic networks
245
+ self._qf1 = models.DenseNetwork(1, 3, self._c.num_units, act=act)
246
+ self._qf2 = models.DenseNetwork(1, 3, self._c.num_units, act=act)
247
+ self._target_qf1 = deepcopy(self._qf2)
248
+ self._target_qf2 = deepcopy(self._qf1)
249
+ self._qf_criterion = tf.keras.losses.Huber()
250
+ self._actor = models.ActorNetwork(self._actdim, 4, self._c.num_units, act=act)
251
+
252
+ # Initialize optimizers
253
+ Optimizer = functools.partial(tools.Adam,
254
+ wd=self._c.weight_decay,
255
+ clip=self._c.grad_clip,
256
+ wdpattern=self._c.weight_decay_pattern)
257
+
258
+ self._model_opt = Optimizer('model', model_modules, self._c.model_lr)
259
+ self._qf_opt = Optimizer('qf', [self._qf1, self._qf2], self._c.q_lr)
260
+ self._actor_opt = Optimizer('actor', [self._actor], self._c.actor_lr)
261
+
262
+ def _exploration(self, action, training):
263
+ if training:
264
+ amount = self._c.expl_amount
265
+ if self._c.expl_decay:
266
+ amount *= 0.5 ** (tf.cast(self._agent_step, tf.float32) / self._c.expl_decay)
267
+ if self._c.expl_min:
268
+ amount = tf.maximum(self._c.expl_min, amount)
269
+ self._metrics['expl_amount'] = amount
270
+ elif self._c.eval_noise:
271
+ amount = self._c.eval_noise
272
+ else:
273
+ return action
274
+ if self._c.expl == 'additive_gaussian':
275
+ return tf.clip_by_value(tfd.Normal(action, amount).sample(), -1, 1)
276
+ if self._c.expl == 'completely_random':
277
+ return tf.random.uniform(action.shape, -1, 1)
278
+ raise NotImplementedError(self._c.expl)
279
+
280
+ def fit_model(self, iters):
281
+ for iter in range(iters):
282
+ data = next(self._dataset)
283
+ self._model_train_step(data)
284
+ if iter % self._c.save_every == 0:
285
+ self.save_model(self._c.logdir / 'model_step_{}'.format(iter))
286
+ self.save_model(self._c.logdir / 'final_model')
287
+
288
+ def _model_train_step(self, data, prefix='train'):
289
+ with tf.GradientTape() as model_tape:
290
+ embed = self._encode(data)
291
+ post, prior = self._dynamics.observe(embed, data['action'])
292
+ feat = self._dynamics.get_feat(post)
293
+ image_pred = self._decode(feat)
294
+ reward_pred = self._reward(feat)
295
+ likes = tools.AttrDict()
296
+ likes.image = tf.reduce_mean(tf.boolean_mask(image_pred.log_prob(data['image']),
297
+ data['mask']))
298
+ likes.reward = tf.reduce_mean(tf.boolean_mask(reward_pred.log_prob(data['reward']),
299
+ data['mask']))
300
+ if self._c.pcont:
301
+ pcont_pred = self._pcont(feat)
302
+ pcont_target = data['terminal']
303
+ likes.pcont = tf.reduce_mean(tf.boolean_mask(pcont_pred.log_prob(pcont_target),
304
+ data['mask']))
305
+ likes.pcont *= self._c.pcont_scale
306
+
307
+ for key in prior.keys():
308
+ prior[key] = tf.boolean_mask(prior[key], data['mask'])
309
+ post[key] = tf.boolean_mask(post[key], data['mask'])
310
+
311
+ prior_dist = self._dynamics.get_dist(prior)
312
+ post_dist = self._dynamics.get_dist(post)
313
+ div = tf.reduce_mean(tfd.kl_divergence(post_dist, prior_dist))
314
+ model_loss = self._c.kl_scale * div - sum(likes.values())
315
+
316
+ if prefix == 'train':
317
+ model_norm = self._model_opt(model_tape, model_loss)
318
+ self._model_step += 1
319
+
320
+ if self._model_step % self._c.log_every == 0:
321
+ self._image_summaries(data, embed, image_pred, self._model_step, prefix)
322
+ model_summaries = dict()
323
+ model_summaries['model_train/KL Divergence'] = tf.reduce_mean(div)
324
+ model_summaries['model_train/image_recon'] = tf.reduce_mean(likes.image)
325
+ model_summaries['model_train/reward_recon'] = tf.reduce_mean(likes.reward)
326
+ model_summaries['model_train/model_loss'] = tf.reduce_mean(model_loss)
327
+ if prefix == 'train':
328
+ model_summaries['model_train/model_norm'] = tf.reduce_mean(model_norm)
329
+ if self._c.pcont:
330
+ model_summaries['model_train/terminal_recon'] = tf.reduce_mean(likes.pcont)
331
+ self._write_summaries(model_summaries, self._model_step)
332
+
333
+ def train_agent(self, iters):
334
+ for iter in range(iters):
335
+ data = preprocess_latent(self.latent_buffer.sample(self._c.agent_batch_size))
336
+ self._agent_train_step(data)
337
+ if self._agent_step % self._c.target_update_interval == 0:
338
+ self._update_target_critics()
339
+ if iter % self._c.save_every == 0:
340
+ self.save_agent(self._c.logdir)
341
+ self.save_agent(self._c.logdir / 'final_agent')
342
+
343
+ def _agent_train_step(self, data):
344
+ obs = data['obs']
345
+ actions = data['actions']
346
+ next_obs = data['next_obs']
347
+ rewards = data['rewards']
348
+ terminals = data['terminals']
349
+
350
+ with tf.GradientTape() as q_tape:
351
+ q1_pred = self._qf1(tf.concat([obs, actions], axis=-1))
352
+ q2_pred = self._qf2(tf.concat([obs, actions], axis=-1))
353
+ # new_next_actions = self._exploration(self._actor(next_obs), True)
354
+ new_actions = self._actor(obs)
355
+ new_next_actions = self._actor(next_obs)
356
+
357
+ target_q_values = tf.reduce_min([self._target_qf1(tf.concat([next_obs, new_next_actions], axis=-1)),
358
+ self._target_qf2(tf.concat([next_obs, new_next_actions], axis=-1))],
359
+ axis=0)
360
+ q_target = rewards + self._c.discount * (1.0 - terminals) * target_q_values
361
+
362
+ expanded_actions = tf.expand_dims(actions, 0)
363
+ tilled_actions = tf.tile(expanded_actions, [self._c.cql_samples, 1, 1])
364
+ tilled_actions = tf.random.uniform(tilled_actions.shape, minval=-1, maxval=1)
365
+ tilled_actions = tf.concat([tilled_actions, tf.expand_dims(new_actions, 0)], axis=0)
366
+
367
+ expanded_obs = tf.expand_dims(obs, 0)
368
+ tilled_obs = tf.tile(expanded_obs, [self._c.cql_samples + 1, 1, 1])
369
+
370
+ q1_values = self._qf1(tf.concat([tilled_obs, tilled_actions], axis=-1))
371
+ q2_values = self._qf2(tf.concat([tilled_obs, tilled_actions], axis=-1))
372
+ q1_penalty = tf.math.reduce_logsumexp(q1_values, axis=0)
373
+ q2_penalty = tf.math.reduce_logsumexp(q2_values, axis=0)
374
+
375
+ qf1_loss = self.alpha * (tf.reduce_mean(q1_penalty) - tf.reduce_mean(q1_pred[:self._c.agent_batch_size])) + \
376
+ tf.reduce_mean((q1_pred - tf.stop_gradient(q_target)) ** 2)
377
+ qf2_loss = self.alpha * (tf.reduce_mean(q2_penalty) - tf.reduce_mean(q2_pred[:self._c.agent_batch_size])) + \
378
+ tf.reduce_mean((q2_pred - tf.stop_gradient(q_target)) ** 2)
379
+
380
+ q_loss = qf1_loss + qf2_loss
381
+
382
+ with tf.GradientTape() as actor_tape:
383
+ new_obs_actions = self._actor(obs)
384
+ q_new_actions = tf.reduce_min([self._qf1(tf.concat([obs, new_obs_actions], axis=-1)),
385
+ self._qf2(tf.concat([obs, new_obs_actions], axis=-1))], axis=0)
386
+ actor_loss = -tf.reduce_mean(q_new_actions)
387
+
388
+ q_norm = self._qf_opt(q_tape, q_loss)
389
+ actor_norm = self._actor_opt(actor_tape, actor_loss)
390
+ self._agent_step += 1
391
+
392
+ if self._agent_step % self._c.log_every == 0:
393
+ agent_summaries = dict()
394
+ agent_summaries['agent/Q1_value'] = tf.reduce_mean(q1_pred)
395
+ agent_summaries['agent/Q2_value'] = tf.reduce_mean(q2_pred)
396
+ agent_summaries['agent/Q_target'] = tf.reduce_mean(q_target)
397
+ agent_summaries['agent/Q_loss'] = q_loss
398
+ agent_summaries['agent/actor_loss'] = actor_loss
399
+ agent_summaries['agent/Q_grad_norm'] = q_norm
400
+ agent_summaries['agent/actor_grad_norm'] = actor_norm
401
+ self._write_summaries(agent_summaries, self._agent_step)
402
+
403
+ def _update_target_critics(self):
404
+ tau = tf.constant(self._c.tau)
405
+ for source_weight, target_weight in zip(self._qf1.trainable_variables,
406
+ self._target_qf1.trainable_variables):
407
+ target_weight.assign(tau * source_weight + (1.0 - tau) * target_weight)
408
+ for source_weight, target_weight in zip(self._qf2.trainable_variables,
409
+ self._target_qf2.trainable_variables):
410
+ target_weight.assign(tau * source_weight + (1.0 - tau) * target_weight)
411
+
412
+ def _generate_latent_data(self, data):
413
+ embed = self._encode(data)
414
+ post, prior = self._dynamics.observe(embed, data['action'])
415
+ if self._c.pcont: # Last step could be terminal.
416
+ post = {k: v[:, :-1] for k, v in post.items()}
417
+ for key in post.keys():
418
+ post[key] = tf.boolean_mask(post[key], data['mask'])
419
+ start = post
420
+
421
+ policy = lambda state: tf.stop_gradient(
422
+ self._exploration(self._actor(self._dynamics.get_feat(state)), True))
423
+
424
+ obs = [[] for _ in tf.nest.flatten(start)]
425
+ next_obs = [[] for _ in tf.nest.flatten(start)]
426
+ actions = []
427
+ full_posts = [[[] for _ in tf.nest.flatten(start)] for _ in range(self._c.num_models)]
428
+ prev = start
429
+
430
+ for index in range(self._c.horizon):
431
+ [o.append(l) for o, l in zip(obs, tf.nest.flatten(prev))]
432
+ a = policy(prev)
433
+ actions.append(a)
434
+ for i in range(self._c.num_models):
435
+ p = self._dynamics.img_step(prev, a, k=i)
436
+ [o.append(l) for o, l in zip(full_posts[i], tf.nest.flatten(p))]
437
+ prev = self._dynamics.img_step(prev, a, k=np.random.choice(self._c.num_models, 1)[0])
438
+ [o.append(l) for o, l in zip(next_obs, tf.nest.flatten(prev))]
439
+
440
+ obs = self._dynamics.get_feat(tf.nest.pack_sequence_as(start, [tf.stack(x, 0) for x in obs]))
441
+ stoch = tf.nest.pack_sequence_as(start, [tf.stack(x, 0) for x in next_obs])['stoch']
442
+ next_obs = self._dynamics.get_feat(tf.nest.pack_sequence_as(start, [tf.stack(x, 0) for x in next_obs]))
443
+ actions = tf.stack(actions, 0)
444
+ rewards = self._reward(next_obs).mode()
445
+ if self._c.pcont:
446
+ dones = 1.0 * (self._pcont(next_obs).mean().numpy() > self._c.done_treshold)
447
+ else:
448
+ dones = tf.zeros_like(rewards)
449
+
450
+ dists = [self._dynamics.get_dist(
451
+ tf.nest.pack_sequence_as(start, [tf.stack(x, 0) for x in full_posts[i]]))
452
+ for i in range(self._c.num_models)]
453
+
454
+ # Compute penalty based on specification
455
+ if self._c.penalty_type == 'log_prob':
456
+ log_prob_vars = tf.math.reduce_std(
457
+ tf.stack([d.log_prob(stoch) for d in dists], 0),
458
+ axis=0)
459
+ modified_rewards = rewards - self.lmbd * log_prob_vars
460
+ elif self._c.penalty_type == 'max_var':
461
+ max_std = tf.reduce_max(
462
+ tf.stack([tf.norm(d.stddev(), 2, -1) for d in dists], 0),
463
+ axis=0)
464
+ modified_rewards = rewards - self.lmbd * max_std
465
+ elif self._c.penalty_type == 'mean':
466
+ mean_prediction = tf.reduce_mean(tf.stack([d.mean() for d in dists], 0), axis=0)
467
+ mean_disagreement = tf.reduce_mean(
468
+ tf.stack([tf.norm(d.mean() - mean_prediction, 2, -1) for d in dists], 0),
469
+ axis=0)
470
+ modified_rewards = rewards - self.lmbd * mean_disagreement
471
+ else:
472
+ modified_rewards = rewards
473
+
474
+ self.latent_buffer.add_samples(flatten(obs).numpy(),
475
+ flatten(actions).numpy(),
476
+ flatten(next_obs).numpy(),
477
+ flatten(modified_rewards).numpy(),
478
+ flatten(dones),
479
+ sample_type='latent')
480
+
481
+ obs = [[] for _ in tf.nest.flatten(start)]
482
+ next_obs = [[] for _ in tf.nest.flatten(start)]
483
+ actions = []
484
+ full_posts = [[[] for _ in tf.nest.flatten(start)] for _ in range(self._c.num_models)]
485
+
486
+ for key in prev.keys():
487
+ prev[key] = tf.boolean_mask(prev[key], flatten(1.0 - dones))
488
+
489
+ def _add_data(self, num_episodes=1):
490
+ self._process_data_to_latent(num_episodes=num_episodes)
491
+ self._generate_latent_data(next(self._dataset))
492
+
493
+ def _process_data_to_latent(self, num_episodes=None):
494
+ if num_episodes is None:
495
+ num_episodes, _ = tools.count_episodes(self._c.datadir)
496
+
497
+ for _ in range(num_episodes):
498
+ filename = next(self._episode_iterator)
499
+ try:
500
+ with filename.open('rb') as f:
501
+ episode = np.load(f)
502
+ episode = {k: episode[k] for k in episode.keys()}
503
+ except Exception as e:
504
+ print(f'Could not load episode: {e}')
505
+ continue
506
+
507
+ obs = preprocess_raw(episode, self._c)
508
+ if not self._c.pcont:
509
+ obs['terminal'] = tf.zeros_like(obs['reward'])
510
+ with tf.GradientTape(watch_accessed_variables=False) as _:
511
+ embed = self._encode(obs)
512
+ post, prior = self._dynamics.observe(tf.expand_dims(embed, 0),
513
+ tf.expand_dims(obs['action'], 0))
514
+ feat = flatten(self._dynamics.get_feat(post))
515
+ self.latent_buffer.add_samples(feat.numpy()[:-1],
516
+ obs['action'].numpy()[1:],
517
+ feat.numpy()[1:],
518
+ obs['reward'].numpy()[1:],
519
+ obs['terminal'].numpy()[1:],
520
+ sample_type='real')
521
+
522
+ def _image_summaries(self, data, embed, image_pred, step=None, prefix='train'):
523
+ truth = data['image'][:6] + 0.5
524
+ recon = image_pred.mode()[:6]
525
+ init, _ = self._dynamics.observe(embed[:6, :5], data['action'][:6, :5])
526
+ init = {k: v[:, -1] for k, v in init.items()}
527
+ prior = self._dynamics.imagine(data['action'][:6, 5:], init)
528
+ openl = self._decode(self._dynamics.get_feat(prior)).mode()
529
+ model = tf.concat([recon[:, :5] + 0.5, openl + 0.5], 1)
530
+ error_prior = (model - truth + 1) / 2
531
+ error_posterior = (recon + 0.5 - truth + 1) / 2
532
+ openl = tf.concat([truth, recon + 0.5, model, error_prior, error_posterior], 2)
533
+ with self._writer.as_default():
534
+ tools.video_summary('agent/' + prefix, openl.numpy(), step=step)
535
+
536
+ def _write_summaries(self, metrics, step=None):
537
+ step = int(step)
538
+ metrics = [(k, float(v)) for k, v in metrics.items()]
539
+ with self._writer.as_default():
540
+ tf.summary.experimental.set_step(step)
541
+ [tf.summary.scalar(k, m, step=step) for k, m in metrics]
542
+ print(f'[{step}]', ' / '.join(f'{k} {v:.1f}' for k, v in metrics))
543
+ self._writer.flush()
544
+
545
+
546
+ def preprocess_raw(obs, config):
547
+ dtype = prec.global_policy().compute_dtype
548
+ obs = obs.copy()
549
+
550
+ with tf.device('cpu:0'):
551
+ obs['image'] = tf.cast(obs['image'], dtype) / 255.0 - 0.5
552
+ if 'image_128' in obs.keys():
553
+ obs['image_128'] = tf.cast(obs['image_128'], dtype) / 255.0 - 0.5
554
+ clip_rewards = dict(none=lambda x: x, tanh=tf.tanh)[config.clip_rewards]
555
+ obs['reward'] = clip_rewards(obs['reward'])
556
+ for k in obs.keys():
557
+ obs[k] = tf.cast(obs[k], dtype)
558
+ return obs
559
+
560
+
561
+ def flatten(x):
562
+ return tf.reshape(x, [-1] + list(x.shape[2:]))
563
+
564
+
565
+ def preprocess_latent(batch):
566
+ dtype = prec.global_policy().compute_dtype
567
+ batch = batch.copy()
568
+ with tf.device('cpu:0'):
569
+ for key in batch.keys():
570
+ batch[key] = tf.cast(batch[key], dtype)
571
+ return batch
572
+
573
+
574
+ def count_steps(datadir, config):
575
+ return tools.count_episodes(datadir)[1] * config.action_repeat
576
+
577
+
578
+ def load_dataset(directory, config):
579
+ episode = next(tools.load_episodes(directory, 1000, load_episodes=1))
580
+ types = {k: v.dtype for k, v in episode.items()}
581
+ shapes = {k: (None,) + v.shape[1:] for k, v in episode.items()}
582
+ generator = lambda: tools.load_episodes(directory, config.train_steps,
583
+ config.model_batch_length, config.dataset_balance)
584
+ dataset = tf.data.Dataset.from_generator(generator, types, shapes)
585
+ dataset = dataset.batch(config.model_batch_size, drop_remainder=True)
586
+ dataset = dataset.map(functools.partial(preprocess_raw, config=config))
587
+ dataset = dataset.prefetch(10)
588
+ return dataset
589
+
590
+
591
+ def episode_iterator(datadir, config):
592
+ while True:
593
+ filenames = list(datadir.glob('*.npz'))
594
+ for filename in list(filenames):
595
+ yield filename
596
+
597
+
598
+ def summarize_episode(episode, config, datadir, writer, prefix):
599
+ length = (len(episode['reward']) - 1) * config.action_repeat
600
+ ret = episode['reward'].sum()
601
+ print(f'{prefix.title()} episode of length {length} with return {ret:.1f}.')
602
+ metrics = [
603
+ (f'{prefix}/return', float(episode['reward'].sum())),
604
+ (f'{prefix}/length', len(episode['reward']) - 1)]
605
+ with writer.as_default(): # Env might run in a different thread.
606
+ [tf.summary.scalar('sim/' + k, v) for k, v in metrics]
607
+ if prefix == 'test':
608
+ tools.video_summary(f'sim/{prefix}/video', episode['image'][None])
609
+
610
+
611
+ def make_env(config, writer, prefix, datadir, store):
612
+ suite, task = config.task.split('_', 1)
613
+ if suite == 'dmc':
614
+ env = wrappers.DeepMindControl(task)
615
+ env = wrappers.ActionRepeat(env, config.action_repeat)
616
+ env = wrappers.NormalizeActions(env)
617
+ elif suite == 'gym':
618
+ env = wrappers.Gym(task, config, size=(128, 128))
619
+ env = wrappers.ActionRepeat(env, config.action_repeat)
620
+ env = wrappers.NormalizeActions(env)
621
+ elif task == 'door':
622
+ env = wrappers.DoorOpen(config, size=(128, 128))
623
+ env = wrappers.ActionRepeat(env, config.action_repeat)
624
+ env = wrappers.NormalizeActions(env)
625
+ elif task == 'drawer':
626
+ env = wrappers.DrawerOpen(config, size=(128, 128))
627
+ env = wrappers.ActionRepeat(env, config.action_repeat)
628
+ env = wrappers.NormalizeActions(env)
629
+ else:
630
+ raise NotImplementedError(suite)
631
+ env = wrappers.TimeLimit(env, config.time_limit / config.action_repeat)
632
+ callbacks = []
633
+ if store:
634
+ callbacks.append(lambda ep: tools.save_episodes(datadir, [ep]))
635
+ if prefix == 'test':
636
+ callbacks.append(
637
+ lambda ep: summarize_episode(ep, config, datadir, writer, prefix))
638
+ env = wrappers.Collect(env, callbacks, config.precision)
639
+ env = wrappers.RewardObs(env)
640
+ return env
641
+
642
+
643
+ def main(config):
644
+ print(config)
645
+
646
+ # Set random seeds
647
+ os.environ['PYTHONHASHSEED'] = str(config.seed)
648
+ os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
649
+ random.seed(config.seed)
650
+ np.random.seed(config.seed)
651
+ tf.random.set_seed(config.seed)
652
+
653
+ if config.gpu_growth:
654
+ for gpu in tf.config.experimental.list_physical_devices('GPU'):
655
+ tf.config.experimental.set_memory_growth(gpu, True)
656
+
657
+ config.logdir = config.logdir / config.task
658
+ config.logdir = config.logdir / 'seed_{}'.format(config.seed)
659
+ config.logdir.mkdir(parents=True, exist_ok=True)
660
+ tf_dir = config.logdir / 'tensorboard'
661
+ writer = tf.summary.create_file_writer(str(tf_dir), max_queue=1000, flush_millis=20000)
662
+ writer.set_as_default()
663
+
664
+ # Create environments.
665
+ train_envs = [wrappers.Async(lambda: make_env(
666
+ config, writer, 'train', '.', store=False), config.parallel)
667
+ for _ in range(config.envs)]
668
+ test_envs = [wrappers.Async(lambda: make_env(
669
+ config, writer, 'test', '.', store=False), config.parallel)
670
+ for _ in range(config.envs)]
671
+ actspace = train_envs[0].action_space
672
+
673
+ # Train and regularly evaluate the agent.
674
+ agent = Lompo(config, config.datadir, actspace, writer)
675
+
676
+ if agent._c.load_model:
677
+ agent.load_model(config.loaddir / 'final_model')
678
+ print('Load pretrained model')
679
+ else:
680
+ agent.fit_model(agent._c.model_train_steps)
681
+ agent.save_model(config.logdir)
682
+
683
+ if agent._c.load_buffer:
684
+ agent.latent_buffer.load(agent._c.loaddir / 'buffer.h5py')
685
+ else:
686
+ agent._process_data_to_latent()
687
+ agent.latent_buffer.save(agent._c.logdir / 'buffer.h5py')
688
+
689
+ if agent._c.load_agent:
690
+ agent.load_agent(config.loaddir)
691
+ print('Load pretrained actor')
692
+
693
+ while agent.latent_buffer._latent_stored_steps < agent._c.start_training:
694
+ agent._generate_latent_data(next(agent._dataset))
695
+
696
+ while agent._agent_step < int(config.agent_train_steps):
697
+ print('Start evaluation.')
698
+ tools.simulate(
699
+ functools.partial(agent, training=False), test_envs, episodes=1)
700
+ writer.flush()
701
+ print('Start collection.')
702
+ agent.train_agent(agent._c.agent_iters_per_step)
703
+
704
+ if config.sample:
705
+ agent._add_data(num_episodes=1)
706
+ else:
707
+ agent._process_data_to_latent(num_episodes=1)
708
+
709
+ for env in train_envs + test_envs:
710
+ env.close()
711
+
712
+
713
+ if __name__ == '__main__':
714
+ try:
715
+ import colored_traceback
716
+
717
+ colored_traceback.add_hook()
718
+ except ImportError:
719
+ pass
720
+ parser = argparse.ArgumentParser()
721
+ for key, value in define_config().items():
722
+ parser.add_argument(f'--{key}', type=tools.args_type(value), default=value)
723
+ main(parser.parse_args())
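Every key returned by define_config() is also exposed as a command-line flag through tools.args_type, so hyperparameters can be overridden at launch. A small sketch (the paths in the comment are placeholders):

from baselines.lompo.run import define_config
from baselines.lompo import tools

config = define_config()           # AttrDict: attribute-style access to every hyperparameter
print(config.task, config.lmbd)    # dmc_walker_walk 5.0
# The argparse flags mirror these keys, e.g.:
#   python -m baselines.lompo.run --task dmc_walker_walk --datadir .datadir/walker --lmbd 5.0
assert callable(tools.args_type(config.logdir))  # pathlib values parse with user expansion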
baselines/lompo/tools.py ADDED
@@ -0,0 +1,243 @@
1
+ import copy
2
+ import datetime
3
+ import io
4
+ import pathlib
5
+ import pickle
6
+ import uuid
7
+
8
+ import numpy as np
9
+ import tensorflow as tf
10
+ import tensorflow.compat.v1 as tf1
11
+ from tensorflow_probability import distributions as tfd
12
+
13
+
14
+ class AttrDict(dict):
15
+ __setattr__ = dict.__setitem__
16
+ __getattr__ = dict.__getitem__
17
+
18
+
19
+ class Module(tf.Module):
20
+ def save(self, filename):
21
+ values = tf.nest.map_structure(lambda x: x.numpy(), self.variables)
22
+ with pathlib.Path(filename).open('wb') as f:
23
+ pickle.dump(values, f)
24
+
25
+ def load(self, filename):
26
+ with pathlib.Path(filename).open('rb') as f:
27
+ values = pickle.load(f)
28
+ tf.nest.map_structure(lambda x, y: x.assign(y), self.variables, values)
29
+
30
+ def get(self, name, ctor, *args, **kwargs):
31
+ # Create or get layer by name to avoid mentioning it in the constructor.
32
+ if not hasattr(self, '_modules'):
33
+ self._modules = {}
34
+ if name not in self._modules:
35
+ self._modules[name] = ctor(*args, **kwargs)
36
+ return self._modules[name]
37
+
38
+
39
+ def video_summary(name, video, step=None, fps=20):
40
+ name = name if isinstance(name, str) else name.decode('utf-8')
41
+ if np.issubdtype(video.dtype, np.floating):
42
+ video = np.clip(255 * video, 0, 255).astype(np.uint8)
43
+ B, T, H, W, C = video.shape
44
+ try:
45
+ frames = video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C))
46
+ summary = tf1.Summary()
47
+ image = tf1.Summary.Image(height=B * H, width=T * W, colorspace=C)
48
+ image.encoded_image_string = encode_gif(frames, fps)
49
+ summary.value.add(tag=name + '/gif', image=image)
50
+ tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step)
51
+ except (IOError, OSError) as e:
52
+ print('GIF summaries require ffmpeg in $PATH.', e)
53
+ frames = video.transpose((0, 2, 1, 3, 4)).reshape((1, B * H, T * W, C))
54
+ tf.summary.image(name + '/grid', frames, step)
55
+
56
+
57
+ def encode_gif(frames, fps):
58
+ from subprocess import Popen, PIPE
59
+ h, w, c = frames[0].shape
60
+ pxfmt = {1: 'gray', 3: 'rgb24'}[c]
61
+ cmd = ' '.join([
62
+ f'ffmpeg -y -f rawvideo -vcodec rawvideo',
63
+ f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex',
64
+ f'[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse',
65
+ f'-r {fps:.02f} -f gif -'])
66
+ proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE)
67
+ for image in frames:
68
+ proc.stdin.write(image.tostring())
69
+ out, err = proc.communicate()
70
+ if proc.returncode:
71
+ raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')]))
72
+ del proc
73
+ return out
74
+
75
+
76
+ def simulate(agent, envs, steps=0, episodes=0, state=None):
77
+ # Initialize or unpack simulation state.
78
+ if state is None:
79
+ step, episode = 0, 0
80
+ done = np.ones(len(envs), np.bool)
81
+ length = np.zeros(len(envs), np.int32)
82
+ obs = [None] * len(envs)
83
+ agent_state = None
84
+ else:
85
+ step, episode, done, length, obs, agent_state = state
86
+ while (steps and step < steps) or (episodes and episode < episodes):
87
+ # Reset envs if necessary.
88
+ if done.any():
89
+ indices = [index for index, d in enumerate(done) if d]
90
+ promises = [envs[i].reset(blocking=False) for i in indices]
91
+ for index, promise in zip(indices, promises):
92
+ obs[index] = promise()
93
+ # Step agents.
94
+ obs = {k: np.stack([o[k] for o in obs]) for k in obs[0]}
95
+ action, agent_state = agent(obs, done, agent_state)
96
+ action = np.array(action)
97
+ assert len(action) == len(envs)
98
+ # Step envs.
99
+ promises = [e.step(a, blocking=False) for e, a in zip(envs, action)]
100
+ obs, _, done = zip(*[p()[:3] for p in promises])
101
+ obs = list(obs)
102
+ done = np.stack(done)
103
+ episode += int(done.sum())
104
+ length += 1
105
+ step += (done * length).sum()
106
+ length *= (1 - done)
107
+ # Return new state to allow resuming the simulation.
108
+ return step - steps, episode - episodes, done, length, obs, agent_state
109
+
110
+
111
+ def count_episodes(directory):
112
+ filenames = directory.glob('*.npz')
113
+ lengths = [int(n.stem.rsplit('-', 1)[-1]) - 1 for n in filenames]
114
+ episodes, steps = len(lengths), sum(lengths)
115
+ return episodes, steps
116
+
117
+
118
+ def save_episodes(directory, episodes):
119
+ directory = pathlib.Path(directory).expanduser()
120
+ directory.mkdir(parents=True, exist_ok=True)
121
+ timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
122
+ for episode in episodes:
123
+ identifier = str(uuid.uuid4().hex)
124
+ length = len(episode['reward'])
125
+ filename = directory / f'{timestamp}-{identifier}-{length}.npz'
126
+ with io.BytesIO() as f1:
127
+ np.savez_compressed(f1, **episode)
128
+ f1.seek(0)
129
+ with filename.open('wb') as f2:
130
+ f2.write(f1.read())
131
+
132
+
133
+ def load_episodes(directory, rescan, length=None, balance=False, seed=0, load_episodes=1000):
134
+ directory = pathlib.Path(directory).expanduser()
135
+ random = np.random.RandomState(seed)
136
+ filenames = list(directory.glob('*.npz'))
137
+ load_episodes = min(len(filenames), load_episodes)
138
+ if load_episodes is None:
139
+ load_episodes = int(count_episodes(directory)[0] / 20)
140
+
141
+ while True:
142
+ cache = {}
143
+ for filename in random.choice(list(directory.glob('*.npz')),
144
+ load_episodes,
145
+ replace=False):
146
+ try:
147
+ with filename.open('rb') as f:
148
+ episode = np.load(f)
149
+ episode = {k: episode[k] for k in episode.keys() if k not in ['image_128']}
150
+ # episode['reward'] = copy.deepcopy(episode['success'])
151
+ if 'discount' not in episode:
152
+ episode['discount'] = np.where(episode['is_terminal'], 0., 1.)
153
+ except Exception as e:
154
+ print(f'Could not load episode: {e}')
155
+ continue
156
+ cache[filename] = episode
157
+
158
+ keys = list(cache.keys())
159
+ for index in random.choice(len(keys), rescan):
160
+ episode = copy.deepcopy(cache[keys[index]])
161
+ if length:
162
+ total = len(next(iter(episode.values())))
163
+ available = total - length
164
+ if available < 0:
165
+ for key in episode.keys():
166
+ shape = episode[key].shape
167
+ episode[key] = np.concatenate([episode[key],
168
+ np.zeros([abs(available)] + list(shape[1:]))],
169
+ axis=0)
170
+ episode['mask'] = np.ones(length)
171
+ episode['mask'][available:] = 0.0
172
+ elif available > 0:
173
+ if balance:
174
+ index = min(random.randint(0, total), available)
175
+ else:
176
+ index = int(random.randint(0, available))
177
+ episode = {k: v[index: index + length] for k, v in episode.items()}
178
+ episode['mask'] = np.ones(length)
179
+ else:
180
+ episode['mask'] = np.ones_like(episode['reward'])
181
+ else:
182
+ episode['mask'] = np.ones_like(episode['reward'])
183
+ yield episode
184
+
185
+
186
+ class Adam(tf.Module):
187
+ def __init__(self, name, modules, lr, clip=None, wd=None, wdpattern=r'.*'):
188
+ self._name = name
189
+ self._modules = modules
190
+ self._clip = clip
191
+ self._wd = wd
192
+ self._wdpattern = wdpattern
193
+ self._opt = tf.optimizers.Adam(lr)
194
+
195
+ @property
196
+ def variables(self):
197
+ return self._opt.variables()
198
+
199
+ def __call__(self, tape, loss):
200
+ variables = [module.variables for module in self._modules]
201
+ self._variables = tf.nest.flatten(variables)
202
+ assert len(loss.shape) == 0, loss.shape
203
+ grads = tape.gradient(loss, self._variables)
204
+ norm = tf.linalg.global_norm(grads)
205
+ if self._clip:
206
+ grads, _ = tf.clip_by_global_norm(grads, self._clip, norm)
207
+ self._opt.apply_gradients(zip(grads, self._variables))
208
+ return norm
209
+
210
+
211
+ def args_type(default):
212
+ if isinstance(default, bool):
213
+ return lambda x: bool(['False', 'True'].index(x))
214
+ if isinstance(default, int):
215
+ return lambda x: float(x) if ('e' in x or '.' in x) else int(x)
216
+ if isinstance(default, pathlib.Path):
217
+ return lambda x: pathlib.Path(x).expanduser()
218
+ return type(default)
219
+
220
+
221
+ def static_scan(fn, inputs, start, reverse=False):
222
+ last = start
223
+ outputs = [[] for _ in tf.nest.flatten(start)]
224
+ indices = range(len(tf.nest.flatten(inputs)[0]))
225
+ if reverse:
226
+ indices = reversed(indices)
227
+ for index in indices:
228
+ inp = tf.nest.map_structure(lambda x: x[index], inputs)
229
+ last = fn(last, inp)
230
+ [o.append(l) for o, l in zip(outputs, tf.nest.flatten(last))]
231
+ if reverse:
232
+ outputs = [list(reversed(x)) for x in outputs]
233
+ outputs = [tf.stack(x, 0) for x in outputs]
234
+ return tf.nest.pack_sequence_as(start, outputs)
235
+
236
+
237
+ def _mnd_sample(self, sample_shape=(), seed=None, name='sample'):
238
+ return tf.random.normal(
239
+ tuple(sample_shape) + tuple(self.event_shape),
240
+ self.mean(), self.stddev(), self.dtype, seed, name)
241
+
242
+
243
+ tfd.MultivariateNormalDiag.sample = _mnd_sample
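For reference, a minimal round-trip sketch of the episode helpers above: save_episodes writes each episode as {timestamp}-{identifier}-{length}.npz and count_episodes recovers episode and step counts from those filenames. The dummy episode, the output directory, and the import path below are assumptions for illustration, not part of the committed code.

import pathlib
import numpy as np
from baselines.lompo.tools import save_episodes, count_episodes  # assumed import path

# Toy 11-step episode; save_episodes derives the filename length from 'reward'.
episode = {
    'image': np.zeros((11, 64, 64, 3), dtype=np.uint8),
    'action': np.zeros((11, 4), dtype=np.float32),
    'reward': np.zeros(11, dtype=np.float32),
    'discount': np.ones(11, dtype=np.float32),
}
out_dir = pathlib.Path('episode_demo')  # placeholder directory
save_episodes(out_dir, [episode])
print(count_episodes(out_dir))  # (1, 10): step counts are stored as length - 1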
baselines/lompo/wrappers.py ADDED
@@ -0,0 +1,508 @@
1
+ import atexit
2
+ import functools
3
+ import sys
4
+ import traceback
5
+
6
+ import gym
7
+ import numpy as np
8
+
9
+
10
+ class DrawerOpen:
11
+ def __init__(self, config, size=(128, 128)):
12
+ import metaworld
13
+ import mujoco_py
14
+ self._env = metaworld.envs.mujoco.sawyer_xyz.v2.sawyer_drawer_open_v2.SawyerDrawerOpenEnvV2()
15
+ self._env._last_rand_vec = np.array([-0.1, 0.9, 0.0])
16
+ self._env._set_task_called = True
17
+ self.size = size
18
+
19
+ # Setup camera in environment
20
+ self.viewer = mujoco_py.MjRenderContextOffscreen(self._env.sim, -1)
21
+ self.viewer.cam.elevation = -22.5
22
+ self.viewer.cam.azimuth = 15
23
+ self.viewer.cam.distance = 0.75
24
+ self.viewer.cam.lookat[0] = -0.15
25
+ self.viewer.cam.lookat[1] = 0.7
26
+ self.viewer.cam.lookat[2] = 0.10
27
+
28
+ def __getattr__(self, attr):
29
+ if attr == '_wrapped_env':
30
+ raise AttributeError()
31
+ return getattr(self._env, attr)
32
+
33
+ def step(self, action):
34
+ state, reward, done, info = self._env.step(action)
35
+ img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1])
36
+ obs = {'state': state, 'image': img}
37
+ reward = 1.0 * info['success']
38
+ return obs, reward, done, info
39
+
40
+ def reset(self):
41
+ state = self._env.reset()
42
+ state = self._env.reset()
43
+ img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1])
44
+ if getattr(self, 'use_transform', False):  # DrawerOpen never sets use_transform; guard avoids an AttributeError
45
+ img = img[self.pad:-self.pad, self.pad:-self.pad, :]
46
+ obs = {'state': state, 'image': img}
47
+ return obs
48
+
49
+ def render(self, mode, width=128, height=128):
50
+ self.viewer.render(width=width, height=height)
51
+ img = self.viewer.read_pixels(self.size[0], self.size[1], depth=False)
52
+ img = img[::-1]
53
+ return img
54
+
55
+
56
+ class Hammer:
57
+ def __init__(self, config, size=(128, 128)):
58
+ import metaworld
59
+ import mujoco_py
60
+ self._env = metaworld.envs.mujoco.sawyer_xyz.v2.sawyer_hammer_v2.SawyerHammerEnvV2()
61
+ self._env._last_rand_vec = np.array([-0.06, 0.4, 0.02])
62
+ self._env._set_task_called = True
63
+ self.size = size
64
+
65
+ # Setup camera in environment
66
+ self.viewer = mujoco_py.MjRenderContextOffscreen(self._env.sim, -1)
67
+ self.viewer.cam.elevation = -15
68
+ self.viewer.cam.azimuth = 137.5
69
+ self.viewer.cam.distance = 0.9
70
+ self.viewer.cam.lookat[0] = -0.
71
+ self.viewer.cam.lookat[1] = 0.6
72
+ self.viewer.cam.lookat[2] = 0.175
73
+
74
+ def __getattr__(self, attr):
75
+ if attr == '_wrapped_env':
76
+ raise AttributeError()
77
+ return getattr(self._env, attr)
78
+
79
+ def step(self, action):
80
+ state, reward, done, info = self._env.step(action)
81
+ img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1])
82
+ obs = {'state': state, 'image': img}
83
+ return obs, reward, done, info
84
+
85
+ def reset(self):
86
+ state = self._env.reset()
87
+ img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1])
88
+ obs = {'state': state, 'image': img}
89
+ return obs
90
+
91
+ def render(self, mode, width=128, height=128):
92
+ self.viewer.render(width=width, height=height)
93
+ img = self.viewer.read_pixels(self.size[0], self.size[1], depth=False)
94
+ img = img[::-1]
95
+ return img
96
+
97
+
98
+ class DoorOpen:
99
+ def __init__(self, config, size=(128, 128)):
100
+ import metaworld
101
+ import mujoco_py
102
+ self._env = metaworld.envs.mujoco.sawyer_xyz.v2.sawyer_door_v2.SawyerDoorEnvV2()
103
+ self._env._last_rand_vec = np.array([0.0, 1.0, .1525])
104
+ self._env._set_task_called = True
105
+ self.size = size
106
+
107
+ # Setup camera in environment
108
+ self.viewer = mujoco_py.MjRenderContextOffscreen(self._env.sim, -1)
109
+ self.viewer.cam.elevation = -12.5
110
+ self.viewer.cam.azimuth = 115
111
+ self.viewer.cam.distance = 1.05
112
+ self.viewer.cam.lookat[0] = 0.075
113
+ self.viewer.cam.lookat[1] = 0.75
114
+ self.viewer.cam.lookat[2] = 0.15
115
+
116
+ def __getattr__(self, attr):
117
+ if attr == '_wrapped_env':
118
+ raise AttributeError()
119
+ return getattr(self._env, attr)
120
+
121
+ def step(self, action):
122
+ state, reward, done, info = self._env.step(action)
123
+ img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1])
124
+ obs = {'state': state, 'image': img}
125
+ reward = 1.0 * info['success']
126
+ return obs, reward, done, info
127
+
128
+ def reset(self):
129
+ state = self._env.reset()
130
+ img = self.render(mode='rgb_array', width=self.size[0], height=self.size[1])
131
+ obs = {'state': state, 'image': img}
132
+ return obs
133
+
134
+ def render(self, mode, width=128, height=128):
135
+ self.viewer.render(width=width, height=height)
136
+ img = self.viewer.read_pixels(self.size[0], self.size[1], depth=False)
137
+ img = img[::-1]
138
+ return img
139
+
140
+
141
+ class Gym:
142
+ def __init__(self, name, config, size=(64, 64)):
143
+ self._env = gym.make(name)
144
+ self.size = size
145
+ self.use_transform = config.use_transform
146
+ self.pad = int(config.pad / 2)
147
+
148
+ def __getattr__(self, attr):
149
+ if attr == '_wrapped_env':
150
+ raise AttributeError()
151
+ return getattr(self._env, attr)
152
+
153
+ def step(self, action):
154
+ state, reward, done, info = self._env.step(action)
155
+ img = self._env.render(mode='rgb_array', width=self.size[0], height=self.size[1])
156
+ if self.use_transform:
157
+ img = img[self.pad:-self.pad, self.pad:-self.pad, :]
158
+ obs = {'state': state, 'image': img}
159
+ return obs, reward, done, info
160
+
161
+ def reset(self):
162
+ state = self._env.reset()
163
+ img = self._env.render(mode='rgb_array', width=self.size[0], height=self.size[1])
164
+ if self.use_transform:
165
+ img = img[self.pad:-self.pad, self.pad:-self.pad, :]
166
+ obs = {'state': state, 'image': img}
167
+ return obs
168
+
169
+ def render(self, *args, **kwargs):
170
+ if kwargs.get('mode', 'rgb_array') != 'rgb_array':
171
+ raise ValueError("Only render mode 'rgb_array' is supported.")
172
+ return self._env.render(mode='rgb_array', width=self.size[0], height=self.size[1])
173
+
174
+
175
+ class DeepMindControl:
176
+ def __init__(self, name, size=(64, 64), camera=None):
177
+ domain, task = name.split('_', 1)
178
+ if domain == 'cup': # Only domain with multiple words.
179
+ domain = 'ball_in_cup'
180
+ if isinstance(domain, str):
181
+ from dm_control import suite
182
+ self._env = suite.load(domain, task)
183
+ else:
184
+ assert task is None
185
+ self._env = domain()
186
+ self._size = size
187
+ if camera is None:
188
+ camera = dict(quadruped=2).get(domain, 0)
189
+ self._camera = camera
190
+
191
+ @property
192
+ def observation_space(self):
193
+ spaces = {}
194
+ for key, value in self._env.observation_spec().items():
195
+ spaces[key] = gym.spaces.Box(-np.inf, np.inf, value.shape, dtype=np.float32)
196
+ spaces['image'] = gym.spaces.Box(0, 255, self._size + (3,), dtype=np.uint8)
197
+ return gym.spaces.Dict(spaces)
198
+
199
+ @property
200
+ def action_space(self):
201
+ spec = self._env.action_spec()
202
+ return gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32)
203
+
204
+ def step(self, action):
205
+ time_step = self._env.step(action)
206
+ obs = dict(time_step.observation)
207
+ obs['image'] = self.render()
208
+ reward = time_step.reward or 0
209
+ done = time_step.last()
210
+ info = {'discount': np.array(time_step.discount, np.float32)}
211
+ return obs, reward, done, info
212
+
213
+ def reset(self):
214
+ time_step = self._env.reset()
215
+ obs = dict(time_step.observation)
216
+ obs['image'] = self.render()
217
+ return obs
218
+
219
+ def render(self, *args, **kwargs):
220
+ if kwargs.get('mode', 'rgb_array') != 'rgb_array':
221
+ raise ValueError("Only render mode 'rgb_array' is supported.")
222
+ return self._env.physics.render(*self._size, camera_id=self._camera)
223
+
224
+
225
+ class Collect:
226
+ def __init__(self, env, callbacks=None, precision=32):
227
+ self._env = env
228
+ self._callbacks = callbacks or ()
229
+ self._precision = precision
230
+ self._episode = None
231
+
232
+ def __getattr__(self, name):
233
+ return getattr(self._env, name)
234
+
235
+ def step(self, action):
236
+ obs, reward, done, info = self._env.step(action)
237
+ obs = {k: self._convert(v) for k, v in obs.items()}
238
+ transition = obs.copy()
239
+ transition['action'] = action
240
+ transition['reward'] = reward
241
+ transition['discount'] = info.get('discount', np.array(1 - float(done)))
242
+ self._episode.append(transition)
243
+ if done:
244
+ episode = {k: [t[k] for t in self._episode] for k in self._episode[0]}
245
+ episode = {k: self._convert(v) for k, v in episode.items()}
246
+ info['episode'] = episode
247
+ for callback in self._callbacks:
248
+ callback(episode)
249
+ return obs, reward, done, info
250
+
251
+ def reset(self):
252
+ obs = self._env.reset()
253
+ transition = obs.copy()
254
+ transition['action'] = np.zeros(self._env.action_space.shape)
255
+ transition['reward'] = 0.0
256
+ transition['discount'] = 1.0
257
+ self._episode = [transition]
258
+ return obs
259
+
260
+ def _convert(self, value):
261
+ value = np.array(value)
262
+ if np.issubdtype(value.dtype, np.floating):
263
+ dtype = {16: np.float16, 32: np.float32, 64: np.float64}[self._precision]
264
+ elif np.issubdtype(value.dtype, np.signedinteger):
265
+ dtype = {16: np.int16, 32: np.int32, 64: np.int64}[self._precision]
266
+ elif np.issubdtype(value.dtype, np.uint8):
267
+ dtype = np.uint8
268
+ else:
269
+ raise NotImplementedError(value.dtype)
270
+ return value.astype(dtype)
271
+
272
+
273
+ class TimeLimit:
274
+ def __init__(self, env, duration):
275
+ self._env = env
276
+ self._duration = duration
277
+ self._step = None
278
+
279
+ def __getattr__(self, name):
280
+ return getattr(self._env, name)
281
+
282
+ def step(self, action):
283
+ assert self._step is not None, 'Must reset environment.'
284
+ obs, reward, done, info = self._env.step(action)
285
+ self._step += 1
286
+ if self._step >= self._duration:
287
+ done = True
288
+ if 'discount' not in info:
289
+ info['discount'] = np.array(1.0).astype(np.float32)
290
+ self._step = None
291
+ return obs, reward, done, info
292
+
293
+ def reset(self):
294
+ self._step = 0
295
+ return self._env.reset()
296
+
297
+
298
+ class ActionRepeat:
299
+ def __init__(self, env, amount):
300
+ self._env = env
301
+ self._amount = amount
302
+
303
+ def __getattr__(self, name):
304
+ return getattr(self._env, name)
305
+
306
+ def step(self, action):
307
+ done = False
308
+ total_reward = 0
309
+ current_step = 0
310
+ while current_step < self._amount and not done:
311
+ obs, reward, done, info = self._env.step(action)
312
+ total_reward += reward
313
+ current_step += 1
314
+ return obs, total_reward, done, info
315
+
316
+
317
+ class NormalizeActions:
318
+ def __init__(self, env):
319
+ self._env = env
320
+ self._mask = np.logical_and(np.isfinite(env.action_space.low),
321
+ np.isfinite(env.action_space.high))
322
+ self._low = np.where(self._mask, env.action_space.low, -1)
323
+ self._high = np.where(self._mask, env.action_space.high, 1)
324
+
325
+ def __getattr__(self, name):
326
+ return getattr(self._env, name)
327
+
328
+ @property
329
+ def action_space(self):
330
+ low = np.where(self._mask, -np.ones_like(self._low), self._low)
331
+ high = np.where(self._mask, np.ones_like(self._low), self._high)
332
+ return gym.spaces.Box(low, high, dtype=np.float32)
333
+
334
+ def step(self, action):
335
+ original = (action + 1) / 2 * (self._high - self._low) + self._low
336
+ original = np.where(self._mask, original, action)
337
+ return self._env.step(original)
338
+
339
+
340
+ class ObsDict:
341
+ def __init__(self, env, key='obs'):
342
+ self._env = env
343
+ self._key = key
344
+
345
+ def __getattr__(self, name):
346
+ return getattr(self._env, name)
347
+
348
+ @property
349
+ def observation_space(self):
350
+ spaces = {self._key: self._env.observation_space}
351
+ return gym.spaces.Dict(spaces)
352
+
353
+ @property
354
+ def action_space(self):
355
+ return self._env.action_space
356
+
357
+ def step(self, action):
358
+ obs, reward, done, info = self._env.step(action)
359
+ obs = {self._key: np.array(obs)}
360
+ return obs, reward, done, info
361
+
362
+ def reset(self):
363
+ obs = self._env.reset()
364
+ obs = {self._key: np.array(obs)}
365
+ return obs
366
+
367
+
368
+ class RewardObs:
369
+ def __init__(self, env):
370
+ self._env = env
371
+
372
+ def __getattr__(self, name):
373
+ return getattr(self._env, name)
374
+
375
+ @property
376
+ def observation_space(self):
377
+ spaces = self._env.observation_space.spaces
378
+ assert 'reward' not in spaces
379
+ spaces['reward'] = gym.spaces.Box(-np.inf, np.inf, dtype=np.float32)
380
+ return gym.spaces.Dict(spaces)
381
+
382
+ def step(self, action):
383
+ obs, reward, done, info = self._env.step(action)
384
+ obs['reward'] = reward
385
+ return obs, reward, done, info
386
+
387
+ def reset(self):
388
+ obs = self._env.reset()
389
+ obs['reward'] = 0.0
390
+ return obs
391
+
392
+
393
+ class Async:
394
+ _ACCESS = 1
395
+ _CALL = 2
396
+ _RESULT = 3
397
+ _EXCEPTION = 4
398
+ _CLOSE = 5
399
+
400
+ def __init__(self, ctor, strategy='process'):
401
+ self._strategy = strategy
402
+ if strategy == 'none':
403
+ self._env = ctor()
404
+ elif strategy == 'thread':
405
+ import multiprocessing.dummy as mp
406
+ elif strategy == 'process':
407
+ import multiprocessing as mp
408
+ else:
409
+ raise NotImplementedError(strategy)
410
+ if strategy != 'none':
411
+ self._conn, conn = mp.Pipe()
412
+ self._process = mp.Process(target=self._worker, args=(ctor, conn))
413
+ atexit.register(self.close)
414
+ self._process.start()
415
+ self._obs_space = None
416
+ self._action_space = None
417
+
418
+ @property
419
+ def observation_space(self):
420
+ if not self._obs_space:
421
+ self._obs_space = self.__getattr__('observation_space')
422
+ return self._obs_space
423
+
424
+ @property
425
+ def action_space(self):
426
+ if not self._action_space:
427
+ self._action_space = self.__getattr__('action_space')
428
+ return self._action_space
429
+
430
+ def __getattr__(self, name):
431
+ if self._strategy == 'none':
432
+ return getattr(self._env, name)
433
+ self._conn.send((self._ACCESS, name))
434
+ return self._receive()
435
+
436
+ def call(self, name, *args, **kwargs):
437
+ blocking = kwargs.pop('blocking', True)
438
+ if self._strategy == 'none':
439
+ return functools.partial(getattr(self._env, name), *args, **kwargs)
440
+ payload = name, args, kwargs
441
+ self._conn.send((self._CALL, payload))
442
+ promise = self._receive
443
+ return promise() if blocking else promise
444
+
445
+ def close(self):
446
+ if self._strategy == 'none':
447
+ try:
448
+ self._env.close()
449
+ except AttributeError:
450
+ pass
451
+ return
452
+ try:
453
+ self._conn.send((self._CLOSE, None))
454
+ self._conn.close()
455
+ except IOError:
456
+ # The connection was already closed.
457
+ pass
458
+ self._process.join()
459
+
460
+ def step(self, action, blocking=True):
461
+ return self.call('step', action, blocking=blocking)
462
+
463
+ def reset(self, blocking=True):
464
+ return self.call('reset', blocking=blocking)
465
+
466
+ def _receive(self):
467
+ try:
468
+ message, payload = self._conn.recv()
469
+ except ConnectionResetError:
470
+ raise RuntimeError('Environment worker crashed.')
471
+ # Re-raise exceptions in the main process.
472
+ if message == self._EXCEPTION:
473
+ stacktrace = payload
474
+ raise Exception(stacktrace)
475
+ if message == self._RESULT:
476
+ return payload
477
+ raise KeyError(f'Received message of unexpected type {message}')
478
+
479
+ def _worker(self, ctor, conn):
480
+ try:
481
+ env = ctor()
482
+ while True:
483
+ try:
484
+ # Only block for short times to have keyboard exceptions be raised.
485
+ if not conn.poll(0.1):
486
+ continue
487
+ message, payload = conn.recv()
488
+ except (EOFError, KeyboardInterrupt):
489
+ break
490
+ if message == self._ACCESS:
491
+ name = payload
492
+ result = getattr(env, name)
493
+ conn.send((self._RESULT, result))
494
+ continue
495
+ if message == self._CALL:
496
+ name, args, kwargs = payload
497
+ result = getattr(env, name)(*args, **kwargs)
498
+ conn.send((self._RESULT, result))
499
+ continue
500
+ if message == self._CLOSE:
501
+ assert payload is None
502
+ break
503
+ raise KeyError(f'Received message of unknown type {message}')
504
+ except Exception:
505
+ stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
506
+ print(f'Error in environment process: {stacktrace}')
507
+ conn.send((self._EXCEPTION, stacktrace))
508
+ conn.close()
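For reference, the Async wrapper above is what makes the non-blocking step(action, blocking=False) pattern in tools.py work: the call returns a promise immediately and the result is fetched when the promise is invoked. A minimal sketch, assuming a Linux fork start method (so the lambda constructor reaches the worker process); the task name and worker count are arbitrary placeholders.

envs = [Async(lambda: DeepMindControl('walker_walk'), strategy='process')
        for _ in range(2)]
obs = [env.reset(blocking=False)() for env in envs]  # request resets, then resolve the promises
actions = [env.action_space.sample() for env in envs]
promises = [env.step(a, blocking=False) for env, a in zip(envs, actions)]
results = [p() for p in promises]  # each resolves to (obs, reward, done, info)
for env in envs:
    env.close()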
conversion_scripts/hdf5_to_npz.py ADDED
@@ -0,0 +1,66 @@
1
+ import datetime
2
+ import io
3
+ import pathlib
4
+ import uuid
5
+ import numpy as np
6
+ import h5py
7
+ import argparse
8
+
9
+
10
+ def save_episode(directory, episode):
11
+ timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
12
+ identifier = str(uuid.uuid4().hex)
13
+ length = len(episode['action'])
14
+ filename = directory / f'{timestamp}-{identifier}-{length}.npz'
15
+ with io.BytesIO() as f1:
16
+ np.savez_compressed(f1, **episode)
17
+ f1.seek(0)
18
+ with filename.open('wb') as f2:
19
+ f2.write(f1.read())
20
+ return filename
21
+
22
+
23
+ def main():
24
+ # Include argument parser
25
+ parser = argparse.ArgumentParser(description='Convert hdf5 files to npz.')
26
+ parser.add_argument('--input_dir', type=str, required=True,
27
+ help='Path to input files')
28
+ parser.add_argument('--output_dir', type=str, required=True,
29
+ help='Path to output files')
30
+ args = parser.parse_args()
31
+
32
+ is_last = np.zeros(501, dtype=bool)
33
+ is_last[500] = True
34
+ is_first = np.zeros(501, dtype=bool)
35
+ is_first[0] = True
36
+ is_terminal = np.zeros(501, dtype=bool)
37
+
38
+ out_dir = pathlib.Path(args.output_dir)
39
+ out_dir.mkdir(parents=True, exist_ok=True)
40
+
41
+ filenames = sorted(pathlib.Path(args.input_dir).glob('*.hdf5'))
42
+ for filename in filenames:
43
+ with h5py.File(filename, "r") as f:
44
+ actions = f['action'][:]
45
+ observations = f['observation'][:]
46
+ rewards = f['reward'][:]
47
+ discounts = f['discount'][:]
48
+ while len(actions) > 0:
49
+ ep = {
50
+ 'image': observations[:501].transpose(0, 2, 3, 1),
51
+ 'action': actions[:501],
52
+ 'reward': rewards[:501],
53
+ 'discount': discounts[:501],
54
+ 'is_last': is_last,
55
+ 'is_first': is_first,
56
+ 'is_terminal': is_terminal,
57
+ }
58
+ actions = actions[501:]
59
+ observations = observations[501:]
60
+ rewards = rewards[501:]
61
+ discounts = discounts[501:]
62
+ save_episode(out_dir, ep)
63
+
64
+
65
+ if __name__ == '__main__':
66
+ main()
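A quick sanity-check sketch for the converter above: each output .npz holds one 501-step episode with channel-last images plus the is_first/is_last/is_terminal flags the script adds. The output directory name below is a placeholder.

import pathlib
import numpy as np

episode_file = next(pathlib.Path('npz_out').glob('*.npz'))  # placeholder path
with episode_file.open('rb') as f:
    episode = dict(np.load(f))
for key, value in episode.items():
    print(key, value.shape, value.dtype)  # e.g. image (501, H, W, 3), reward (501,)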
conversion_scripts/npz_to_hdf5.py ADDED
@@ -0,0 +1,81 @@
1
+ import pathlib
2
+ import numpy as np
3
+ import h5py
4
+ import cv2
5
+ import argparse
6
+
7
+
8
+ def load_episodes(directory, capacity=None):
9
+ # The returned directory from filenames to episodes is guaranteed to be in
10
+ # temporally sorted order.
11
+ filenames = sorted(directory.glob('*.npz'))
12
+ if capacity:
13
+ num_steps = 0
14
+ num_episodes = 0
15
+ for filename in reversed(filenames):
16
+ length = int(str(filename).split('-')[-1][:-4])
17
+ num_steps += length
18
+ num_episodes += 1
19
+ if num_steps >= capacity:
20
+ break
21
+ filenames = filenames[-num_episodes:]
22
+ episodes = {}
23
+ for filename in filenames:
24
+ try:
25
+ with filename.open('rb') as f:
26
+ episode = np.load(f)
27
+ episode = {k: episode[k] for k in episode.keys()}
28
+ # Conversion for older versions of npz files.
29
+ if 'is_terminal' not in episode:
30
+ episode['is_terminal'] = episode['discount'] == 0.
31
+ except Exception as e:
32
+ print(f'Could not load episode {str(filename)}: {e}')
33
+ continue
34
+ episodes[str(filename)] = episode
35
+ return episodes
36
+
37
+
38
+ def main():
39
+ # Include argument parser
40
+ parser = argparse.ArgumentParser(description='Convert npz files to hdf5.')
41
+ parser.add_argument('--input_dir', type=str, required=True,
42
+ help='Path to input files')
43
+ parser.add_argument('--output_dir', type=str, required=True,
44
+ help='Path to output files')
45
+ args = parser.parse_args()
46
+
47
+ step_type = np.ones(501)
48
+ step_type[0] = 0
49
+ step_type[500] = 2
50
+
51
+ output = {}
52
+ episodes = load_episodes(pathlib.Path(args.input_dir))
53
+ episodes = list(episodes.values())
54
+
55
+ actions = [e['action'] for e in episodes]
56
+ discounts = [e['discount'] for e in episodes]
57
+ observations = []
58
+ for e in episodes:
59
+ resized_images = np.empty((501, 84, 84, 3), dtype=e['image'].dtype)
60
+ for (k, i) in enumerate(e['image']):
61
+ resized_images[k] = cv2.resize(i, dsize=(84, 84), interpolation=cv2.INTER_CUBIC)
62
+ observations.append(resized_images.transpose(0, 3, 1, 2))
63
+ rewards = [e['reward'] for e in episodes]
64
+ step_types = [step_type for _ in episodes]
65
+
66
+ output['action'] = np.concatenate(actions)
67
+ output['discount'] = np.concatenate(discounts)
68
+ output['observation'] = np.concatenate(observations)
69
+ output['reward'] = np.concatenate(rewards)
70
+ output['step_type'] = np.concatenate(step_types)
71
+
72
+ out_dir = pathlib.Path(args.output_dir)
73
+ out_dir.mkdir(parents=True, exist_ok=True)
74
+
75
+ with h5py.File(out_dir / 'data.hdf5', 'w') as shard_file:
76
+ for k, v in output.items():
77
+ shard_file.create_dataset(k, data=v, compression='gzip')
78
+
79
+
80
+ if __name__ == '__main__':
81
+ main()
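The reverse check for the script above: it writes a single data.hdf5 with all episodes concatenated along the first axis, observations resized to 84x84 and stored channel-first, and step_type marking 0/1/2 for first/mid/last steps. The output path below is a placeholder.

import h5py

with h5py.File('hdf5_out/data.hdf5', 'r') as f:  # placeholder path
    for key in f.keys():
        print(key, f[key].shape, f[key].dtype)  # observation: (N, 3, 84, 84)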
conversion_scripts/split_hdf5_shards.py ADDED
@@ -0,0 +1,40 @@
1
+ import pathlib
2
+ import numpy as np
3
+ import h5py
4
+ import argparse
5
+
6
+
7
+ def main():
8
+ # Include argument parser
9
+ parser = argparse.ArgumentParser(description='Split hdf5 shards to smaller.')
10
+ parser.add_argument('--input_dir', type=str, required=True,
11
+ help='Path to input files')
12
+ parser.add_argument('--output_dir', type=str, required=True,
13
+ help='Path to output files')
14
+ parser.add_argument('--split_size', type=int, default=5)
15
+ args = parser.parse_args()
16
+
17
+ in_dir = pathlib.Path(args.input_dir)
18
+ out_dir = pathlib.Path(args.output_dir)
19
+ out_dir.mkdir(parents=True, exist_ok=True)
20
+
21
+ counter = 0
22
+ for filename in in_dir.glob('*.hdf5'):
23
+ print(filename)
24
+ with h5py.File(filename, "r") as episodes:
25
+ episodes = {k: episodes[k][:] for k in episodes.keys()}
26
+ print(episodes['action'].shape[0])
27
+
28
+ split_episodes = {k: np.array_split(v, args.split_size) for k, v in episodes.items()}
29
+ split_episodes = [{k: v[idx] for k, v in split_episodes.items()} for idx in range(args.split_size)]
30
+
31
+ for eps in split_episodes:
32
+ with h5py.File(out_dir / f'{counter}.hdf5', 'w') as shard_file:
33
+ for k, v in eps.items():
34
+ shard_file.create_dataset(k, data=v, compression='gzip')
35
+ print(counter)
36
+ counter += 1
37
+
38
+
39
+ if __name__ == '__main__':
40
+ main()
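For reference, the splitter above would be invoked along the lines of python conversion_scripts/split_hdf5_shards.py --input_dir <large_shards> --output_dir <small_shards> --split_size 5, where both directory arguments are placeholders; each input shard is split into --split_size smaller hdf5 files written as 0.hdf5, 1.hdf5, and so on.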
dockerfiles/Dockerfile-drqv2 ADDED
@@ -0,0 +1,74 @@
1
+ # FROM MAIN DIR: docker build -t offline-drqv2 -f dockerfiles/Dockerfile-drqv2 .
2
+
3
+ FROM nvidia/cuda:11.3.0-cudnn8-devel-ubuntu18.04
4
+ MAINTAINER Anonymous
5
+
6
+ ENV DEBIAN_FRONTEND=noninteractive
7
+
8
+ RUN apt-get update && apt-get install -y \
9
+ vim wget unzip \
10
+ libosmesa6-dev libgl1-mesa-glx libgl1-mesa-dev patchelf libglfw3 build-essential
11
+
12
+ RUN apt-get update && apt-get install -y --no-install-recommends \
13
+ tzdata \
14
+ cmake \
15
+ git \
16
+ curl \
17
+ ca-certificates \
18
+ libjpeg-dev \
19
+ libpng-dev \
20
+ libglib2.0-0
21
+
22
+ # ZOO PACKAGES
23
+ RUN apt-get -y update \
24
+ && apt-get -y install \
25
+ ffmpeg \
26
+ freeglut3-dev \
27
+ swig \
28
+ xvfb \
29
+ libxrandr2 \
30
+ && apt-get clean \
31
+ && rm -rf /var/lib/apt/lists/*
32
+
33
+ # ATARI PACKAGES
34
+ RUN apt-get -y update \
35
+ && apt-get -y install \
36
+ tmux \
37
+ libsm6 \
38
+ libxext6 \
39
+ libxrender-dev \
40
+ unrar \
41
+ zlib1g \
42
+ zlib1g-dev \
43
+ && apt-get clean \
44
+ && rm -rf /var/lib/apt/lists/*
45
+
46
+ # mujoco
47
+ RUN mkdir -p /.mujoco \
48
+ && wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz -O mujoco.tar.gz \
49
+ && tar -xf mujoco.tar.gz -C /.mujoco \
50
+ && rm mujoco.tar.gz
51
+ ENV MUJOCO_PY_MUJOCO_PATH /.mujoco/mujoco210
52
+ ENV LD_LIBRARY_PATH /.mujoco/mujoco210/bin:${LD_LIBRARY_PATH}
53
+ ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH}
54
+ ENV MJLIB_PATH /.mujoco/mujoco210/bin/libmujoco210.so
55
+
56
+ RUN useradd -u <<USER_ID>> --create-home user
57
+ USER user
58
+ WORKDIR /home/user
59
+
60
+ RUN wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
61
+ bash Miniconda3-latest-Linux-x86_64.sh -b -p miniconda3 && \
62
+ rm Miniconda3-latest-Linux-x86_64.sh
63
+ ENV PATH /home/user/miniconda3/bin:$PATH
64
+
65
+ RUN pip install --upgrade pip
66
+ RUN pip install mujoco_py seaborn
67
+ RUN conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch
68
+ RUN pip install absl-py
69
+ RUN pip install dm_control
70
+ RUN pip install tb-nightly termcolor
71
+ RUN pip install imageio imageio-ffmpeg hydra-core hydra-submitit-launcher pandas
72
+ RUN pip install ipdb yapf sklearn matplotlib opencv-python
73
+
74
+ WORKDIR /vd4rl
dockerfiles/Dockerfile-dv2 ADDED
@@ -0,0 +1,58 @@
1
+ # FROM MAIN DIR: docker build -t offline-dv2 -f dockerfiles/Dockerfile-dv2 .
2
+
3
+ FROM tensorflow/tensorflow:2.6.0-gpu
4
+ MAINTAINER Anonymous
5
+
6
+ # System packages.
7
+ RUN apt-get update && apt-get install -y \
8
+ ffmpeg \
9
+ libgl1-mesa-dev \
10
+ libosmesa6-dev \
11
+ python3-pip \
12
+ unrar \
13
+ wget \
14
+ && apt-get clean
15
+
16
+ RUN apt-get update && apt-get install -y \
17
+ git \
18
+ && apt-get clean
19
+
20
+ RUN useradd -u <<USER_ID>> --create-home user
21
+ USER user
22
+ WORKDIR /home/user
23
+
24
+ RUN wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
25
+ bash Miniconda3-latest-Linux-x86_64.sh -b -p miniconda3 && \
26
+ rm Miniconda3-latest-Linux-x86_64.sh
27
+ ENV PATH /home/user/miniconda3/bin:$PATH
28
+
29
+ # MuJoCo
30
+ ENV MUJOCO_GL egl
31
+ ENV MUJOCO_PY_MUJOCO_PATH /home/user/.mujoco/mujoco210
32
+ RUN mkdir -p .mujoco \
33
+ && wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz -O mujoco.tar.gz \
34
+ && tar -xf mujoco.tar.gz -C .mujoco \
35
+ && rm mujoco.tar.gz
36
+ ENV LD_LIBRARY_PATH /home/user/.mujoco/mujoco210/bin:${LD_LIBRARY_PATH}
37
+
38
+ # Python packages.
39
+ RUN pip3 install --no-cache-dir 'gym[atari]'
40
+ RUN pip3 install --no-cache-dir atari_py
41
+ RUN pip3 install --no-cache-dir crafter
42
+ RUN pip3 install --no-cache-dir dm_control
43
+ RUN pip3 install --no-cache-dir ruamel.yaml
44
+ RUN pip3 install --no-cache-dir tensorflow==2.6.0 tensorflow_probability==0.14.1 tensorflow-estimator==2.6.0 tensorboard==2.6.0 keras==2.6.0
45
+ RUN pip3 install --no-cache-dir gym-minigrid sklearn
46
+
47
+ # Atari ROMS.
48
+ RUN wget -L -nv http://www.atarimania.com/roms/Roms.rar && \
49
+ unrar x Roms.rar && \
50
+ unzip ROMS.zip && \
51
+ python3 -m atari_py.import_roms ROMS && \
52
+ rm -rf Roms.rar ROMS.zip ROMS
53
+
54
+ RUN pip3 install -U 'mujoco-py<2.2,>=2.1'
55
+ RUN wget https://www.roboti.us/file/mjkey.txt -O /home/user/.mujoco/mjkey.txt
56
+ RUN conda install patchelf
57
+
58
+ WORKDIR /vd4rl
drqbc/.DS_Store ADDED
Binary file (6.15 kB).
 
drqbc/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) Facebook, Inc. and its affiliates.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
drqbc/bcagent.py ADDED
@@ -0,0 +1,105 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ import numpy as np
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+
10
+ from drqv2 import Actor, Encoder, RandomShiftsAug
11
+ import utils
12
+
13
+
14
+ class BCAgent:
15
+ def __init__(self, obs_shape, action_shape, device, lr, feature_dim,
16
+ hidden_dim, critic_target_tau, num_expl_steps,
17
+ update_every_steps, stddev_schedule, stddev_clip, use_tb,
18
+ augmentation=RandomShiftsAug(pad=4)):
19
+ self.device = device
20
+ self.critic_target_tau = critic_target_tau
21
+ self.update_every_steps = update_every_steps
22
+ self.use_tb = use_tb
23
+ self.num_expl_steps = num_expl_steps
24
+ self.stddev_schedule = stddev_schedule
25
+ self.stddev_clip = stddev_clip
26
+
27
+ # models
28
+ self.encoder = Encoder(obs_shape).to(device)
29
+ self.actor = Actor(self.encoder.repr_dim, action_shape, feature_dim,
30
+ hidden_dim).to(device)
31
+
32
+ # optimizers
33
+ self.encoder_opt = torch.optim.Adam(self.encoder.parameters(), lr=lr)
34
+ self.actor_opt = torch.optim.Adam(self.actor.parameters(), lr=lr)
35
+
36
+ # data augmentation
37
+ self.aug = augmentation
38
+
39
+ self.train()
40
+
41
+ def train(self, training=True):
42
+ self.training = training
43
+ self.encoder.train(training)
44
+ self.actor.train(training)
45
+
46
+ def act(self, obs, step, eval_mode):
47
+ obs = torch.as_tensor(obs, device=self.device)
48
+ obs = self.encoder(obs.unsqueeze(0))
49
+ stddev = utils.schedule(self.stddev_schedule, step)
50
+ dist = self.actor(obs, stddev)
51
+ if eval_mode:
52
+ action = dist.mean
53
+ else:
54
+ action = dist.sample(clip=None)
55
+ if step < self.num_expl_steps:
56
+ action.uniform_(-1.0, 1.0)
57
+ return action.cpu().numpy()[0]
58
+
59
+ def update_actor(self, obs, step, behavioural_action=None):
60
+ metrics = dict()
61
+
62
+ stddev = utils.schedule(self.stddev_schedule, step)
63
+ dist = self.actor(obs, stddev)
64
+ action = dist.sample(clip=self.stddev_clip)
65
+ log_prob = dist.log_prob(action).sum(-1, keepdim=True)
66
+
67
+ # offline BC Loss
68
+ actor_loss = F.mse_loss(action, behavioural_action)
69
+
70
+ # optimize actor and encoder
71
+ self.encoder_opt.zero_grad(set_to_none=True)
72
+ self.actor_opt.zero_grad(set_to_none=True)
73
+ actor_loss.backward()
74
+ self.actor_opt.step()
75
+ self.encoder_opt.step()
76
+
77
+ if self.use_tb:
78
+ metrics['actor_logprob'] = log_prob.mean().item()
79
+ metrics['actor_ent'] = dist.entropy().sum(dim=-1).mean().item()
80
+ metrics['actor_bc_loss'] = actor_loss.item()
81
+
82
+ return metrics
83
+
84
+ def update(self, replay_buffer, step):
85
+ metrics = dict()
86
+
87
+ if step % self.update_every_steps != 0:
88
+ return metrics
89
+
90
+ batch = next(replay_buffer)
91
+ obs, action, reward, _, _ = utils.to_torch(
92
+ batch, self.device)
93
+
94
+ # augment
95
+ obs = self.aug(obs.float())
96
+ # encode
97
+ obs = self.encoder(obs)
98
+
99
+ if self.use_tb:
100
+ metrics['batch_reward'] = reward.mean().item()
101
+
102
+ # update actor
103
+ metrics.update(self.update_actor(obs, step, action.detach()))
104
+
105
+ return metrics
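To make the objective in update_actor explicit: the agent clones dataset actions with a mean-squared error between the actor's sampled action and the behavioural action, and the gradient flows through both the actor and the encoder. A stripped-down sketch with placeholder tensors standing in for the actor distribution and the replay batch:

import torch
import torch.nn.functional as F

policy_action = torch.randn(256, 6, requires_grad=True)  # stand-in for dist.sample(clip=...)
dataset_action = torch.rand(256, 6) * 2 - 1              # stand-in for the replay-buffer action
bc_loss = F.mse_loss(policy_action, dataset_action)
bc_loss.backward()  # in BCAgent this gradient reaches both actor and encoder parameters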
drqbc/cfgs/.DS_Store ADDED
Binary file (6.15 kB).
 
drqbc/cfgs/algo/bc.yaml ADDED
@@ -0,0 +1,20 @@
1
+ defaults:
2
+ - _self_
3
+
4
+ # experiment
5
+ experiment: bc
6
+
7
+ agent:
8
+ _target_: bcagent.BCAgent
9
+ obs_shape: ??? # to be specified later
10
+ action_shape: ??? # to be specified later
11
+ device: ${device}
12
+ lr: ${lr}
13
+ critic_target_tau: 0.01
14
+ update_every_steps: 2
15
+ use_tb: ${use_tb}
16
+ num_expl_steps: 2000
17
+ hidden_dim: 1024
18
+ feature_dim: ${feature_dim}
19
+ stddev_schedule: ${stddev_schedule}
20
+ stddev_clip: 0.3
drqbc/cfgs/algo/cql.yaml ADDED
@@ -0,0 +1,27 @@
1
+ defaults:
2
+ - _self_
3
+
4
+ # experiment
5
+ experiment: cql
6
+
7
+ agent:
8
+ _target_: cql.CQLAgent
9
+ obs_shape: ??? # to be specified later
10
+ action_shape: ??? # to be specified later
11
+ device: ${device}
12
+ lr: ${lr}
13
+ critic_target_tau: 0.01
14
+ update_every_steps: 2
15
+ use_tb: ${use_tb}
16
+ num_expl_steps: 2000
17
+ hidden_dim: 1024
18
+ feature_dim: ${feature_dim}
19
+ stddev_schedule: ${stddev_schedule}
20
+ stddev_clip: 0.3
21
+ offline: ${offline}
22
+ cql_importance_sample: ${cql_importance_sample}
23
+ temp: ${temp}
24
+ min_q_weight: ${min_q_weight}
25
+ num_random: ${num_random}
26
+ with_lagrange: ${with_lagrange}
27
+ lagrange_thresh: ${lagrange_thresh}
drqbc/cfgs/algo/drqv2.yaml ADDED
@@ -0,0 +1,23 @@
1
+ defaults:
2
+ - _self_
3
+
4
+ # experiment
5
+ experiment: drqv2
6
+
7
+ agent:
8
+ _target_: drqv2.DrQV2Agent
9
+ obs_shape: ??? # to be specified later
10
+ action_shape: ??? # to be specified later
11
+ device: ${device}
12
+ lr: ${lr}
13
+ critic_target_tau: 0.01
14
+ update_every_steps: 2
15
+ use_tb: ${use_tb}
16
+ num_expl_steps: 2000
17
+ hidden_dim: 1024
18
+ feature_dim: ${feature_dim}
19
+ stddev_schedule: ${stddev_schedule}
20
+ stddev_clip: 0.3
21
+ offline: ${offline}
22
+ bc_weight: ${bc_weight}
23
+ use_bc: ${use_bc}
drqbc/cfgs/algo/noaug_bc.yaml ADDED
@@ -0,0 +1,10 @@
1
+ defaults:
2
+ - bc
3
+ - _self_
4
+
5
+ # experiment
6
+ experiment: noaug_bc
7
+
8
+ agent:
9
+ augmentation:
10
+ _target_: drqv2.NoShiftAug
drqbc/cfgs/algo/noaug_drqv2.yaml ADDED
@@ -0,0 +1,10 @@
1
+ defaults:
2
+ - drqv2
3
+ - _self_
4
+
5
+ # experiment
6
+ experiment: noaug_drqv2
7
+
8
+ agent:
9
+ augmentation:
10
+ _target_: drqv2.NoShiftAug
drqbc/cfgs/config.yaml ADDED
@@ -0,0 +1,63 @@
1
+ defaults:
2
+ - _self_
3
+ - task@_global_: offline_dmc
4
+ - algo@_global_: drqv2
5
+ - override hydra/launcher: submitit_local
6
+
7
+ # task settings
8
+ frame_stack: 3
9
+ action_repeat: 2
10
+ discount: 0.99
11
+ distracting_mode: null
12
+ # train settings
13
+ num_seed_frames: 4000
14
+ # eval
15
+ eval_every_frames: 10000
16
+ num_eval_episodes: 10
17
+ eval_on_distracting: False
18
+ eval_on_multitask: False
19
+ eval_save_vid_every_step: 100000
20
+ # snapshot
21
+ save_snapshot: False
22
+ # replay buffer
23
+ replay_buffer_size: 1_000_000
24
+ replay_buffer_num_workers: 1
25
+ nstep: 3
26
+ batch_size: 256
27
+ # misc
28
+ seed: 1
29
+ device: cuda
30
+ save_video: True
31
+ save_train_video: False
32
+ use_tb: True
33
+ # experiment
34
+ experiment: exp
35
+ # agent
36
+ lr: 1e-4
37
+ feature_dim: 50
38
+ # offline
39
+ offline: False
40
+ bc_weight: 2.5
41
+ use_bc: True
42
+ # cql
43
+ cql_importance_sample: False
44
+ temp: 1.0
45
+ min_q_weight: 1.0
46
+ num_random: 10
47
+ with_lagrange: False
48
+ lagrange_thresh: 0.0
49
+
50
+ hydra:
51
+ run:
52
+ dir: drqv2_data/${experiment}/${task_name}/${now:%Y.%m.%d}/${now:%H%M%S}
53
+ sweep:
54
+ dir: ./exp/${now:%Y.%m.%d}/${now:%H%M}_${agent_cfg.experiment}
55
+ subdir: ${hydra.job.num}
56
+ launcher:
57
+ timeout_min: 4300
58
+ cpus_per_task: 10
59
+ gpus_per_node: 1
60
+ tasks_per_node: 1
61
+ mem_gb: 160
62
+ nodes: 1
63
+ submitit_folder: ./exp/${now:%Y.%m.%d}/${now:%H%M%S}_${agent_cfg.experiment}/.slurm
drqbc/cfgs/task/offline_dmc.yaml ADDED
@@ -0,0 +1,12 @@
1
+ defaults:
2
+ - _self_
3
+
4
+ num_train_frames: 1100000
5
+ stddev_schedule: 'linear(1.0,0.1,250000)'
6
+ nstep: 1
7
+ show_train_stats_every_frames: 2000
8
+ lr: 3e-4
9
+ replay_buffer_size: 1_000_000
10
+ offline: True
11
+ task_name: offline_walker_walk_random
12
+ offline_dir: ...
drqbc/cfgs/task/online/acrobot_swingup.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: acrobot_swingup
drqbc/cfgs/task/online/cartpole_balance.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: cartpole_balance
drqbc/cfgs/task/online/cartpole_balance_sparse.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: cartpole_balance_sparse
drqbc/cfgs/task/online/cartpole_swingup.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: cartpole_swingup
drqbc/cfgs/task/online/cartpole_swingup_sparse.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: cartpole_swingup_sparse
drqbc/cfgs/task/online/cheetah_run.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: cheetah_run
drqbc/cfgs/task/online/cup_catch.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: cup_catch
drqbc/cfgs/task/online/easy.yaml ADDED
@@ -0,0 +1,2 @@
1
+ num_train_frames: 1100000
2
+ stddev_schedule: 'linear(1.0,0.1,100000)'
drqbc/cfgs/task/online/finger_spin.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: finger_spin
drqbc/cfgs/task/online/finger_turn_easy.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: finger_turn_easy
drqbc/cfgs/task/online/finger_turn_hard.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: finger_turn_hard
drqbc/cfgs/task/online/hard.yaml ADDED
@@ -0,0 +1,2 @@
1
+ num_train_frames: 30100000
2
+ stddev_schedule: 'linear(1.0,0.1,2000000)'
drqbc/cfgs/task/online/hopper_hop.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: hopper_hop
drqbc/cfgs/task/online/hopper_stand.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: hopper_stand
drqbc/cfgs/task/online/humanoid_run.yaml ADDED
@@ -0,0 +1,7 @@
1
+ defaults:
2
+ - hard
3
+ - _self_
4
+
5
+ task_name: humanoid_run
6
+ lr: 8e-5
7
+ feature_dim: 100
drqbc/cfgs/task/online/humanoid_stand.yaml ADDED
@@ -0,0 +1,7 @@
1
+ defaults:
2
+ - hard
3
+ - _self_
4
+
5
+ task_name: humanoid_stand
6
+ lr: 8e-5
7
+ feature_dim: 100
drqbc/cfgs/task/online/humanoid_walk.yaml ADDED
@@ -0,0 +1,7 @@
1
+ defaults:
2
+ - hard
3
+ - _self_
4
+
5
+ task_name: humanoid_walk
6
+ lr: 8e-5
7
+ feature_dim: 100
drqbc/cfgs/task/online/medium.yaml ADDED
@@ -0,0 +1,2 @@
1
+ num_train_frames: 3100000
2
+ stddev_schedule: 'linear(1.0,0.1,500000)'
drqbc/cfgs/task/online/pendulum_swingup.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: pendulum_swingup
drqbc/cfgs/task/online/quadruped_run.yaml ADDED
@@ -0,0 +1,6 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: quadruped_run
6
+ replay_buffer_size: 100000
drqbc/cfgs/task/online/quadruped_walk.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: quadruped_walk
drqbc/cfgs/task/online/reach_duplo.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: reach_duplo
drqbc/cfgs/task/online/reacher_easy.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: reacher_easy
drqbc/cfgs/task/online/reacher_hard.yaml ADDED
@@ -0,0 +1,5 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: reacher_hard
drqbc/cfgs/task/online/walker_run.yaml ADDED
@@ -0,0 +1,7 @@
1
+ defaults:
2
+ - medium
3
+ - _self_
4
+
5
+ task_name: walker_run
6
+ nstep: 1
7
+ batch_size: 512
drqbc/cfgs/task/online/walker_stand.yaml ADDED
@@ -0,0 +1,7 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: walker_stand
6
+ nstep: 1
7
+ batch_size: 512
drqbc/cfgs/task/online/walker_walk.yaml ADDED
@@ -0,0 +1,7 @@
1
+ defaults:
2
+ - easy
3
+ - _self_
4
+
5
+ task_name: walker_walk
6
+ nstep: 1
7
+ batch_size: 512
drqbc/conda_env.yml ADDED
@@ -0,0 +1,245 @@
1
+ name: drqv2
2
+ channels:
3
+ - pytorch
4
+ - nvidia
5
+ - defaults
6
+ dependencies:
7
+ - _libgcc_mutex=0.1
8
+ - _openmp_mutex=4.5
9
+ - absl-py=0.13.0
10
+ - anyio=2.2.0
11
+ - argon2-cffi=20.1.0
12
+ - async_generator=1.10
13
+ - attrs=21.2.0
14
+ - babel=2.9.1
15
+ - backcall=0.2.0
16
+ - blas=1.0
17
+ - bleach=4.1.0
18
+ - blosc=1.21.0
19
+ - brotli=1.0.9
20
+ - brotlipy=0.7.0
21
+ - brunsli=0.1
22
+ - bzip2=1.0.8
23
+ - c-ares=1.18.1
24
+ - ca-certificates=2021.10.26
25
+ - certifi=2021.10.8
26
+ - cffi=1.14.6
27
+ - cfitsio=3.470
28
+ - charls=2.2.0
29
+ - charset-normalizer=2.0.4
30
+ - cloudpickle=2.0.0
31
+ - cryptography=36.0.0
32
+ - cudatoolkit=11.1.74
33
+ - cycler=0.11.0
34
+ - cytoolz=0.11.0
35
+ - dask-core=2021.10.0
36
+ - debugpy=1.5.1
37
+ - defusedxml=0.7.1
38
+ - entrypoints=0.3
39
+ - ffmpeg=4.2.2
40
+ - fonttools=4.25.0
41
+ - freetype=2.11.0
42
+ - fsspec=2021.10.1
43
+ - giflib=5.2.1
44
+ - gmp=6.2.1
45
+ - gnutls=3.6.15
46
+ - idna=3.3
47
+ - imagecodecs=2021.8.26
48
+ - imageio=2.9.0
49
+ - importlib-metadata=4.8.2
50
+ - importlib_metadata=4.8.2
51
+ - intel-openmp=2021.4.0
52
+ - ipykernel=6.4.1
53
+ - ipython=7.29.0
54
+ - ipython_genutils=0.2.0
55
+ - jedi=0.18.0
56
+ - jinja2=3.0.2
57
+ - jpeg=9d
58
+ - json5=0.9.6
59
+ - jsonschema=3.2.0
60
+ - jupyter-packaging=0.7.12
61
+ - jupyter_client=7.1.0
62
+ - jupyter_core=4.9.1
63
+ - jupyter_server=1.4.1
64
+ - jupyterlab=3.0.14
65
+ - jupyterlab_pygments=0.1.2
66
+ - jupyterlab_server=2.10.2
67
+ - jxrlib=1.1
68
+ - kiwisolver=1.3.1
69
+ - krb5=1.19.2
70
+ - lame=3.100
71
+ - lcms2=2.12
72
+ - ld_impl_linux-64=2.35.1
73
+ - lerc=3.0
74
+ - libaec=1.0.4
75
+ - libcurl=7.80.0
76
+ - libdeflate=1.8
77
+ - libedit=3.1.20210910
78
+ - libev=4.33
79
+ - libffi=3.3
80
+ - libgcc-ng=9.3.0
81
+ - libgfortran-ng=7.5.0
82
+ - libgfortran4=7.5.0
83
+ - libgomp=9.3.0
84
+ - libidn2=2.3.2
85
+ - libnghttp2=1.46.0
86
+ - libopus=1.3.1
87
+ - libpng=1.6.37
88
+ - libsodium=1.0.18
89
+ - libssh2=1.9.0
90
+ - libstdcxx-ng=9.3.0
91
+ - libtasn1=4.16.0
92
+ - libtiff=4.2.0
93
+ - libunistring=0.9.10
94
+ - libuv=1.40.0
95
+ - libvpx=1.7.0
96
+ - libwebp=1.2.0
97
+ - libwebp-base=1.2.0
98
+ - libzopfli=1.0.3
99
+ - locket=0.2.1
100
+ - lz4-c=1.9.3
101
+ - markupsafe=2.0.1
102
+ - matplotlib-inline=0.1.2
103
+ - mistune=0.8.4
104
+ - mkl=2020.2
105
+ - mkl-service=2.3.0
106
+ - mkl_fft=1.3.0
107
+ - mkl_random=1.1.1
108
+ - munkres=1.1.4
109
+ - nbclassic=0.2.6
110
+ - nbclient=0.5.3
111
+ - nbconvert=6.3.0
112
+ - nbformat=5.1.3
113
+ - ncurses=6.3
114
+ - nest-asyncio=1.5.1
115
+ - nettle=3.7.3
116
+ - networkx=2.6.3
117
+ - notebook=6.4.6
118
+ - numpy=1.19.2
119
+ - numpy-base=1.19.2
120
+ - olefile=0.46
121
+ - openh264=2.1.1
122
+ - openjpeg=2.4.0
123
+ - openssl=1.1.1m
124
+ - packaging=21.3
125
+ - pandocfilters=1.4.3
126
+ - parso=0.8.3
127
+ - partd=1.2.0
128
+ - pexpect=4.8.0
129
+ - pickleshare=0.7.5
130
+ - pillow=8.4.0
131
+ - pip=21.1.3
132
+ - prometheus_client=0.12.0
133
+ - prompt-toolkit=3.0.20
134
+ - ptyprocess=0.7.0
135
+ - pycparser=2.21
136
+ - pygments=2.10.0
137
+ - pyopenssl=21.0.0
138
+ - pyparsing=2.4.7
139
+ - pyrsistent=0.18.0
140
+ - pysocks=1.7.1
141
+ - python=3.8.12
142
+ - python-dateutil=2.8.2
143
+ - pytorch=1.10.1
144
+ - pytorch-mutex=1.0
145
+ - pytz=2021.3
146
+ - pywavelets=1.1.1
147
+ - pyyaml=6.0
148
+ - pyzmq=22.3.0
149
+ - readline=8.1.2
150
+ - requests=2.27.1
151
+ - scikit-image=0.18.1
152
+ - scipy=1.6.2
153
+ - send2trash=1.8.0
154
+ - setuptools=58.0.4
155
+ - six=1.16.0
156
+ - snappy=1.1.8
157
+ - sniffio=1.2.0
158
+ - sqlite=3.37.0
159
+ - terminado=0.9.4
160
+ - testpath=0.5.0
161
+ - tifffile=2021.7.2
162
+ - tk=8.6.11
163
+ - toolz=0.11.2
164
+ - torchaudio=0.10.1
165
+ - torchvision=0.11.2
166
+ - tornado=6.1
167
+ - traitlets=5.1.1
168
+ - typing_extensions=3.10.0.2
169
+ - urllib3=1.26.7
170
+ - wcwidth=0.2.5
171
+ - webencodings=0.5.1
172
+ - wheel=0.37.1
173
+ - x264=1!157.20191217
174
+ - xz=5.2.5
175
+ - yaml=0.2.5
176
+ - zeromq=4.3.4
177
+ - zfp=0.5.5
178
+ - zipp=3.7.0
179
+ - zlib=1.2.11
180
+ - zstd=1.4.9
181
+ - pip:
182
+ - antlr4-python3-runtime==4.8
183
+ - astunparse==1.6.3
184
+ - cachetools==4.2.4
185
+ - cython==0.29.26
186
+ - decorator==4.4.2
187
+ - dm-control==0.0.403778684
188
+ - dm-env==1.5
189
+ - dm-tree==0.1.6
190
+ - dmc2gym==1.0.0
191
+ - fasteners==0.16.3
192
+ - flatbuffers==2.0
193
+ - future==0.18.2
194
+ - gast==0.4.0
195
+ - glfw==2.5.0
196
+ - google-auth==2.3.3
197
+ - google-auth-oauthlib==0.4.6
198
+ - google-pasta==0.2.0
199
+ - grpcio==1.43.0
200
+ - gym==0.21.0
201
+ - h5py==3.6.0
202
+ - hydra-core==1.1.0
203
+ - hydra-submitit-launcher==1.1.5
204
+ - imageio-ffmpeg==0.4.4
205
+ - importlib-resources==5.4.0
206
+ - ipdb==0.13.9
207
+ - joblib==1.1.0
208
+ - keras==2.7.0
209
+ - keras-preprocessing==1.1.2
210
+ - labmaze==1.0.5
211
+ - libclang==12.0.0
212
+ - lxml==4.7.1
213
+ - markdown==3.3.6
214
+ - matplotlib==3.4.2
215
+ - moviepy==1.0.3
216
+ - mujoco-py==2.1.2.14
217
+ - oauthlib==3.1.1
218
+ - omegaconf==2.1.1
219
+ - opencv-python==4.5.3.56
220
+ - opt-einsum==3.3.0
221
+ - pandas==1.3.0
222
+ - proglog==0.1.9
223
+ - protobuf==3.19.3
224
+ - pyasn1==0.4.8
225
+ - pyasn1-modules==0.2.8
226
+ - pyopengl==3.1.5
227
+ - requests-oauthlib==1.3.0
228
+ - rsa==4.8
229
+ - scikit-learn==1.0.2
230
+ - sklearn==0.0
231
+ - submitit==1.4.1
232
+ - tb-nightly==2.8.0a20220114
233
+ - tensorboard==2.7.0
234
+ - tensorboard-data-server==0.6.1
235
+ - tensorboard-plugin-wit==1.8.1
236
+ - tensorflow==2.7.0
237
+ - tensorflow-estimator==2.7.0
238
+ - tensorflow-io-gcs-filesystem==0.23.1
239
+ - termcolor==1.1.0
240
+ - threadpoolctl==3.0.0
241
+ - toml==0.10.2
242
+ - tqdm==4.62.3
243
+ - werkzeug==2.0.2
244
+ - wrapt==1.13.3
245
+ - yapf==0.31.0