peterdavidfagan
committed on
Commit • 99a02ec
1 Parent(s): 781346b
6806579bb7a5c2405a51517b817b9a9766b5e524c9ff0844302850a8b050bd9e
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
- .gitattributes +7 -0
- default/checkpoint +1 -0
- default/metrics._reduction_counter.value/.zarray +1 -0
- default/metrics._reduction_counter.value/0 +0 -0
- default/metrics.loss.count/.zarray +1 -0
- default/metrics.loss.count/0 +0 -0
- default/metrics.loss.total/.zarray +1 -0
- default/metrics.loss.total/0 +0 -0
- default/opt_state.1.0.count/.zarray +1 -0
- default/opt_state.1.0.count/0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.bias/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.bias/0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.scale/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.scale/0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.bias/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.bias/0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.scale/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.scale/0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.bias/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.bias/0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/0.0.0 +3 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.bias/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.bias/0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/0.0.0 +3 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.bias/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.bias/0.0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/0.0.0.0 +3 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.bias/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.bias/0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/0.0.0.0 +3 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.bias/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.bias/0.0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/0.0.0.0 +3 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.bias/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.bias/0.0.0 +0 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/0.0.0.0 +3 -0
- default/opt_state.1.0.mu.attention_blocks.posembed_input.pos_embedding/.zarray +1 -0
- default/opt_state.1.0.mu.attention_blocks.posembed_input.pos_embedding/0.0.0 +0 -0
- default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.bias/.zarray +1 -0
- default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.bias/0 +0 -0
- default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.kernel/.zarray +1 -0
- default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.kernel/0.0 +3 -0
- default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_1.bias/.zarray +1 -0
- default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_1.bias/0 +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/0.0.0 filter=lfs diff=lfs merge=lfs -text
+default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/0.0.0 filter=lfs diff=lfs merge=lfs -text
+default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
default/checkpoint
ADDED
@@ -0,0 +1 @@
+[binary msgpack content: a single-line checkpoint index whose leaves are PLACEHOLDER:// references to the per-leaf zarr arrays added in this commit. Top-level keys: categorical_train_step, continuous_train_step, diffusion_train_step, metrics (_reduction_counter.value, loss.count, loss.total), opt_state (1.0.count, 1.0.mu.*, 1.0.nu.*, 1.2.count), params (attention_blocks, diffusion_action_head, image_encoder, readout_encoder, text_encoder), rngs (diffusion, dropout, params, patch_encoding), step, and text_tokenize_fn.]
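The checkpoint file above is only an index; the actual tensors live in the zarr directories added below. A minimal restore sketch follows, under the assumption that this "default" directory was written with Orbax's PyTreeCheckpointer (the msgpack index with PLACEHOLDER:// references plus one zarr array per leaf matches that format); the local path is hypothetical.

```python
# Minimal restore sketch (assumption: the directory was written with
# orbax.checkpoint.PyTreeCheckpointer). The path below is hypothetical.
import orbax.checkpoint as ocp

checkpointer = ocp.PyTreeCheckpointer()
restored = checkpointer.restore("path/to/repo/default")

# The restored tree mirrors the keys in the index:
# params, opt_state, metrics, rngs, step, ...
print(list(restored))
```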
default/metrics._reduction_counter.value/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<i4","fill_value":null,"filters":null,"order":"C","shape":[],"zarr_format":2}
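Each .zarray file in this commit is standard zarr v2 array metadata (chunk shape, zstd level-1 compressor, little-endian dtype, C order), so individual arrays can also be inspected directly with zarr-python rather than going through the checkpoint index. A sketch, assuming the repository has been cloned locally with git-lfs so the chunk files contain real data; the path is hypothetical.

```python
# Read one array directly from its zarr v2 store (assumption: repo cloned
# locally with git-lfs, so chunk files are data rather than LFS pointers).
import zarr

arr = zarr.open_array(
    "path/to/repo/default/metrics._reduction_counter.value",  # hypothetical path
    mode="r",
)
print(arr.dtype, arr.shape)   # int32, () -- a scalar counter
print(arr[...])               # materialize the value
```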
default/metrics._reduction_counter.value/0
ADDED
Binary file (13 Bytes).
default/metrics.loss.count/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<i4","fill_value":null,"filters":null,"order":"C","shape":[],"zarr_format":2}
default/metrics.loss.count/0
ADDED
Binary file (13 Bytes).
default/metrics.loss.total/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[],"zarr_format":2}
default/metrics.loss.total/0
ADDED
Binary file (13 Bytes).
default/opt_state.1.0.count/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<i4","fill_value":null,"filters":null,"order":"C","shape":[],"zarr_format":2}
default/opt_state.1.0.count/0
ADDED
Binary file (13 Bytes).
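The opt_state.1.0.* entries (a scalar count plus per-parameter mu and nu trees, with a separate 1.2.count later in the index) follow the layout of an Adam-style transform in optax. A short sketch of what produces that structure; the use of optax.adam and the parameter shapes are assumptions for illustration only.

```python
# Illustrative only: shows why the checkpoint stores count / mu / nu leaves.
# Assumption: training uses optax.adam (or an optax chain containing
# scale_by_adam); the parameter shapes below are made up for the example.
import jax
import jax.numpy as jnp
import optax

params = {"Dense_0": {"kernel": jnp.zeros((768, 3072)), "bias": jnp.zeros((3072,))}}
opt = optax.adam(learning_rate=1e-4)
opt_state = opt.init(params)

adam_state = opt_state[0]        # ScaleByAdamState(count, mu, nu)
print(adam_state.count)          # scalar step counter -> opt_state.*.count
print(jax.tree_util.tree_structure(adam_state.mu))  # same tree as params -> opt_state.*.mu.*
```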
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.bias/0.0
ADDED
Binary file (34 kB).
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.scale/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.scale/0.0
ADDED
Binary file (34.2 kB).
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.bias/0.0
ADDED
Binary file (34.1 kB).
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.scale/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.scale/0.0
ADDED
Binary file (34.2 kB).
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,3072],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,3072],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.bias/0.0
ADDED
Binary file (137 kB). View file
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768,3072],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768,3072],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/0.0.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86ffdc45380d142cf8d87e82f2ebb29c1a77dd7887516a9578a710f3f409457d
+size 105055877
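The large kernel chunks in this commit are stored as Git LFS pointers (the standard three-line `version` / `oid sha256:` / `size` format shown above). As a hedged illustration, the helper below, written for this note rather than taken from the repository, parses such a pointer and checks a downloaded chunk against its recorded hash and size:

```python
import hashlib


def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; keep only the oid digest and size.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"])}


def verify(blob_path: str, pointer_text: str) -> bool:
    pointer = parse_lfs_pointer(pointer_text)
    digest, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        # Stream the blob in 1 MiB pieces to avoid loading ~100 MB files at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == pointer["oid"] and size == pointer["size"]
```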
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.bias/0.0
ADDED
Binary file (21.6 kB). View file
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,3072,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,3072,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/0.0.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:127008d9f9105febae1049f5324d72409e37de66c12e20fb35e722e2e65d018c
+size 104961220
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.bias/0.0.0
ADDED
Binary file (34.3 kB). View file
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/0.0.0.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:811f48250470976ba0901a846ba060f84ce840532b0fcc0580a572f257be7921
+size 26211098
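As a quick consistency check (my arithmetic, not something stated in the commit): the `.zarray` above declares a single float32 chunk of shape [12, 768, 12, 64], which in Flax's scanned-attention layout plausibly corresponds to 12 stacked encoder blocks × 768 features × 12 heads × 64 head dim. That raw buffer is 28,311,552 bytes, so the 26,211,098-byte LFS object is consistent with one mildly zstd-compressed chunk:

```python
import numpy as np

# Raw (uncompressed) size of one float32 chunk of shape [12, 768, 12, 64].
raw_bytes = int(np.prod([12, 768, 12, 64])) * np.dtype("<f4").itemsize
print(raw_bytes)               # 28311552
print(26_211_098 / raw_bytes)  # ~0.93 -> mild compression, plausible for zstd level 1
```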
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.bias/0.0
ADDED
Binary file (22.1 kB). View file
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,12,64,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,12,64,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/0.0.0.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4ade1ab9c4a526959045601b815e205bc728fa9e725b903b92f2bdf71aac57d
+size 26202498
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.bias/0.0.0
ADDED
Binary file (34.1 kB). View file
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/0.0.0.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fa8b7babb7ea4ec29f711450c1500a603957b13804ebabc5ab16de228201c22
+size 26215069
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.bias/0.0.0
ADDED
Binary file (34 kB). View file
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[12,768,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/0.0.0.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a97a82bde815191a7881d70787854ed98bc67d8f41352bc940dd7389db309fe4
+size 26178396
default/opt_state.1.0.mu.attention_blocks.posembed_input.pos_embedding/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[1,74,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[1,74,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.posembed_input.pos_embedding/0.0.0
ADDED
Binary file (211 kB). View file
default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[768],"zarr_format":2}
default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.bias/0
ADDED
Binary file (2.89 kB). View file
default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.kernel/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[768,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[768,768],"zarr_format":2}
default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_0.kernel/0.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b37577301e2c3e517d8ac6b5396d98dab90c876ab26090585c20879ca86c79b
+size 2183388
default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_1.bias/.zarray
ADDED
@@ -0,0 +1 @@
+{"chunks":[768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[768],"zarr_format":2}
default/opt_state.1.0.mu.diffusion_action_head.denoiser.FourierFeatures_0.MLPBlock_0.Dense_1.bias/0
ADDED
Binary file (2.89 kB). View file