# rm_ptm.infolm-l_p.1 / hparams.yaml
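# Key names follow COMET's UnifiedMetric hyperparameters; the comments
# below interpret them on that assumption.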
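# Estimator head and regularization: Tanh activations with dropout 0.1;
# class_identifier selects the UnifiedMetric architecture, and
# cross_entropy_weights (class weights for the word-level loss) is unset.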
activations: Tanh
batch_size: 4
class_identifier: unified_metric
cross_entropy_weights: null
dropout: 0.1
encoder_learning_rate: 1.0e-06
encoder_model: XLM-RoBERTa
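# Severity labels for the word-level task (inert here, since
# word_level_training is false below).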
error_labels:
- minor
- major
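# Regression head: hidden layers of 3072 and 1024 units with a linear
# (null) output activation.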
final_activation: null
hidden_sizes:
- 3072
- 1024
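# The MT hypothesis is scored jointly against the source and reference.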
input_segments:
- mt
- src
- ref
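# Encoder handling: the embedding layer stays frozen; 'mix' pools all
# encoder layers through a sparsemax-weighted layerwise attention, with
# a layerwise learning-rate decay of 0.95.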
keep_embeddings_frozen: true
layer: mix
layer_norm: false
layer_transformation: sparsemax
layerwise_decay: 0.95
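# Optimization: AdamW with a 1.5e-05 head LR against the 1.0e-06 encoder
# LR set above; MSE sentence-level loss; loss_lambda would balance the
# word- and sentence-level losses if both were active; the encoder is
# unfrozen after 30% of the first epoch; tokens are average-pooled.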
learning_rate: 1.5e-05
load_pretrained_weights: true
loss: mse
loss_lambda: 0.65
nr_frozen_epochs: 0.3
optimizer: AdamW
pool: avg
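# InfoXLM-large shares the XLM-RoBERTa architecture, matching
# encoder_model above.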
pretrained_model: microsoft/infoxlm-large
sent_layer: mix
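# Data: a WMT human-evaluation reward dataset; validation_data: 0.1
# looks like a held-out fraction rather than the file list COMET
# normally expects here.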
train_data:
- data/wmt-human-eval/reward_dataset.csv
validation_data: 0.1
warmup_steps: 0
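# The word-level head would read encoder layer 24, but word-level
# training is disabled.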
word_layer: 24
word_level_training: false
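# Not standard COMET keys: presumably the z-score range used to
# normalize or clip human scores in the training data.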
min_zscore: -1.8359497445108464
max_zscore: 0.8327298628095561
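# A minimal loading sketch, assuming this config ships with a COMET-style
# checkpoint (the checkpoint path and inputs below are hypothetical):
#   from comet import load_from_checkpoint
#   model = load_from_checkpoint("rm_ptm.infolm-l_p.1/checkpoints/model.ckpt")
#   out = model.predict(
#       [{"src": "source text", "mt": "machine translation", "ref": "reference"}],
#       batch_size=4,
#   )
#   print(out.scores)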