hf-transformers-bot committed
Commit a6d5daf
1 Parent(s): 3d59243

Upload tiny models for TapasForMaskedLM

config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "agg_temperature": 1.0,
+   "aggregation_labels": null,
+   "aggregation_loss_importance": 0.8,
+   "aggregation_loss_weight": 1.0,
+   "aggregation_temperature": 1.0,
+   "allow_empty_column_selection": false,
+   "answer_loss_cutoff": 100,
+   "answer_loss_importance": 0.001,
+   "architectures": [
+     "TapasForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "average_approximation_function": "ratio",
+   "average_logits_per_cell": true,
+   "cell_selection_preference": 0.5,
+   "disable_per_token_loss": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 32,
+   "huber_loss_delta": 25.0,
+   "init_cell_selection_weights_to_zero": true,
+   "initializer_range": 0.02,
+   "intermediate_size": 37,
+   "layer_norm_eps": 1e-12,
+   "max_num_columns": 32,
+   "max_num_rows": 64,
+   "max_position_embeddings": 512,
+   "model_type": "tapas",
+   "no_aggregation_label_index": null,
+   "num_aggregation_labels": 4,
+   "num_attention_heads": 4,
+   "num_hidden_layers": 5,
+   "pad_token_id": 0,
+   "positive_label_weight": 10.0,
+   "positive_weight": 10.0,
+   "reset_position_index_per_cell": true,
+   "select_one_column": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.0.dev0",
+   "type_vocab_sizes": [
+     3,
+     256,
+     256,
+     2,
+     256,
+     256,
+     10
+   ],
+   "use_answer_as_supervision": true,
+   "use_gumbel_for_agg": false,
+   "use_gumbel_for_aggregation": false,
+   "use_gumbel_for_cells": false,
+   "use_normalized_answer_loss": false,
+   "vocab_size": 30522
+ }
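This config describes a deliberately tiny TAPAS encoder (hidden_size 32, 5 layers, 4 attention heads, intermediate_size 37), small enough for CI-style tests. A minimal sketch of loading it with transformers is below; the repo id is an assumption for illustration, so substitute the repository this commit was pushed to, and note that older transformers releases may additionally require torch-scatter for TAPAS.

```python
# Minimal sketch: inspect and instantiate the tiny TAPAS model described by this config.json.
# The repo id is an assumption; replace it with the repository this commit belongs to.
# Older transformers releases may also need torch-scatter installed for TAPAS.
from transformers import TapasConfig, TapasForMaskedLM

repo_id = "hf-internal-testing/tiny-random-TapasForMaskedLM"  # assumed repo id

config = TapasConfig.from_pretrained(repo_id)
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 32 5 4

model = TapasForMaskedLM.from_pretrained(repo_id)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")  # on the order of ~1M
```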
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1937a7edb4feb18b8adaeb72eefa79880e064bfc92a61248df8b6f8a79441eeb
+ size 4398865
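pytorch_model.bin is stored via Git LFS, so the diff only shows the pointer file: the spec version, the SHA-256 of the actual payload, and its size in bytes. A quick way to confirm a downloaded copy matches this pointer (the local path is an assumption):

```python
# Sketch: verify a downloaded pytorch_model.bin against the Git LFS pointer above.
import hashlib

expected_oid = "1937a7edb4feb18b8adaeb72eefa79880e064bfc92a61248df8b6f8a79441eeb"
expected_size = 4398865  # bytes

path = "pytorch_model.bin"  # assumed local path to the downloaded weights
with open(path, "rb") as f:
    data = f.read()

assert len(data) == expected_size, f"size mismatch: {len(data)} bytes"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```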
special_tokens_map.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "additional_special_tokens": [
+     "[EMPTY]"
+   ],
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
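These special tokens, including TAPAS's extra [EMPTY] token for empty table cells, are exposed as attributes once the tokenizer is loaded. A small sketch, again with an assumed repo id:

```python
# Sketch: the entries in special_tokens_map.json surface as tokenizer attributes.
from transformers import TapasTokenizer

repo_id = "hf-internal-testing/tiny-random-TapasForMaskedLM"  # assumed repo id
tokenizer = TapasTokenizer.from_pretrained(repo_id)

print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.mask_token)  # [CLS] [SEP] [MASK]
print(tokenizer.additional_special_tokens)  # ['[EMPTY]']
```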
tokenizer_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "additional_special_tokens": [
+     "[EMPTY]"
+   ],
+   "cell_trim_length": -1,
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "drop_rows_to_fit": false,
+   "empty_token": "[EMPTY]",
+   "mask_token": "[MASK]",
+   "max_column_id": null,
+   "max_question_length": null,
+   "max_row_id": null,
+   "min_question_length": null,
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": "/home/runner/.cache/huggingface/hub/models--google--tapas-base-finetuned-sqa/snapshots/81916d20eef75766aeae71b9487fd615017b0413/special_tokens_map.json",
+   "strip_accents": null,
+   "strip_column_names": false,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "TapasTokenizer",
+   "unk_token": "[UNK]",
+   "update_answer_coordinates": false
+ }
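tokenizer_config.json records the TapasTokenizer settings: lowercasing, model_max_length 512, the [EMPTY] cell token, and table-specific options such as cell_trim_length and drop_rows_to_fit. As a hedged usage sketch, TapasTokenizer takes a pandas DataFrame of string cells plus natural-language queries and produces the 7-dimensional token_type_ids that TAPAS expects (one id per entry in type_vocab_sizes); the repo id and the example table below are illustrative only.

```python
# Sketch: tokenize a small table + question with the uploaded tokenizer files.
import pandas as pd
from transformers import TapasTokenizer

repo_id = "hf-internal-testing/tiny-random-TapasForMaskedLM"  # assumed repo id
tokenizer = TapasTokenizer.from_pretrained(repo_id)

# TapasTokenizer expects every table cell to be a string; the table itself is made up.
table = pd.DataFrame({"Actor": ["Brad Pitt", "Leonardo Di Caprio"], "Age": ["59", "48"]})
inputs = tokenizer(
    table=table,
    queries=["How old is Brad Pitt?"],
    padding="max_length",
    return_tensors="pt",
)

print(inputs["input_ids"].shape)       # (1, 512) given model_max_length
print(inputs["token_type_ids"].shape)  # (1, 512, 7) -- one id per type_vocab_sizes entry
```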
vocab.txt ADDED
The diff for this file is too large to render. See raw diff