Sean MacAvaney committed on
Commit 888b367
1 Parent(s): 3c59f10

initial commit

config.json ADDED
@@ -0,0 +1,5 @@
+{
+  "model_type": "DualEncoder",
+  "shared": false,
+  "transformers_version": "4.20.1"
+}
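This top-level config declares a two-tower model: "model_type": "DualEncoder" with "shared": false means the query and document sides are distinct encoders, stored in the query_encoder/ and doc_encoder/ directories added below. A minimal sketch of how such a pair is typically scored, assuming the usual dot-product similarity between the two encoders' output vectors (the score helper is illustrative, not part of this repository):

    import torch

    def score(query_vec: torch.Tensor, doc_vec: torch.Tensor) -> torch.Tensor:
        # Dual encoders embed queries and documents independently and rank
        # by similarity; a dot product is the standard choice for the
        # vocabulary-sized sparse vectors produced by the encoders below.
        return (query_vec * doc_vec).sum(dim=-1)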
doc_encoder/config.json ADDED
@@ -0,0 +1,14 @@
+{
+  "activation": "relu",
+  "architectures": [
+    "TransformerMLMSparseEncoder"
+  ],
+  "doc_quality": "no",
+  "model_type": "MLM",
+  "norm": "log1p",
+  "pool": "max",
+  "term_importance": "no",
+  "tf_base_model_name_or_dir": "distilbert-base-uncased",
+  "torch_dtype": "float32",
+  "transformers_version": "4.20.1"
+}
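Taken together, these fields describe a SPLADE-style learned sparse encoder on the document side: masked-LM logits over the vocabulary, a "relu" activation, "log1p" saturation, and "max" pooling across token positions, on top of distilbert-base-uncased. A minimal sketch of that forward pass under those assumptions (an illustration of what the config fields imply, not the actual TransformerMLMSparseEncoder implementation):

    import torch
    from transformers import AutoModelForMaskedLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    mlm = AutoModelForMaskedLM.from_pretrained("distilbert-base-uncased")

    def encode_doc(text: str) -> torch.Tensor:
        batch = tokenizer(text, return_tensors="pt", truncation=True)
        with torch.no_grad():
            logits = mlm(**batch).logits              # (1, seq_len, vocab_size)
        weights = torch.log1p(torch.relu(logits))     # "activation": "relu", "norm": "log1p"
        weights = weights * batch["attention_mask"].unsqueeze(-1)  # mask padding
        return weights.max(dim=1).values              # "pool": "max" over positions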
doc_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:594416ea602438fae234406a49c02375901c43f13d59dec6bc15469021b91835
+size 267977455
loss.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e067575663ecbca8629d3870eb9abfb013912e87e50bf69fc1b437f433861a66
+size 559
query_encoder/config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "architectures": [
+    "BinaryEncoder"
+  ],
+  "model_type": "BINARY",
+  "scale": 0.3,
+  "torch_dtype": "float32",
+  "transformers_version": "4.20.1",
+  "vocab_size": 30522
+}
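On the query side, BinaryEncoder with "vocab_size": 30522 and "scale": 0.3 suggests a non-contextual encoder: a multi-hot bag of query terms over the BERT vocabulary, each weighted by a fixed constant. A hedged sketch of that reading (the constant-weight multi-hot interpretation is an assumption from the field names, not confirmed by this commit):

    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
    VOCAB_SIZE = 30522  # "vocab_size" from the config
    SCALE = 0.3         # "scale" from the config

    def encode_query(text: str) -> torch.Tensor:
        # Assumed behaviour: every query term receives the same fixed weight,
        # yielding a sparse vector that pairs with encode_doc's output.
        ids = tokenizer(text, add_special_tokens=False)["input_ids"]
        vec = torch.zeros(VOCAB_SIZE)
        vec[torch.tensor(ids, dtype=torch.long)] = SCALE
        return vec

Under these sketches, score(encode_query(q), encode_doc(d)) reproduces the dual-encoder scoring wired up by the top-level config.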
query_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0a0da240c2273946ec9b14f901943af502ba16c8488e3f80ed8ac3752b607e4
+size 747
tokenizer/class_config.json ADDED
@@ -0,0 +1 @@
+{"tokenizer_class": "lsr.tokenizer.HFTokenizer"}
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+{
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "name_or_path": "distilbert-base-uncased",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": null,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "unk_token": "[UNK]"
+}
tokenizer/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
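The tokenizer/ directory is a standard DistilBERT tokenizer export ("tokenizer_class": "DistilBertTokenizer"), so it should load with the stock transformers API even without the lsr.tokenizer.HFTokenizer wrapper named in class_config.json; the local path below is a placeholder for wherever this repository is checked out:

    from transformers import AutoTokenizer

    # "path/to/checkout" is a placeholder; point it at a local clone of
    # this repository. vocab.txt, tokenizer.json, and the config files
    # are picked up automatically from the tokenizer/ subfolder.
    tokenizer = AutoTokenizer.from_pretrained("path/to/checkout/tokenizer")
    print(tokenizer.tokenize("learned sparse retrieval"))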