blackcodetavern committed
Commit: f13af09
Parent(s): f05a97b

initial commit
Files changed:
- LICENSE +22 -0
- README.md +27 -0
- config.json +24 -0
- model.onnx +3 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +16 -0
- vocab.txt +0 -0
LICENSE
ADDED
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2023 Philip May, Deutsche Telekom AG
+Copyright (c) 2022 deepset GmbH
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
README.md
CHANGED
@@ -1,3 +1,30 @@
 ---
+pipeline_tag: sentence-similarity
+language:
+- de
+tags:
+- sentence-transformers
+- sentence-similarity
+- transformers
+- setfit
+- onnx
 license: mit
+datasets:
+- deutsche-telekom/ger-backtrans-paraphrase
 ---
+
+# German BERT large paraphrase cosine
+This is a [sentence-transformers](https://www.SBERT.net) model.
+It maps sentences & paragraphs (text) into a 1024-dimensional dense vector space.
+The model is intended to be used together with [SetFit](https://github.com/huggingface/setfit)
+to improve German few-shot text classification.
+
+This is the ONNX version of [deutsche-telekom/gbert-large-paraphrase-cosine](https://huggingface.co/deutsche-telekom/gbert-large-paraphrase-cosine).
+
+## Licensing
+Copyright (c) 2023 [Philip May](https://may.la/), [Deutsche Telekom AG](https://www.telekom.com/)\
+Copyright (c) 2022 [deepset GmbH](https://www.deepset.ai/)
+
+Licensed under the **MIT License** (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License by reviewing the file
+[LICENSE](https://huggingface.co/deutsche-telekom/gbert-large-paraphrase-cosine/blob/main/LICENSE) in the repository.
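The README stops short of a usage example. Below is a minimal sketch of embedding German sentences with the exported `model.onnx` via onnxruntime and the tokenizer files from this commit. The repo-local paths, the ONNX input names, and mean pooling over token embeddings are assumptions (mean pooling is what the base sentence-transformers model typically uses); the diff itself specifies none of this.

```python
import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

# Load the tokenizer files and model.onnx from a local checkout of the repo.
tokenizer = AutoTokenizer.from_pretrained(".")
session = ort.InferenceSession("model.onnx")

def embed(sentences):
    enc = tokenizer(sentences, padding=True, truncation=True,
                    max_length=512, return_tensors="np")
    # Feed only the inputs the ONNX graph actually declares.
    names = {i.name for i in session.get_inputs()}
    token_embeddings = session.run(
        None, {k: v for k, v in enc.items() if k in names})[0]
    # Mean pooling over non-padding tokens -> one 1024-dim vector per sentence.
    mask = enc["attention_mask"][:, :, None].astype(np.float32)
    return (token_embeddings * mask).sum(axis=1) / mask.sum(axis=1)

a, b = embed(["Das ist ein Test.", "Dies ist ein Versuch."])
print(float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))))
```

The cosine of the two vectors is the similarity score the model name refers to; for SetFit, these embeddings would feed a lightweight classification head.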
config.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "_name_or_path": "deutsche-telekom/gbert-large-paraphrase-cosine",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.31.0",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 31102
+}
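This is the standard BERT-large shape, which explains the 1024-dimensional embeddings the README mentions. A quick sanity-check sketch (loading from a local checkout of the repo is an assumption):

```python
from transformers import BertConfig

# Read config.json from the current directory (illustrative path).
config = BertConfig.from_pretrained(".")
head_dim = config.hidden_size // config.num_attention_heads
print(config.num_hidden_layers, config.num_attention_heads, head_dim)
# -> 24 16 64: 24 layers of 16 attention heads, each 1024 / 16 = 64 wide.
```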
model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2e5ec2e21d9665a52c6fc5add7e23116cbc2253acb86de96ebc9e5bde698a26
+size 1339229961
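What is committed here is not the model itself but a Git LFS pointer: the real ~1.3 GB file is fetched by LFS, and the pointer records its SHA-256 digest and size in bytes. A small sketch verifying a downloaded `model.onnx` against this pointer:

```python
import hashlib
import os

# Values copied from the LFS pointer above.
EXPECTED_OID = "a2e5ec2e21d9665a52c6fc5add7e23116cbc2253acb86de96ebc9e5bde698a26"
EXPECTED_SIZE = 1339229961  # bytes, roughly 1.25 GiB

assert os.path.getsize("model.onnx") == EXPECTED_SIZE

sha = hashlib.sha256()
with open("model.onnx", "rb") as f:
    # Hash in 1 MiB chunks to keep memory flat for a gigabyte-scale file.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID
print("model.onnx matches the LFS pointer")
```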
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
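These are the usual BERT special tokens. A short illustrative check of where they land in an encoded input (local checkout assumed):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # local checkout assumed
ids = tokenizer("Guten Morgen!")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
# -> ['[CLS]', ..., '[SEP]']; [PAD] fills out batched inputs, [UNK] covers
#    out-of-vocabulary tokens, and [MASK] matters only for masked-LM training.
```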
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "max_len": 512,
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": false,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
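The notable settings are `"do_lower_case": false` (the tokenizer is cased, as expected for a German BERT) and `"model_max_length": 512`, matching the config's `max_position_embeddings`. A minimal sketch exercising both (local checkout assumed):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # local checkout assumed
print(tokenizer.model_max_length)            # 512
print(tokenizer.tokenize("Straße STRASSE"))  # casing preserved: do_lower_case=false
enc = tokenizer("wort " * 1000, truncation=True)
print(len(enc["input_ids"]))                 # truncated to 512
```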
vocab.txt
ADDED
The diff for this file is too large to render.