---
language:
- en
library_name: sentence-transformers
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:78183
- loss:AdaptiveLayerLoss
- loss:CoSENTLoss
- loss:GISTEmbedLoss
- loss:OnlineContrastiveLoss
- loss:MultipleNegativesSymmetricRankingLoss
base_model: microsoft/deberta-v3-small
datasets:
- sentence-transformers/all-nli
- sentence-transformers/stsb
- tals/vitaminc
- nyu-mll/glue
- allenai/scitail
- sentence-transformers/xsum
- sentence-transformers/sentence-compression
- allenai/sciq
- allenai/qasc
- allenai/openbookqa
- sentence-transformers/msmarco-msmarco-distilbert-base-v3
- sentence-transformers/natural-questions
- sentence-transformers/trivia-qa
- sentence-transformers/quora-duplicates
- sentence-transformers/gooaq
metrics:
- pearson_cosine
- spearman_cosine
- pearson_manhattan
- spearman_manhattan
- pearson_euclidean
- spearman_euclidean
- pearson_dot
- spearman_dot
- pearson_max
- spearman_max
widget:
- source_sentence: The X and Y chromosomes in human beings that determine the sex
of an individual.
sentences:
- A glacier leaves behind bare rock when it retreats.
- Prokaryotes are unicellular organisms that lack organelles surrounded by membranes.
- Mammalian sex determination is determined genetically by the presence of chromosomes
identified by the letters x and y.
- source_sentence: Police officer with riot shield stands in front of crowd.
sentences:
- A police officer stands in front of a crowd.
- A pair of people play video games together on a couch.
- People are outside digging a hole.
- source_sentence: A young girl sitting on a white comforter on a bed covered with
clothing, holding a yellow stuffed duck.
sentences:
- A man standing in a room is pointing up.
- A Little girl is enjoying cake outside.
- A yellow duck being held by a girl.
- source_sentence: A teenage girl in winter clothes slides down a decline in a red
sled.
sentences:
- A woman preparing vegetables.
- A girl is sliding on a red sled.
- A person is on a beach.
- source_sentence: How many hymns of Luther were included in the Achtliederbuch?
sentences:
- the ABC News building was renamed Peter Jennings Way in 2006 in honor of the recently
deceased longtime ABC News chief anchor and anchor of World News Tonight.
- In early 2009, Disney–ABC Television Group merged ABC Entertainment and ABC Studios
into a new division, ABC Entertainment Group, which would be responsible for both
its production and broadcasting operations.
- Luther's hymns were included in early Lutheran hymnals and spread the ideas of
the Reformation.
pipeline_tag: sentence-similarity
model-index:
- name: SentenceTransformer based on microsoft/deberta-v3-small
results:
- task:
type: semantic-similarity
name: Semantic Similarity
dataset:
name: sts test
type: sts-test
metrics:
- type: pearson_cosine
value: 0.566653720937157
name: Pearson Cosine
- type: spearman_cosine
value: 0.5551442914704277
name: Spearman Cosine
- type: pearson_manhattan
value: 0.5771354814213894
name: Pearson Manhattan
- type: spearman_manhattan
value: 0.5723970841918167
name: Spearman Manhattan
- type: pearson_euclidean
value: 0.5619024776733639
name: Pearson Euclidean
- type: spearman_euclidean
value: 0.5593253322063549
name: Spearman Euclidean
- type: pearson_dot
value: 0.23527108587659004
name: Pearson Dot
- type: spearman_dot
value: 0.24219982461742934
name: Spearman Dot
- type: pearson_max
value: 0.5771354814213894
name: Pearson Max
- type: spearman_max
value: 0.5723970841918167
name: Spearman Max
---
# SentenceTransformer based on microsoft/deberta-v3-small
This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [microsoft/deberta-v3-small](https://huggingface.co/microsoft/deberta-v3-small) on the [nli-pairs](https://huggingface.co/datasets/sentence-transformers/all-nli), [sts-label](https://huggingface.co/datasets/sentence-transformers/stsb), [vitaminc-pairs](https://huggingface.co/datasets/tals/vitaminc), [qnli-contrastive](https://huggingface.co/datasets/nyu-mll/glue), [scitail-pairs-qa](https://huggingface.co/datasets/allenai/scitail), [scitail-pairs-pos](https://huggingface.co/datasets/allenai/scitail), [xsum-pairs](https://huggingface.co/datasets/sentence-transformers/xsum), [compression-pairs](https://huggingface.co/datasets/sentence-transformers/sentence-compression), [sciq_pairs](https://huggingface.co/datasets/allenai/sciq), [qasc_pairs](https://huggingface.co/datasets/allenai/qasc), [openbookqa_pairs](https://huggingface.co/datasets/allenai/openbookqa), [msmarco_pairs](https://huggingface.co/datasets/sentence-transformers/msmarco-msmarco-distilbert-base-v3), [nq_pairs](https://huggingface.co/datasets/sentence-transformers/natural-questions), [trivia_pairs](https://huggingface.co/datasets/sentence-transformers/trivia-qa), [quora_pairs](https://huggingface.co/datasets/sentence-transformers/quora-duplicates) and [gooaq_pairs](https://huggingface.co/datasets/sentence-transformers/gooaq) datasets. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

Training wrapped the loss in `AdaptiveLayerLoss` (n_layers_per_step = -1, last_layer_weight = 1.5, prior_layers_weight = 0.15, kl_div_weight = 2, kl_temperature = 2) and used num_epochs = 4, learning_rate = 2e-5, warmup_ratio = 0.25, weight_decay = 5e-7, and a `cosine_with_restarts` schedule with num_cycles = 5; see the sketch below.
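A minimal sketch of this configuration, assuming the sentence-transformers v3 trainer API; `model` (the `SentenceTransformer` being trained) and the inner `train_loss` are assumed to already exist, with the inner losses listed per dataset under Training Details:

```python
# Sketch only: `model` and the inner `train_loss` are assumed to be defined;
# the values are the ones stated above.
from sentence_transformers import losses
from sentence_transformers.training_args import SentenceTransformerTrainingArguments

train_loss = losses.AdaptiveLayerLoss(
    model=model,
    loss=train_loss,        # inner loss, e.g. GISTEmbedLoss (varies per dataset)
    n_layers_per_step=-1,   # -1 = supervise every layer at every step
    last_layer_weight=1.5,
    prior_layers_weight=0.15,
    kl_div_weight=2,
    kl_temperature=2,
)

args = SentenceTransformerTrainingArguments(
    output_dir="output",
    num_train_epochs=4,
    learning_rate=2e-5,
    warmup_ratio=0.25,
    weight_decay=5e-7,
    lr_scheduler_type="cosine_with_restarts",
    lr_scheduler_kwargs={"num_cycles": 5},
)
```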
## Model Details
### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [microsoft/deberta-v3-small](https://huggingface.co/microsoft/deberta-v3-small) <!-- at revision a36c739020e01763fe789b4b85e2df55d6180012 -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
- **Training Datasets:**
- [nli-pairs](https://huggingface.co/datasets/sentence-transformers/all-nli)
- [sts-label](https://huggingface.co/datasets/sentence-transformers/stsb)
- [vitaminc-pairs](https://huggingface.co/datasets/tals/vitaminc)
- [qnli-contrastive](https://huggingface.co/datasets/nyu-mll/glue)
- [scitail-pairs-qa](https://huggingface.co/datasets/allenai/scitail)
- [scitail-pairs-pos](https://huggingface.co/datasets/allenai/scitail)
- [xsum-pairs](https://huggingface.co/datasets/sentence-transformers/xsum)
- [compression-pairs](https://huggingface.co/datasets/sentence-transformers/sentence-compression)
- [sciq_pairs](https://huggingface.co/datasets/allenai/sciq)
- [qasc_pairs](https://huggingface.co/datasets/allenai/qasc)
- [openbookqa_pairs](https://huggingface.co/datasets/allenai/openbookqa)
- [msmarco_pairs](https://huggingface.co/datasets/sentence-transformers/msmarco-msmarco-distilbert-base-v3)
- [nq_pairs](https://huggingface.co/datasets/sentence-transformers/natural-questions)
- [trivia_pairs](https://huggingface.co/datasets/sentence-transformers/trivia-qa)
- [quora_pairs](https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
- [gooaq_pairs](https://huggingface.co/datasets/sentence-transformers/gooaq)
- **Language:** en
<!-- - **License:** Unknown -->
### Model Sources
- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
### Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: DebertaV2Model
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```
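Concretely, the two modules above amount to running the DeBERTa encoder and mean-pooling its token embeddings over the attention mask. A minimal sketch in plain `transformers` (using the repo id from the Usage section below):

```python
import torch
from transformers import AutoTokenizer, AutoModel

repo = "bobox/DeBERTaV3-small-SenTra-AdaptiveLayers-AllSoft-HighTemp"
tokenizer = AutoTokenizer.from_pretrained(repo)
encoder = AutoModel.from_pretrained(repo)  # DebertaV2Model

batch = tokenizer(["A test sentence."], padding=True, truncation=True,
                  max_length=512, return_tensors="pt")
with torch.no_grad():
    token_embeddings = encoder(**batch).last_hidden_state  # (1, seq_len, 768)

# Mean pooling over non-padding tokens, as configured in the Pooling module.
mask = batch["attention_mask"].unsqueeze(-1).float()
embedding = (token_embeddings * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)
print(embedding.shape)  # torch.Size([1, 768])
```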
## Usage
### Direct Usage (Sentence Transformers)
First install the Sentence Transformers library:
```bash
pip install -U sentence-transformers
```
Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer
# Download from the 🤗 Hub
model = SentenceTransformer("bobox/DeBERTaV3-small-SenTra-AdaptiveLayers-AllSoft-HighTemp")
# Run inference
sentences = [
'How many hymns of Luther were included in the Achtliederbuch?',
"Luther's hymns were included in early Lutheran hymnals and spread the ideas of the Reformation.",
'the ABC News building was renamed Peter Jennings Way in 2006 in honor of the recently deceased longtime ABC News chief anchor and anchor of World News Tonight.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]
# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
<!--
### Direct Usage (Transformers)
<details><summary>Click to see the direct usage in Transformers</summary>
</details>
-->
<!--
### Downstream Usage (Sentence Transformers)
You can finetune this model on your own dataset.
<details><summary>Click to expand</summary>
</details>
-->
<!--
### Out-of-Scope Use
*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->
## Evaluation
### Metrics
#### Semantic Similarity
* Dataset: `sts-test`
* Evaluated with [<code>EmbeddingSimilarityEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)
| Metric | Value |
|:--------------------|:-----------|
| pearson_cosine | 0.5667 |
| **spearman_cosine** | **0.5551** |
| pearson_manhattan | 0.5771 |
| spearman_manhattan | 0.5724 |
| pearson_euclidean | 0.5619 |
| spearman_euclidean | 0.5593 |
| pearson_dot | 0.2353 |
| spearman_dot | 0.2422 |
| pearson_max | 0.5771 |
| spearman_max | 0.5724 |
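For reference, a hedged sketch of how this evaluation can be reproduced, assuming the `test` split of [sentence-transformers/stsb](https://huggingface.co/datasets/sentence-transformers/stsb) (scores already scaled to [0, 1]):

```python
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator

model = SentenceTransformer("bobox/DeBERTaV3-small-SenTra-AdaptiveLayers-AllSoft-HighTemp")
stsb = load_dataset("sentence-transformers/stsb", split="test")

evaluator = EmbeddingSimilarityEvaluator(
    sentences1=stsb["sentence1"],
    sentences2=stsb["sentence2"],
    scores=stsb["score"],
    name="sts-test",
)
results = evaluator(model)  # pearson/spearman for cosine, euclidean, manhattan, dot
print(results)
```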
<!--
## Bias, Risks and Limitations
*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->
<!--
### Recommendations
*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->
## Training Details
### Training Datasets
#### nli-pairs
* Dataset: [nli-pairs](https://huggingface.co/datasets/sentence-transformers/all-nli) at [d482672](https://huggingface.co/datasets/sentence-transformers/all-nli/tree/d482672c8e74ce18da116f430137434ba2e52fab)
* Size: 6,500 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:----------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 5 tokens</li><li>mean: 16.62 tokens</li><li>max: 62 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 9.46 tokens</li><li>max: 29 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:---------------------------------------------------------------------------|:-------------------------------------------------|
| <code>A person on a horse jumps over a broken down airplane.</code> | <code>A person is outdoors, on a horse.</code> |
| <code>Children smiling and waving at camera</code> | <code>There are children present</code> |
| <code>A boy is jumping on skateboard in the middle of a red bridge.</code> | <code>The boy does a skateboarding trick.</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
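In code, these parameters map onto the loss classes roughly as follows (a sketch: the guide model required by `GISTEmbedLoss` is not named in this card, so the one below is a placeholder assumption):

```python
from sentence_transformers import SentenceTransformer, losses

model = SentenceTransformer("microsoft/deberta-v3-small")
# GISTEmbedLoss uses a guide model to filter false in-batch negatives;
# which guide was used here is not stated, so this one is a placeholder.
guide = SentenceTransformer("all-MiniLM-L6-v2")

inner_loss = losses.GISTEmbedLoss(model=model, guide=guide)
train_loss = losses.AdaptiveLayerLoss(
    model=model,
    loss=inner_loss,
    n_layers_per_step=-1,   # -1: supervise all layers each step
    last_layer_weight=1.5,
    prior_layers_weight=0.15,
    kl_div_weight=2,
    kl_temperature=2,
)
```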
#### sts-label
* Dataset: [sts-label](https://huggingface.co/datasets/sentence-transformers/stsb) at [ab7a5ac](https://huggingface.co/datasets/sentence-transformers/stsb/tree/ab7a5ac0e35aa22088bdcf23e7fd99b220e53308)
* Size: 5,749 training samples
* Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>score</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 | score |
|:--------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:---------------------------------------------------------------|
| type | string | string | float |
| details | <ul><li>min: 6 tokens</li><li>mean: 9.81 tokens</li><li>max: 27 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 9.74 tokens</li><li>max: 25 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.54</li><li>max: 1.0</li></ul> |
* Samples:
| sentence1 | sentence2 | score |
|:-----------------------------------------------------------|:----------------------------------------------------------------------|:------------------|
| <code>A plane is taking off.</code> | <code>An air plane is taking off.</code> | <code>1.0</code> |
| <code>A man is playing a large flute.</code> | <code>A man is playing a flute.</code> | <code>0.76</code> |
| <code>A man is spreading shreded cheese on a pizza.</code> | <code>A man is spreading shredded cheese on an uncooked pizza.</code> | <code>0.76</code> |
* Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:
```json
{
"scale": 20.0,
"similarity_fct": "pairwise_cos_sim"
}
```
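A sketch of the same configuration in code; `pairwise_cos_sim` is the similarity function named above, and this loss is applied to the sts-label pairs directly, without the `AdaptiveLayerLoss` wrapper:

```python
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.util import pairwise_cos_sim

model = SentenceTransformer("microsoft/deberta-v3-small")
# CoSENT pushes higher-scored pairs toward higher cosine similarity
# than lower-scored pairs.
sts_loss = losses.CoSENTLoss(model=model, scale=20.0, similarity_fct=pairwise_cos_sim)
```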
#### vitaminc-pairs
* Dataset: [vitaminc-pairs](https://huggingface.co/datasets/tals/vitaminc) at [be6febb](https://huggingface.co/datasets/tals/vitaminc/tree/be6febb761b0b2807687e61e0b5282e459df2fa0)
* Size: 3,194 training samples
* Columns: <code>label</code>, <code>sentence1</code>, and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | label | sentence1 | sentence2 |
|:--------|:-----------------------------|:---------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
| type | int | string | string |
| details | <ul><li>1: 100.00%</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 15.8 tokens</li><li>max: 75 tokens</li></ul> | <ul><li>min: 8 tokens</li><li>mean: 38.29 tokens</li><li>max: 512 tokens</li></ul> |
* Samples:
| label | sentence1 | sentence2 |
|:---------------|:---------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <code>1</code> | <code>Kyle Kendricks was otherwise called the Professor .</code> | <code>`` Chicago Cubs ( �present ) } } Kyle Christian Hendricks ( born December 7 , 1989 ) , nicknamed `` '' The Proffessor , '' '' is an American professional baseball pitcher for the Chicago Cubs of Major League Baseball ( MLB ) . ''</code> |
| <code>1</code> | <code>Since 1982 , 533 people have been executed in Texas .</code> | <code>Since the death penalty was re-instituted in the United States with the 1976 Gregg v. Georgia decision , Texas has executed more inmates than any other state , beginning in 1982 with the execution of Charles Brooks , Jr.. Since 1982 , 533 people have been executed in Texas. 1923 , the Texas Department of Criminal Justice ( TDCJ ) has been in charge of executions in the state .</code> |
| <code>1</code> | <code>Hilltop Hoods have released two `` restrung '' albums .</code> | <code>`` The group released its first extended play , Back Once Again , in 1997 and have subsequently released seven studio albums , two `` '' restrung '' '' albums and three DVDs . ''</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### qnli-contrastive
* Dataset: [qnli-contrastive](https://huggingface.co/datasets/nyu-mll/glue) at [bcdcba7](https://huggingface.co/datasets/nyu-mll/glue/tree/bcdcba79d07bc864c1c254ccfcedcce55bcc9a8c)
* Size: 4,000 training samples
* Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 | label |
|:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:-----------------------------|
| type | string | string | int |
| details | <ul><li>min: 6 tokens</li><li>mean: 13.79 tokens</li><li>max: 40 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 35.8 tokens</li><li>max: 499 tokens</li></ul> | <ul><li>0: 100.00%</li></ul> |
* Samples:
| sentence1 | sentence2 | label |
|:-----------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------|
| <code>Vinters have adopted solar technology to do what?</code> | <code>More recently the technology has been embraced by vinters, who use the energy generated by solar panels to power grape presses.</code> | <code>0</code> |
| <code>Who did Madonna's look and style of dressing influence?</code> | <code>It attracted the attention of organizations who complained that the song and its accompanying video promoted premarital sex and undermined family values, and moralists sought to have the song and video banned.</code> | <code>0</code> |
| <code>In addition to hearing him play, what else did people seek from Chopin in London?</code> | <code>The Prince, who was himself a talented musician, moved close to the keyboard to view Chopin's technique.</code> | <code>0</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "OnlineContrastiveLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
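The same wrapper with `OnlineContrastiveLoss` inside, as a sketch (no guide model is needed for this loss):

```python
from sentence_transformers import SentenceTransformer, losses

model = SentenceTransformer("microsoft/deberta-v3-small")
# OnlineContrastiveLoss backpropagates only through the hard positive and
# hard negative pairs found in each batch.
qnli_loss = losses.AdaptiveLayerLoss(
    model=model,
    loss=losses.OnlineContrastiveLoss(model=model),
    n_layers_per_step=-1,
    last_layer_weight=1.5,
    prior_layers_weight=0.15,
    kl_div_weight=2,
    kl_temperature=2,
)
```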
#### scitail-pairs-qa
* Dataset: [scitail-pairs-qa](https://huggingface.co/datasets/allenai/scitail) at [0cc4353](https://huggingface.co/datasets/allenai/scitail/tree/0cc4353235b289165dfde1c7c5d1be983f99ce44)
* Size: 4,300 training samples
* Columns: <code>sentence2</code> and <code>sentence1</code>
* Approximate statistics based on the first 1000 samples:
| | sentence2 | sentence1 |
|:--------|:---------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 7 tokens</li><li>mean: 16.0 tokens</li><li>max: 41 tokens</li></ul> | <ul><li>min: 7 tokens</li><li>mean: 14.71 tokens</li><li>max: 34 tokens</li></ul> |
* Samples:
| sentence2 | sentence1 |
|:--------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------|
| <code>The fetal period lasts approximately 30 weeks weeks.</code> | <code>Approximately how many weeks does the fetal period last?</code> |
| <code>Corals build hard exoskeletons that grow to become coral reefs.</code> | <code>Corals build hard exoskeletons that grow to become what?</code> |
| <code>A voltaic cell generates an electric current through a reaction known as a(n) spontaneous redox.</code> | <code>A voltaic cell uses what type of reaction to generate an electric current</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### scitail-pairs-pos
* Dataset: [scitail-pairs-pos](https://huggingface.co/datasets/allenai/scitail) at [0cc4353](https://huggingface.co/datasets/allenai/scitail/tree/0cc4353235b289165dfde1c7c5d1be983f99ce44)
* Size: 2,200 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 7 tokens</li><li>mean: 23.76 tokens</li><li>max: 74 tokens</li></ul> | <ul><li>min: 7 tokens</li><li>mean: 15.27 tokens</li><li>max: 41 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:-----------------------------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------|
| <code>As the water vapor cools, it condenses , forming tiny droplets in clouds.</code> | <code>Clouds are formed from water droplets.</code> |
| <code>Poison ivy is green, with three leaflets on each leaf, grows as a shrub or vine, and may be in your yard.</code> | <code>Poison ivy typically has three groups of leaves.</code> |
| <code>(Formic acid is the poison found in the > sting of fire ants.)</code> | <code>Formic acid is found in the secretions of stinging ants.</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### xsum-pairs
* Dataset: [xsum-pairs](https://huggingface.co/datasets/sentence-transformers/xsum) at [788ddaf](https://huggingface.co/datasets/sentence-transformers/xsum/tree/788ddafe04e539956d56b567bc32a036ee7b9206)
* Size: 2,500 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:-------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 14 tokens</li><li>mean: 345.33 tokens</li><li>max: 512 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 27.11 tokens</li><li>max: 60 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:---|:---|
| <code>Rahim Kalantar told the BBC his son Ali, 18, travelled to Syria with two friends from Coventry in March and believed he was now fighting with Isis.<br>He said he was sent "down this road" by an imam - who denied the allegations.<br>Up to 500 Britons are thought to have travelled to the Middle East to fight in the conflict, officials say.<br>Mr Kalantar - speaking to BBC Two's Newsnight, in collaboration with the BBC's Afghan Service and Newsday - said he worries about his son Ali "every minute" and that his grief is "limitless".<br>He said he believed Ali - who was planning to study computer science at university - had been radicalised during classes at a mosque after evening prayer.<br>"He [the imam] encouraged them and sent them down this road," he said.<br>The BBC contacted the mosque to speak to the imam, who refused to give an interview but said he completely denied the allegations.<br>Ali is believed to have travelled to Syria with Rashed Amani, also 18, who had been studying business at Coventry University.<br>Rashed's father, Khabir, said family members had travelled to the Turkish-Syrian border in the hope of finding the boys, but came back "empty-handed" after searching for more than two weeks.<br>He said he did not know what had happened to his son, who he fears has joined Isis - the militant-led group that has made rapid advances through Iraq in recent weeks.<br>"Maybe somebody worked with him, I don't know. Maybe somebody brainwashed him because he was not like that," he said.<br>The third teenager, Moh Ismael, is also believed to be in Syria with his friends. He is understood to have posted a message on Twitter saying he was with Isis.<br>It comes after Britons - including Reyaad Khan and Nasser Muthana from Cardiff - featured in an apparent recruitment video for jihadists in Iraq and Syria.<br>The video was posted online on Friday by accounts with links to Isis.<br>The BBC has learned a third Briton in the video is from Aberdeen. The man, named locally as Raqib, grew up in Scotland but was originally from Bangladesh.<br>Lord Carlile, a former independent reviewer of terrorism laws, told the BBC that the Muslim community was best placed to stop jihadists recruiting in the UK.<br>The Liberal Democrat peer also said the UK needed to reintroduce tougher measures to stop terrorism.<br>It comes after former MI6 director, Richard Barrett, said security services would not be able to track all Britons who return to the UK after fighting in Syria.<br>He said the number of those posing a threat would be small but unpredictable.<br>The Metropolitan Police has insisted it has the tools to monitor British jihadists returning from that country.<br>Shiraz Maher, a radicalisation expert, told Newsnight that social media was now acting as a recruitment ground for potential jihadists in the UK.<br>"You have hundreds of foreign fighters on the ground who in real time are giving you a live feed of what is happening and they are engaged in a conversation.<br>"It is these individual people who have been empowered to become recruiters in their own right," he said.<br>Lord Carlile said the "most important partners" in preventing young Muslims from being radicalised were the "Muslim communities themselves".<br>"Mothers, wives, sisters do not want their husbands, brothers, sons to become valid jihadists and run the risk of being killed in a civil war," he told the programme.<br>He also told BBC Radio 4's World at One programme that the government should look at reintroducing "something like control orders", which were scrapped in 2011 and replaced with the less restrictive Terrorism Prevention and Investigation Measures (TPims).<br>He said: "We need to look at preventing violent extremism before people leave the country and also we need to look for further measures."</code> | <code>The father of a British teenager who travelled to Syria to join jihadists believes his son was radicalised by an imam at a UK mosque.</code> |
| <code>Jawad Fairooz and Matar Matar were detained in May after resigning from parliament in protest at the handling of the protests.<br>Mr Matar told the BBC they had been tortured in prison.<br>They were prosecuted in a security court on charges of taking part in illegal protests and defaming the country.<br>It is not clear if they still face trial in a civilian court.<br>Civilian courts took over jurisdiction after King Hamad Bin Issa Al Khalifa lifted a state of emergency in June.<br>Mr Matar told the BBC he believed his arrest had been intended to put a pressure on his al-Wifaq party.<br>"At some stages we were tortured," he said. "In one of the cases we were beaten."<br>Human rights lawyer Mohamed al-Tajir was also released.<br>He was detained in April having defended people arrested during the Saudi-backed suppression of protests in March.<br>Correspondents say their release appears to be an attempt at defusing tensions in the country, a key US ally in the region that hosts the US Navy's 5th Fleet.<br>Bahrain's King Hamad Bin Issa Al Khalifa recently accepted a series of reforms drawn up by a government-backed committee created to address grievances that emerged during the protests.<br>The kingdom's Shia community makes up about 70% of the population but many say they are discriminated against by the minority Sunni monarchy.</code> | <code>Bahrain has freed two former Shia opposition MPs arrested in the wake of widespread anti-government protests.</code> |
| <code>Liverpool City Region, in case you were wondering, includes Merseyside's five councils (Knowsley, Liverpool, Sefton, St Helens, and Wirral) as well as Halton in Cheshire.<br>Who are the eight candidates desperate for your support on 4 May, though, and what are their priorities?<br>BBC Radio Merseyside's political reporter Claire Hamilton has produced a potted biography for each of them.<br>We're also asking all of them for a "minute manifesto" video.<br>Candidates are listed below in alphabetical order<br>Roger Bannister, Trade Union & Socialist Coalition<br>Veteran trade unionist Roger Bannister believes the Liverpool City Region Combined Authority should never have approved the contract for a fleet of new driver-only Merseyrail trains. He says he would seek to reverse this decision. He also believes local authorities have passed harmful austerity budgets on people struggling to make ends meet. He stood for Liverpool city mayor in 2016, coming fourth with 5% of the vote.<br>Paul Breen, Get the Coppers off the Jury<br>Paul Breen is a resident of Norris Green, Liverpool and became the last candidate to be nominated. He is listed as treasurer of the party on the Electoral Commission's website, with Patricia Breen listed as deputy treasurer. He has not yet released any material detailing his manifesto but told the BBC the title of his campaign speaks for itself. He simply does not believe that police officers should be allowed to serve on juries.<br>Mr Breen declined to provide a "minute manifesto"<br>Tony Caldeira, Conservative<br>Born in Liverpool and educated in St Helens, Tony Caldeira started out working on a stall selling cushions made by his mother at Liverpool's Great Homer Street market. His business expanded and now operates in Kirkby, distributing world-wide. Mr Caldeira has stood for Liverpool mayor twice, coming sixth in 2016 with just under 4% of the vote. He has pledged to improve the area's transport network, speed up the planning process and build homes and workplaces on brownfield sites rather than green spaces.<br>Carl Cashman, Liberal Democrats<br>Born in Whiston, Knowsley, Carl Cashman is leader of the Liberal Democrat group on Knowsley Council. He and his two Lib Dem council colleagues were elected in 2016, breaking a four-year period when Labour was the only party represented. Aged 25, he's the youngest of the candidates. Mr Cashman believes maintaining strong ties with Europe and the region will be key, and has pledged to open a Liverpool City Region embassy in Brussels. He also wants to better integrate ticketing across public transport and make the current Walrus card more similar to the Oyster card used by Londoners.<br>Tom Crone, Green Party<br>Tom Crone is leader of the Green group on Liverpool City Council. He won 10% of the vote in the mayoral elections in Liverpool in 2016 and came third. Originally from Norwich, he has lived in Liverpool since 2000 after arriving as a student. Mr Crone is keen to see a shift away from traditional heavy industry in the city region towards greener "tech" industries. He's also passionate about making public transport more affordable and environmentally friendly. He says he'll look to prioritise new routes for cyclists and pedestrians.<br>Tabitha Morton, Women's Equality Party<br>Tabitha Morton was born in Netherton, Sefton. She left school with no formal qualifications, and started work at 16 at a local market, and later in cleaning. She was taken on for NVQ training by a company in Liverpool, and stayed on to train others. She now works for a global manufacturer, in what she describes as "a male-dominated industry". She says she would prioritise grants for employers offering equal apprenticeships for young women and men and ring-fence funds for training women in sectors in which they're underrepresented.<br>Steve Rotheram, Labour<br>Born in Kirkby, former bricklayer Steve Rotheram was a city councillor in Liverpool and also Lord Mayor during the city's European Capital of Culture year in 2008. He was also elected MP for Liverpool Walton in 2010, and re-elected to the seat in 2015. Mr Rotheram is pledging to cut the cost of the fast tag for motorists driving through the Mersey tunnels. He wants to improve education and offer better careers advice for young people, and also wants to make brownfield sites more attractive to developers.<br>Paula Walters, UKIP<br>Wallasey-born Paula Walters is chairman of UKIP in Wirral and lives in New Brighton with her family. She has campaigned to scrap tunnel tolls for several years. She says her local UKIP branch is one of the most thriving in the North West. A civil servant, she studied English and biomolecular science at degree-level. She has also lived in South Africa where she attended the University of Pretoria. She believes Liverpool city centre has attracted money at the expense of outlying areas, one of the things she wants to tackle.</code> | <code>Those hoping to become the first mayor of the Liverpool City Region have less than a month remaining in which to secure your vote.</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### compression-pairs
* Dataset: [compression-pairs](https://huggingface.co/datasets/sentence-transformers/sentence-compression) at [605bc91](https://huggingface.co/datasets/sentence-transformers/sentence-compression/tree/605bc91d95631895ba25b6eda51a3cb596976c90)
* Size: 4,000 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 10 tokens</li><li>mean: 31.89 tokens</li><li>max: 125 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 10.21 tokens</li><li>max: 28 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------|
| <code>The USHL completed an expansion draft on Monday as 10 players who were on the rosters of USHL teams during the 2009-10 season were selected by the League's two newest entries, the Muskegon Lumberjacks and Dubuque Fighting Saints.</code> | <code>USHL completes expansion draft</code> |
| <code>Major League Baseball Commissioner Bud Selig will be speaking at St. Norbert College next month.</code> | <code>Bud Selig to speak at St. Norbert College</code> |
| <code>It's fresh cherry time in Michigan and the best time to enjoy this delicious and nutritious fruit.</code> | <code>It's cherry time</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "MultipleNegativesSymmetricRankingLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
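A sketch of this configuration; the symmetric variant also ranks the positive-to-anchor direction on top of the usual in-batch-negatives objective, which suits these sentence/headline pairs:

```python
from sentence_transformers import SentenceTransformer, losses

model = SentenceTransformer("microsoft/deberta-v3-small")
# Symmetric in-batch-negatives ranking, wrapped as for the other datasets.
compression_loss = losses.AdaptiveLayerLoss(
    model=model,
    loss=losses.MultipleNegativesSymmetricRankingLoss(model=model),
    n_layers_per_step=-1,
    last_layer_weight=1.5,
    prior_layers_weight=0.15,
    kl_div_weight=2,
    kl_temperature=2,
)
```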
#### sciq_pairs
* Dataset: [sciq_pairs](https://huggingface.co/datasets/allenai/sciq) at [2c94ad3](https://huggingface.co/datasets/allenai/sciq/tree/2c94ad3e1aafab77146f384e23536f97a4849815)
* Size: 6,500 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 7 tokens</li><li>mean: 17.26 tokens</li><li>max: 60 tokens</li></ul> | <ul><li>min: 2 tokens</li><li>mean: 84.37 tokens</li><li>max: 512 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <code>What type of organism is commonly used in preparation of foods such as cheese and yogurt?</code> | <code>Mesophiles grow best in moderate temperature, typically between 25°C and 40°C (77°F and 104°F). Mesophiles are often found living in or on the bodies of humans or other animals. The optimal growth temperature of many pathogenic mesophiles is 37°C (98°F), the normal human body temperature. Mesophilic organisms have important uses in food preparation, including cheese, yogurt, beer and wine.</code> |
| <code>What phenomenon makes global winds blow northeast to southwest or the reverse in the northern hemisphere and northwest to southeast or the reverse in the southern hemisphere?</code> | <code>Without Coriolis Effect the global winds would blow north to south or south to north. But Coriolis makes them blow northeast to southwest or the reverse in the Northern Hemisphere. The winds blow northwest to southeast or the reverse in the southern hemisphere.</code> |
| <code>Changes from a less-ordered state to a more-ordered state (such as a liquid to a solid) are always what?</code> | <code>Summary Changes of state are examples of phase changes, or phase transitions. All phase changes are accompanied by changes in the energy of a system. Changes from a more-ordered state to a less-ordered state (such as a liquid to a gas) areendothermic. Changes from a less-ordered state to a more-ordered state (such as a liquid to a solid) are always exothermic. The conversion of a solid to a liquid is called fusion (or melting). The energy required to melt 1 mol of a substance is its enthalpy of fusion (ΔHfus). The energy change required to vaporize 1 mol of a substance is the enthalpy of vaporization (ΔHvap). The direct conversion of a solid to a gas is sublimation. The amount of energy needed to sublime 1 mol of a substance is its enthalpy of sublimation (ΔHsub) and is the sum of the enthalpies of fusion and vaporization. Plots of the temperature of a substance versus heat added or versus heating time at a constant rate of heating are calledheating curves. Heating curves relate temperature changes to phase transitions. A superheated liquid, a liquid at a temperature and pressure at which it should be a gas, is not stable. A cooling curve is not exactly the reverse of the heating curve because many liquids do not freeze at the expected temperature. Instead, they form a supercooled liquid, a metastable liquid phase that exists below the normal melting point. Supercooled liquids usually crystallize on standing, or adding a seed crystal of the same or another substance can induce crystallization.</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### qasc_pairs
* Dataset: [qasc_pairs](https://huggingface.co/datasets/allenai/qasc) at [a34ba20](https://huggingface.co/datasets/allenai/qasc/tree/a34ba204eb9a33b919c10cc08f4f1c8dae5ec070)
* Size: 6,500 training samples
* Columns: <code>id</code>, <code>sentence1</code>, and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | id | sentence1 | sentence2 |
|:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|
| type | string | string | string |
| details | <ul><li>min: 17 tokens</li><li>mean: 21.35 tokens</li><li>max: 27 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 11.47 tokens</li><li>max: 25 tokens</li></ul> | <ul><li>min: 14 tokens</li><li>mean: 35.55 tokens</li><li>max: 66 tokens</li></ul> |
* Samples:
| id | sentence1 | sentence2 |
|:--------------------------------------------|:---------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <code>3E7TUJ2EGCLQNOV1WEAJ2NN9ROPD9K</code> | <code>What type of water formation is formed by clouds?</code> | <code>beads of water are formed by water vapor condensing. Clouds are made of water vapor.. Beads of water can be formed by clouds.</code> |
| <code>3LS2AMNW5FPNJK3C3PZLZCPX562OQO</code> | <code>Where do beads of water come from?</code> | <code>beads of water are formed by water vapor condensing. Condensation is the change of water vapor to a liquid.. Vapor turning into a liquid leaves behind beads of water</code> |
| <code>3TMFV4NEP8DPIPCI8H9VUFHJG8V8W3</code> | <code>What forms beads of water? </code> | <code>beads of water are formed by water vapor condensing. An example of water vapor is steam.. Steam forms beads of water.</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
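For reference, the `id`/`sentence1`/`sentence2` columns above can plausibly be rebuilt from the raw QASC fields on the Hub. The field concatenation below is an assumption inferred from the samples shown, not a documented recipe.
```python
from datasets import load_dataset

qasc = load_dataset("allenai/qasc", split="train")

def to_pair(row):
    # Assumed mapping: the positive text appears to concatenate the two
    # supporting facts and the combined fact, as in the samples above.
    return {
        "id": row["id"],
        "sentence1": row["question"],
        "sentence2": f"{row['fact1']}. {row['fact2']}. {row['combinedfact']}",
    }

qasc_pairs = qasc.map(to_pair, remove_columns=qasc.column_names)
```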
#### openbookqa_pairs
* Dataset: [openbookqa_pairs](https://huggingface.co/datasets/allenai/openbookqa) at [388097e](https://huggingface.co/datasets/allenai/openbookqa/tree/388097ea7776314e93a529163e0fea805b8a6454)
* Size: 2,740 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 3 tokens</li><li>mean: 13.83 tokens</li><li>max: 78 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 11.37 tokens</li><li>max: 30 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:-------------------------------------------------|:--------------------------------------------------------------------------|
| <code>The sun is responsible for</code> | <code>the sun is the source of energy for physical cycles on Earth</code> |
| <code>When food is reduced in the stomach</code> | <code>digestion is when stomach acid breaks down food</code> |
| <code>Stars are</code> | <code>a star is made of gases</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### msmarco_pairs
* Dataset: [msmarco_pairs](https://huggingface.co/datasets/sentence-transformers/msmarco-msmarco-distilbert-base-v3) at [28ff31e](https://huggingface.co/datasets/sentence-transformers/msmarco-msmarco-distilbert-base-v3/tree/28ff31e4c97cddd53d298497f766e653f1e666f9)
* Size: 6,500 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:---------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 4 tokens</li><li>mean: 8.61 tokens</li><li>max: 27 tokens</li></ul> | <ul><li>min: 18 tokens</li><li>mean: 75.09 tokens</li><li>max: 206 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <code>what are the liberal arts?</code> | <code>liberal arts. 1. the academic course of instruction at a college intended to provide general knowledge and comprising the arts, humanities, natural sciences, and social sciences, as opposed to professional or technical subjects.</code> |
| <code>what is the mechanism of action of fibrinolytic or thrombolytic drugs?</code> | <code>Baillière's Clinical Haematology. 6 Mechanism of action of the thrombolytic agents. 6 Mechanism of action of the thrombolytic agents JEFFREY I. WEITZ Fibrin formed during the haemostatic, inflammatory or tissue repair process serves a temporary role, and must be degraded to restore normal tissue function and structure.</code> |
| <code>what is normal plat count</code> | <code>78 Followers. A. Platelets are the tiny blood cells that help stop bleeding by binding together to form a clump or plug at sites of injury inside blood vessels. A normal platelet count is between 150,000 and 450,000 platelets per microliter (one-millionth of a liter, abbreviated mcL).The average platelet count is 237,000 per mcL in men and 266,000 per mcL in women.8 Followers. A. Platelets are the tiny blood cells that help stop bleeding by binding together to form a clump or plug at sites of injury inside blood vessels. A normal platelet count is between 150,000 and 450,000 platelets per microliter (one-millionth of a liter, abbreviated mcL).</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### nq_pairs
* Dataset: [nq_pairs](https://huggingface.co/datasets/sentence-transformers/natural-questions) at [f9e894e](https://huggingface.co/datasets/sentence-transformers/natural-questions/tree/f9e894e1081e206e577b4eaa9ee6de2b06ae6f17)
* Size: 6,500 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:-----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 10 tokens</li><li>mean: 11.77 tokens</li><li>max: 21 tokens</li></ul> | <ul><li>min: 16 tokens</li><li>mean: 131.57 tokens</li><li>max: 512 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:----------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <code>when did richmond last play in a preliminary final</code> | <code>Richmond Football Club Richmond began 2017 with 5 straight wins, a feat it had not achieved since 1995. A series of close losses hampered the Tigers throughout the middle of the season, including a 5-point loss to the Western Bulldogs, 2-point loss to Fremantle, and a 3-point loss to the Giants. Richmond ended the season strongly with convincing victories over Fremantle and St Kilda in the final two rounds, elevating the club to 3rd on the ladder. Richmond's first final of the season against the Cats at the MCG attracted a record qualifying final crowd of 95,028; the Tigers won by 51 points. Having advanced to the first preliminary finals for the first time since 2001, Richmond defeated Greater Western Sydney by 36 points in front of a crowd of 94,258 to progress to the Grand Final against Adelaide, their first Grand Final appearance since 1982. The attendance was 100,021, the largest crowd to a grand final since 1986. The Crows led at quarter time and led by as many as 13, but the Tigers took over the game as it progressed and scored seven straight goals at one point. They eventually would win by 48 points – 16.12 (108) to Adelaide's 8.12 (60) – to end their 37-year flag drought.[22] Dustin Martin also became the first player to win a Premiership medal, the Brownlow Medal and the Norm Smith Medal in the same season, while Damien Hardwick was named AFL Coaches Association Coach of the Year. Richmond's jump from 13th to premiers also marked the biggest jump from one AFL season to the next.</code> |
| <code>who sang what in the world's come over you</code> | <code>Jack Scott (singer) At the beginning of 1960, Scott again changed record labels, this time to Top Rank Records.[1] He then recorded four Billboard Hot 100 hits – "What in the World's Come Over You" (#5), "Burning Bridges" (#3) b/w "Oh Little One" (#34), and "It Only Happened Yesterday" (#38).[1] "What in the World's Come Over You" was Scott's second gold disc winner.[6] Scott continued to record and perform during the 1960s and 1970s.[1] His song "You're Just Gettin' Better" reached the country charts in 1974.[1] In May 1977, Scott recorded a Peel session for BBC Radio 1 disc jockey, John Peel.</code> |
| <code>who produces the most wool in the world</code> | <code>Wool Global wool production is about 2 million tonnes per year, of which 60% goes into apparel. Wool comprises ca 3% of the global textile market, but its value is higher owing to dying and other modifications of the material.[1] Australia is a leading producer of wool which is mostly from Merino sheep but has been eclipsed by China in terms of total weight.[30] New Zealand (2016) is the third-largest producer of wool, and the largest producer of crossbred wool. Breeds such as Lincoln, Romney, Drysdale, and Elliotdale produce coarser fibers, and wool from these sheep is usually used for making carpets.</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### trivia_pairs
* Dataset: [trivia_pairs](https://huggingface.co/datasets/sentence-transformers/trivia-qa) at [a7c36e3](https://huggingface.co/datasets/sentence-transformers/trivia-qa/tree/a7c36e3c8c8c01526bc094d79bf80d4c848b0ad0)
* Size: 6,500 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 8 tokens</li><li>mean: 15.16 tokens</li><li>max: 48 tokens</li></ul> | <ul><li>min: 19 tokens</li><li>mean: 456.87 tokens</li><li>max: 512 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <code>Which American-born Sinclair won the Nobel Prize for Literature in 1930?</code> | <code>The Nobel Prize in Literature 1930 The Nobel Prize in Literature 1930 Sinclair Lewis The Nobel Prize in Literature 1930 Sinclair Lewis Prize share: 1/1 The Nobel Prize in Literature 1930 was awarded to Sinclair Lewis "for his vigorous and graphic art of description and his ability to create, with wit and humour, new types of characters". Photos: Copyright © The Nobel Foundation Share this: To cite this page MLA style: "The Nobel Prize in Literature 1930". Nobelprize.org. Nobel Media AB 2014. Web. 18 Jan 2017. <http://www.nobelprize.org/nobel_prizes/literature/laureates/1930/></code> |
| <code>Where in England was Dame Judi Dench born?</code> | <code>Judi Dench - IMDb IMDb Actress \| Music Department \| Soundtrack Judi Dench was born in York, England, to Eleanora Olive (Jones), who was from Dublin, Ireland, and Reginald Arthur Dench, a doctor from Dorset, England. She attended Mount School in York, and studied at the Central School of Speech and Drama. She has performed with Royal Shakespeare Company, the National Theatre, and at Old Vic Theatre. She is a ... See full bio » Born: a list of 35 people created 02 Jul 2011 a list of 35 people created 19 Apr 2012 a list of 35 people created 28 May 2014 a list of 25 people created 05 Aug 2014 a list of 26 people created 18 May 2015 Do you have a demo reel? Add it to your IMDbPage How much of Judi Dench's work have you seen? User Polls Won 1 Oscar. Another 59 wins & 163 nominations. See more awards » Known For 2016 The Hollow Crown (TV Series) Cecily, Duchess of York 2015 The Vote (TV Movie) Christine Metcalfe - Total War (1996) ... Narrator (voice) - Stalemate (1996) ... Narrator (voice) 1992 The Torch (TV Mini-Series) Aba 1990 Screen One (TV Series) Anne 1989 Behaving Badly (TV Mini-Series) Bridget 1981 BBC2 Playhouse (TV Series) Sister Scarli 1976 Arena (TV Series documentary) Sweetie Simpkins 1973 Ooh La La! (TV Series) Amélie 1966 Court Martial (TV Series) Marthe 1963 Z Cars (TV Series) Elena Collins 1963 Love Story (TV Series) Pat McKendrick 1960 The Terrible Choice (TV Series) Good Angel Music department (1 credit) A Fine Romance (TV Series) (theme sung by - 14 episodes, 1981 - 1983) (theme song sung by - 12 episodes, 1983 - 1984) - A Romantic Meal (1984) ... (theme song sung by) - Problems (1984) ... (theme song sung by) 2013 Fifty Years on Stage (TV Movie) (performer: "Send in the Clowns") 2009 Nine (performer: "Folies Bergère") - What's Wrong with Mrs Bale? (1997) ... (performer: "Raindrops Keep Fallin' On My Head" - uncredited) - Misunderstandings (1993) ... (performer: "Walkin' My Baby Back Home" - uncredited) 1982-1984 A Fine Romance (TV Series) (performer - 2 episodes) - The Telephone Call (1984) ... (performer: "Boogie Woogie Bugle Boy" - uncredited) - Furniture (1982) ... (performer: "Rule, Britannia!" - uncredited) Hide 2009 Waiting in Rhyme (Video short) (special thanks) 2007 Expresso (Short) (special thanks) 1999 Shakespeare in Love and on Film (TV Movie documentary) (thanks - as Dame Judi Dench) Hide 2016 Rio Olympics (TV Mini-Series) Herself 2015 In Conversation (TV Series documentary) Herself 2015 Entertainment Tonight (TV Series) Herself 2015 CBS This Morning (TV Series) Herself - Guest 2015 The Insider (TV Series) Herself 1999-2014 Cinema 3 (TV Series) Herself 2013 Good Day L.A. (TV Series) Herself - Guest 2013 Arena (TV Series documentary) Herself 2013 At the Movies (TV Series) Herself 2013 Shooting Bond (Video documentary) Herself 2013 Bond's Greatest Moments (TV Movie documentary) Herself 2012 Made in Hollywood (TV Series) Herself 1999-2012 Charlie Rose (TV Series) Herself - Guest 2008-2012 This Morning (TV Series) Herself - Guest 2012 The Secrets of Skyfall (TV Short documentary) Herself 2012 Anderson Live (TV Series) Herself 2012 J. Edgar: A Complicated Man (Video documentary short) Herself 2011 The Many Faces of... (TV Series documentary) Herself / Various Characters 2011 Na plovárne (TV Series) Herself 2010 BBC Proms (TV Series) Herself 2010 The South Bank Show Revisited (TV Series documentary) Herself - Episode #6.68 (2009) ... Herself - Guest (as Dame Judi Dench) 2007-2009 Breakfast (TV Series) 2009 Larry King Live (TV Series) Herself - Guest 2009 The One Show (TV Series) Herself 2009 Cranford in Detail (Video documentary short) Herself / Miss Matty Jenkins (as Dame Judi Dench) 2005-2008 The South Bank Show (TV Series documentary) Herself 2008 Tavis Smiley (TV Series) Herself - Guest 2007 ITV News (TV Series) Herself - BAFTA Nominee 2007 The Making of Cranford (Video documentary short) Herself / Miss Matty Jenkyns (as Dame Judi Dench) 2006 Becoming Bond (TV Movie documentary) Herself 2006 Corazón de... (TV Series) Hers</code> |
| <code>In which decade did Billboard magazine first publish and American hit chart?</code> | <code>The US Billboard song chart The US Billboard song chart Search this site with Google Song chart US Billboard The Billboard magazine has published various music charts starting (with sheet music) in 1894, the first "Music Hit Parade" was published in 1936 , the first "Music Popularity Chart" was calculated in 1940 . These charts became less irregular until the weekly "Hot 100" was started in 1958 . The current chart combines sales, airplay and downloads. A music collector that calls himself Bullfrog has been consolidating the complete chart from 1894 to the present day. he has published this information in a comprehenive spreadsheet (which can be obtained at bullfrogspond.com/ ). The Bullfrog data assigns each song a unique identifier, something like "1968_076" (which just happens to be the Bee Gees song "I've Gotta Get A Message To You"). This "Whitburn Number" is provided to match with the books of Joel Whitburn and consists of the year and a ranking within the year. A song that first entered the charts in December and has a long run is listed the following year. This numbering scheme means that songs which are still in the charts cannot be assigned a final id, because their ranking might change. So the definitive listing for a year cannot be final until about April. In our listing we only use songs with finalised IDs, this means that every year we have to wait until last year's entries are finalised before using them. (Source bullfrogspond.com/ , the original version used here was 20090808 with extra data from: the 2009 data from 20091219 the 2010 data from 20110305 the 2011 data from 20120929 the 2012 data from 20130330 the 2013 data from 20150328 The 20150328 data was the last one produced before the Billboard company forced the data to be withdrawn. As far as we know there are no more recent data sets available. This pattern of obtaining the data for a particular year in the middle of the following one comes from the way that the Bullfrog project generates the identifier for a song (what they call the "Prefix" in the spreadsheet). Recent entries are identified with keys like "2015-008" while older ones have keys like "2013_177". In the second case the underscore is significant, it indicates that this was the 177th biggest song released in 2013. Now, of course, during the year no one knows where a particular song will rank, so the underscore names can't be assigned until every song from a particular year has dropped out of the charts, so recent records are temporarily assigned a name with a dash. In about May of the following year the rankings are calculated and the final identifiers are assigned. That is why we at the Turret can only grab this data retrospectively. Attributes The original spreadsheet has a number of attributes, we have limited our attention to just a few of them: 134 9 The songs with the most entries on the chart were White Christmas (with 33 versions and a total of 110 weeks) and Stardust (with 19 and a total of 106 weeks). position The peak position that songs reached in the charts should show an smooth curve from number one down to the lowest position. This chart has more songs in the lower peak positions than one would expect. Before 1991 the profile of peak positions was exactly as you would expect, that year Billboard introduced the concept of "Recurrent" tracks, that is they removed any track from the chart which had spent more than twenty weeks in the chart and had fallen to the lower positions. weeks The effect of the "Recurrent" process, by which tracks are removed if they have spent at least twenty weeks in the chart and have fallen to the lower reaches, can clearly be seen in the strange spike in this attribute. This "adjustment" was intended to promote newer songs and ensure the chart does not become "stale". In fact since it was introduced in 1991 the length of long chart runs has increased, this might reflect the more conscious efforts of record companies to "game" the charts by controlling release times and promotions, or it coul</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### quora_pairs
* Dataset: [quora_pairs](https://huggingface.co/datasets/sentence-transformers/quora-duplicates) at [451a485](https://huggingface.co/datasets/sentence-transformers/quora-duplicates/tree/451a4850bd141edb44ade1b5828c259abd762cdb)
* Size: 4,000 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 6 tokens</li><li>mean: 13.53 tokens</li><li>max: 42 tokens</li></ul> | <ul><li>min: 6 tokens</li><li>mean: 13.68 tokens</li><li>max: 43 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:----------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------|
| <code>Astrology: I am a Capricorn Sun Cap moon and cap rising...what does that say about me?</code> | <code>I'm a triple Capricorn (Sun, Moon and ascendant in Capricorn) What does this say about me?</code> |
| <code>How can I be a good geologist?</code> | <code>What should I do to be a great geologist?</code> |
| <code>How do I read and find my YouTube comments?</code> | <code>How can I see all my Youtube comments?</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### gooaq_pairs
* Dataset: [gooaq_pairs](https://huggingface.co/datasets/sentence-transformers/gooaq) at [b089f72](https://huggingface.co/datasets/sentence-transformers/gooaq/tree/b089f728748a068b7bc5234e5bcf5b25e3c8279c)
* Size: 6,500 training samples
* Columns: <code>sentence1</code> and <code>sentence2</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 |
|:--------|:---------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 8 tokens</li><li>mean: 11.6 tokens</li><li>max: 21 tokens</li></ul> | <ul><li>min: 13 tokens</li><li>mean: 57.74 tokens</li><li>max: 127 tokens</li></ul> |
* Samples:
| sentence1 | sentence2 |
|:---------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <code>is toprol xl the same as metoprolol?</code> | <code>Metoprolol succinate is also known by the brand name Toprol XL. It is the extended-release form of metoprolol. Metoprolol succinate is approved to treat high blood pressure, chronic chest pain, and congestive heart failure.</code> |
| <code>are you experienced cd steve hoffman?</code> | <code>The Are You Experienced album was apparently mastered from the original stereo UK master tapes (according to Steve Hoffman - one of the very few who has heard both the master tapes and the CDs produced over the years). ... The CD booklets were a little sparse, but at least they stayed true to the album's original design.</code> |
| <code>how are babushka dolls made?</code> | <code>Matryoshka dolls are made of wood from lime, balsa, alder, aspen, and birch trees; lime is probably the most common wood type. ... After cutting, the trees are stripped of most of their bark, although a few inner rings of bark are left to bind the wood and keep it from splitting.</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
### Evaluation Datasets
#### nli-pairs
* Dataset: [nli-pairs](https://huggingface.co/datasets/sentence-transformers/all-nli) at [d482672](https://huggingface.co/datasets/sentence-transformers/all-nli/tree/d482672c8e74ce18da116f430137434ba2e52fab)
* Size: 750 evaluation samples
* Columns: <code>anchor</code> and <code>positive</code>
* Approximate statistics based on the first 1000 samples:
| | anchor | positive |
|:--------|:----------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|
| type | string | string |
| details | <ul><li>min: 5 tokens</li><li>mean: 17.61 tokens</li><li>max: 51 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 9.71 tokens</li><li>max: 29 tokens</li></ul> |
* Samples:
| anchor | positive |
|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------|
| <code>Two women are embracing while holding to go packages.</code> | <code>Two woman are holding packages.</code> |
| <code>Two young children in blue jerseys, one with the number 9 and one with the number 2 are standing on wooden steps in a bathroom and washing their hands in a sink.</code> | <code>Two kids in numbered jerseys wash their hands.</code> |
| <code>A man selling donuts to a customer during a world exhibition event held in the city of Angeles</code> | <code>A man selling donuts to a customer.</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### scitail-pairs-pos
* Dataset: [scitail-pairs-pos](https://huggingface.co/datasets/allenai/scitail) at [0cc4353](https://huggingface.co/datasets/allenai/scitail/tree/0cc4353235b289165dfde1c7c5d1be983f99ce44)
* Size: 750 evaluation samples
* Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 | label |
|:--------|:----------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:------------------------------------------------|
| type | string | string | int |
| details | <ul><li>min: 5 tokens</li><li>mean: 22.43 tokens</li><li>max: 61 tokens</li></ul> | <ul><li>min: 8 tokens</li><li>mean: 15.3 tokens</li><li>max: 36 tokens</li></ul> | <ul><li>0: ~50.00%</li><li>1: ~50.00%</li></ul> |
* Samples:
| sentence1 | sentence2 | label |
|:----------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------|:---------------|
| <code>An introduction to atoms and elements, compounds, atomic structure and bonding, the molecule and chemical reactions.</code> | <code>Replace another in a molecule happens to atoms during a substitution reaction.</code> | <code>0</code> |
| <code>Wavelength The distance between two consecutive points on a sinusoidal wave that are in phase;</code> | <code>Wavelength is the distance between two corresponding points of adjacent waves called.</code> | <code>1</code> |
| <code>humans normally have 23 pairs of chromosomes.</code> | <code>Humans typically have 23 pairs pairs of chromosomes.</code> | <code>1</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "GISTEmbedLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
#### qnli-contrastive
* Dataset: [qnli-contrastive](https://huggingface.co/datasets/nyu-mll/glue) at [bcdcba7](https://huggingface.co/datasets/nyu-mll/glue/tree/bcdcba79d07bc864c1c254ccfcedcce55bcc9a8c)
* Size: 750 evaluation samples
* Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
| | sentence1 | sentence2 | label |
|:--------|:----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:-----------------------------|
| type | string | string | int |
| details | <ul><li>min: 6 tokens</li><li>mean: 14.15 tokens</li><li>max: 36 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 36.98 tokens</li><li>max: 225 tokens</li></ul> | <ul><li>0: 100.00%</li></ul> |
* Samples:
| sentence1 | sentence2 | label |
|:--------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------|:---------------|
| <code>What came into force after the new constitution was herald?</code> | <code>As of that day, the new constitution heralding the Second Republic came into force.</code> | <code>0</code> |
| <code>What is the first major city in the stream of the Rhine?</code> | <code>The most important tributaries in this area are the Ill below of Strasbourg, the Neckar in Mannheim and the Main across from Mainz.</code> | <code>0</code> |
| <code>What is the minimum required if you want to teach in Canada?</code> | <code>In most provinces a second Bachelor's Degree such as a Bachelor of Education is required to become a qualified teacher.</code> | <code>0</code> |
* Loss: [<code>AdaptiveLayerLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#adaptivelayerloss) with these parameters:
```json
{
"loss": "OnlineContrastiveLoss",
"n_layers_per_step": -1,
"last_layer_weight": 1.5,
"prior_layers_weight": 0.15,
"kl_div_weight": 2,
"kl_temperature": 2
}
```
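Unlike the other datasets, qnli-contrastive pairs a labeled-pairs loss with the adaptive-layer wrapper. Under the same assumptions as the earlier sketch (with `model` defined there), this would look like:
```python
from sentence_transformers.losses import AdaptiveLayerLoss, OnlineContrastiveLoss

# Reuses `model` from the earlier sketch. OnlineContrastiveLoss keeps only
# the hard positives and hard negatives among the labeled pairs in a batch.
qnli_loss = AdaptiveLayerLoss(
    model=model,
    loss=OnlineContrastiveLoss(model=model),
    n_layers_per_step=-1,
    last_layer_weight=1.5,
    prior_layers_weight=0.15,
    kl_div_weight=2,
    kl_temperature=2,
)
```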
### Training Hyperparameters
#### Non-Default Hyperparameters
- `eval_strategy`: steps
- `per_device_train_batch_size`: 28
- `per_device_eval_batch_size`: 18
- `learning_rate`: 2e-05
- `weight_decay`: 5e-07
- `num_train_epochs`: 4
- `lr_scheduler_type`: cosine_with_restarts
- `lr_scheduler_kwargs`: {'num_cycles': 5}
- `warmup_ratio`: 0.25
- `save_safetensors`: False
- `fp16`: True
- `push_to_hub`: True
- `hub_model_id`: bobox/DeBERTaV3-small-SenTra-AdaptiveLayers-AllSoft-HighTemp-n
- `hub_strategy`: checkpoint
- `batch_sampler`: no_duplicates
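
Expressed in code, these non-default values correspond to the following `SentenceTransformerTrainingArguments` (a sketch; the output directory is a placeholder):
```python
from sentence_transformers import SentenceTransformerTrainingArguments
from sentence_transformers.training_args import BatchSamplers

args = SentenceTransformerTrainingArguments(
    output_dir="output",  # placeholder path
    eval_strategy="steps",
    per_device_train_batch_size=28,
    per_device_eval_batch_size=18,
    learning_rate=2e-5,
    weight_decay=5e-7,
    num_train_epochs=4,
    lr_scheduler_type="cosine_with_restarts",
    lr_scheduler_kwargs={"num_cycles": 5},
    warmup_ratio=0.25,
    save_safetensors=False,
    fp16=True,
    push_to_hub=True,
    hub_model_id="bobox/DeBERTaV3-small-SenTra-AdaptiveLayers-AllSoft-HighTemp-n",
    hub_strategy="checkpoint",
    batch_sampler=BatchSamplers.NO_DUPLICATES,
)
```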
#### All Hyperparameters
<details><summary>Click to expand</summary>
- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 28
- `per_device_eval_batch_size`: 18
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `learning_rate`: 2e-05
- `weight_decay`: 5e-07
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 4
- `max_steps`: -1
- `lr_scheduler_type`: cosine_with_restarts
- `lr_scheduler_kwargs`: {'num_cycles': 5}
- `warmup_ratio`: 0.25
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: False
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: True
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: True
- `resume_from_checkpoint`: None
- `hub_model_id`: bobox/DeBERTaV3-small-SenTra-AdaptiveLayers-AllSoft-HighTemp-n
- `hub_strategy`: checkpoint
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`:
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `batch_sampler`: no_duplicates
- `multi_dataset_batch_sampler`: proportional
</details>
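Putting the pieces together, Sentence Transformers v3 trains on several named datasets at once by pairing a dataset dict with a loss dict (sampled proportionally, per `multi_dataset_batch_sampler` above). A minimal sketch reusing the objects from the earlier snippets; the dataset variables are assumed to be prepared `datasets.Dataset` objects with the columns listed above, and only a few of the datasets are shown:
```python
from sentence_transformers import SentenceTransformerTrainer

# `model`, `args`, `train_loss`, and `qnli_loss` come from the sketches above.
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset={"qasc_pairs": qasc_pairs, "msmarco_pairs": msmarco_pairs},
    eval_dataset={"nli-pairs": nli_eval, "qnli-contrastive": qnli_eval},
    loss={
        "qasc_pairs": train_loss,
        "msmarco_pairs": train_loss,
        "nli-pairs": train_loss,
        "qnli-contrastive": qnli_loss,
    },
)
trainer.train()
```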
### Training Logs
| Epoch | Step | Training Loss | scitail-pairs-pos loss | qnli-contrastive loss | nli-pairs loss | sts-test_spearman_cosine |
|:------:|:-----:|:-------------:|:----------------------:|:---------------------:|:--------------:|:------------------------:|
| 0.1003 | 281 | 8.4339 | - | - | - | - |
| 0.2006 | 562 | 6.8644 | - | - | - | - |
| 0.3009 | 843 | 5.1225 | - | - | - | - |
| 0.4001 | 1121 | - | 2.4070 | 4.2827 | 3.6032 | - |
| 0.4011 | 1124 | 3.9997 | - | - | - | - |
| 0.5014 | 1405 | 3.6186 | - | - | - | - |
| 0.6017 | 1686 | 3.259 | - | - | - | - |
| 0.7020 | 1967 | 3.1712 | - | - | - | - |
| 0.8001 | 2242 | - | 1.6090 | 2.5195 | 2.2851 | - |
| 0.8023 | 2248 | 3.104 | - | - | - | - |
| 0.9026 | 2529 | 2.8549 | - | - | - | - |
| 1.0029 | 2810 | 2.8668 | - | - | - | - |
| 1.1031 | 3091 | 2.7466 | - | - | - | - |
| 1.2002 | 3363 | - | 1.3474 | 2.2222 | 1.8491 | - |
| 1.2034 | 3372 | 2.6502 | - | - | - | - |
| 1.3037 | 3653 | 2.2191 | - | - | - | - |
| 1.4040 | 3934 | 2.2311 | - | - | - | - |
| 1.5043 | 4215 | 2.22 | - | - | - | - |
| 1.6003 | 4484 | - | 1.2671 | 1.7964 | 1.6444 | - |
| 1.6046 | 4496 | 2.1372 | - | - | - | - |
| 1.7049 | 4777 | 2.2219 | - | - | - | - |
| 1.8051 | 5058 | 2.2618 | - | - | - | - |
| 1.9054 | 5339 | 1.9995 | - | - | - | - |
| 2.0004 | 5605 | - | 1.2434 | 1.8182 | 1.5385 | - |
| 2.0057 | 5620 | 1.9757 | - | - | - | - |
| 2.1060 | 5901 | 2.0401 | - | - | - | - |
| 2.2063 | 6182 | 1.9818 | - | - | - | - |
| 2.3066 | 6463 | 1.7816 | - | - | - | - |
| 2.4004 | 6726 | - | 1.0396 | 1.5587 | 1.5077 | - |
| 2.4069 | 6744 | 1.9239 | - | - | - | - |
| 2.5071 | 7025 | 2.0148 | - | - | - | - |
| 2.6074 | 7306 | 1.9629 | - | - | - | - |
| 2.7077 | 7587 | 1.7316 | - | - | - | - |
| 2.8005 | 7847 | - | 1.0507 | 1.3294 | 1.4039 | - |
| 2.8080 | 7868 | 1.7794 | - | - | - | - |
| 2.9083 | 8149 | 1.7029 | - | - | - | - |
| 3.0086 | 8430 | 1.7996 | - | - | - | - |
| 3.1089 | 8711 | 1.9379 | - | - | - | - |
| 3.2006 | 8968 | - | 0.9949 | 1.3678 | 1.3436 | - |
| 3.2091 | 8992 | 1.844 | - | - | - | - |
| 3.3094 | 9273 | 1.358 | - | - | - | - |
| 3.4097 | 9554 | 1.5104 | - | - | - | - |
| 3.5100 | 9835 | 1.6964 | - | - | - | - |
| 3.6006 | 10089 | - | 0.9538 | 1.1866 | 1.3098 | - |
| 3.6103 | 10116 | 1.7661 | - | - | - | - |
| 3.7106 | 10397 | 1.6529 | - | - | - | - |
| 3.8108 | 10678 | 1.6835 | - | - | - | - |
| 3.9111 | 10959 | 1.35 | - | - | - | - |
| 4.0 | 11208 | - | - | - | - | 0.5551 |
### Framework Versions
- Python: 3.10.13
- Sentence Transformers: 3.0.1
- Transformers: 4.41.2
- PyTorch: 2.1.2
- Accelerate: 0.30.1
- Datasets: 2.19.2
- Tokenizers: 0.19.1
## Citation
### BibTeX
#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2019",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/1908.10084",
}
```
#### AdaptiveLayerLoss
```bibtex
@misc{li20242d,
title={2D Matryoshka Sentence Embeddings},
author={Xianming Li and Zongxi Li and Jing Li and Haoran Xie and Qing Li},
year={2024},
eprint={2402.14776},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
#### CoSENTLoss
```bibtex
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
```
#### GISTEmbedLoss
```bibtex
@misc{solatorio2024gistembed,
title={GISTEmbed: Guided In-sample Selection of Training Negatives for Text Embedding Fine-tuning},
author={Aivin V. Solatorio},
year={2024},
eprint={2402.16829},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```