Bram Vanroy committed
Commit 107c2a4
1 Parent(s): 2686c5b

always lower case shortname

Browse files:
- app.py +49 -0
- evals/models.json +0 -144
- generate_overview_json.py +1 -1
app.py CHANGED

@@ -1,5 +1,7 @@
 import json
 from collections import defaultdict
+from dataclasses import dataclass, field
+from functools import cached_property
 from pathlib import Path
 
 import numpy as np

@@ -18,6 +20,51 @@ BENCHMARKS = [ARC, HELLASWAG, MMLU, TRUTHFULQA]
 
 METRICS = ["acc_norm", "acc_norm", "acc_norm", "mc2"]
 
+MODEL_COL = "Model"
+AVERAGE_COL = "Average"
+ARC_COL = "ARC (25-shot)"
+HELLASWAG_COL = "HellaSwag (10-shot)"
+MMLU_COL = "MMLU (5-shot)"
+TRUTHFULQA_COL = "TruthfulQA (0-shot)"
+TRAIN_TYPE_COL = "Training type"
+TRAIN_TYPE_COL = "Training type"
+NUM_PARAMETERS = "Num. parameters"
+
+
+@dataclass
+class Result:
+    train_type: str
+    num_parameters: int
+    arc: float = field(default=0.)
+    hellaswag: float = field(default=0.)
+    mmlu: float = field(default=0.)
+    truthfulqa: float = field(default=0.)
+
+    @cached_property
+    def num_parameters_kmb(self) -> str:
+        return convert_number_to_kmb(self.num_parameters)
+
+    @cached_property
+    def average(self) -> float:
+        return (self.arc + self.hellaswag + self.mmlu + self.truthfulqa) / 4
+
+
+def convert_number_to_kmb(number: int) -> str:
+    """
+    Converts a number to a string with K, M or B suffix
+    :param number: the number to convert
+    :return: a string with the number and a suffix, e.g. "7B", rounded to one decimal
+    """
+    if number >= 1_000_000_000:
+        return f"{round(number / 1_000_000_000, 1)}B"
+    elif number >= 1_000_000:
+        return f"{round(number / 1_000_000, 1)}M"
+    elif number >= 1_000:
+        return f"{round(number / 1_000, 1)}K"
+    else:
+        return str(number)
+
+
 
 def collect_results() -> dict[tuple[str, str], dict[str, float]]:
     """

@@ -104,6 +151,8 @@ HELLASWAG_COL = "HellaSwag (10-shot)"
 MMLU_COL = "MMLU (5-shot)"
 TRUTHFULQA_COL = "TruthfulQA (0-shot)"
 TRAIN_TYPE_COL = "Training type"
+TRAIN_TYPE_COL = "Training type"
+NUM_PARAMETERS = "Num. parameters"
 
 COLS = [MODEL_COL, TRAIN_TYPE_COL, AVERAGE_COL, ARC_COL, HELLASWAG_COL, MMLU_COL, TRUTHFULQA_COL]
 TYPES = ["str", "number", "number", "number", "number", "number"]
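For reference, a minimal sketch of how the added Result and convert_number_to_kmb helpers behave. The parameter counts are taken from the models.json entries below; the train_type and benchmark scores are made-up illustrative values, and the import path assumes app.py is importable as `app`:

    from app import Result, convert_number_to_kmb  # assumed import path

    convert_number_to_kmb(6738415616)  # "6.7B"
    convert_number_to_kmb(125198592)   # "125.2M"

    result = Result(train_type="pretrained", num_parameters=7241732096,
                    arc=0.40, hellaswag=0.60, mmlu=0.40, truthfulqa=0.40)
    result.average             # 0.45, the mean of the four benchmark scores
    result.num_parameters_kmb  # "7.2B"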
evals/models.json DELETED

@@ -1,144 +0,0 @@
-{
-    "Llama-2-13b-chat-dutch": {
-        "compute_dtype": "bfloat16",
-        "model_name": "BramVanroy/Llama-2-13b-chat-dutch",
-        "num_parameters": 13015864320,
-        "quantization": "8-bit"
-    },
-    "Llama-2-13b-chat-hf": {
-        "compute_dtype": "bfloat16",
-        "model_name": "meta-llama/Llama-2-13b-chat-hf",
-        "num_parameters": 13015864320,
-        "quantization": "8-bit"
-    },
-    "Llama-2-13b-hf": {
-        "compute_dtype": "bfloat16",
-        "model_name": "meta-llama/Llama-2-13b-hf",
-        "num_parameters": 13015864320,
-        "quantization": "8-bit"
-    },
-    "Llama-2-7b-chat-hf": {
-        "compute_dtype": "bfloat16",
-        "model_name": "meta-llama/Llama-2-7b-chat-hf",
-        "num_parameters": 6738415616,
-        "quantization": "8-bit"
-    },
-    "Llama-2-7b-hf": {
-        "compute_dtype": "bfloat16",
-        "model_name": "meta-llama/Llama-2-7b-hf",
-        "num_parameters": 6738415616,
-        "quantization": "8-bit"
-    },
-    "Mistral-7B-v0.1": {
-        "compute_dtype": "bfloat16",
-        "model_name": "mistralai/Mistral-7B-v0.1",
-        "num_parameters": 7241732096,
-        "quantization": "8-bit"
-    },
-    "Orca-2-13b": {
-        "compute_dtype": "bfloat16",
-        "model_name": "microsoft/Orca-2-13b",
-        "num_parameters": 13015895040,
-        "quantization": "8-bit"
-    },
-    "Orca-2-7b": {
-        "compute_dtype": "bfloat16",
-        "model_name": "microsoft/Orca-2-7b",
-        "num_parameters": 6738440192,
-        "quantization": "8-bit"
-    },
-    "bloom-7b1": {
-        "args": "pretrained=bigscience/bloom-7b1",
-        "model_name": "pretrained=bigscience/bloom-7b1"
-    },
-    "gpt-neo-1.3B-dutch": {
-        "compute_dtype": "bfloat16",
-        "model_name": "yhavinga/gpt-neo-1.3B-dutch",
-        "num_parameters": 1315575808,
-        "quantization": "8-bit"
-    },
-    "gpt-neo-1.3b-dutch": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "yhavinga/gpt-neo-1.3B-dutch"
-    },
-    "gpt-neo-125M-dutch": {
-        "compute_dtype": "bfloat16",
-        "model_name": "yhavinga/gpt-neo-125M-dutch",
-        "num_parameters": 125198592,
-        "quantization": "8-bit"
-    },
-    "gpt-neo-125m-dutch": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "yhavinga/gpt-neo-125M-dutch"
-    },
-    "gpt2-large-dutch": {
-        "compute_dtype": "bfloat16",
-        "model_name": "yhavinga/gpt2-large-dutch",
-        "num_parameters": 774030080,
-        "quantization": "8-bit"
-    },
-    "gpt2-medium-dutch": {
-        "compute_dtype": "bfloat16",
-        "model_name": "yhavinga/gpt2-medium-dutch",
-        "num_parameters": 354823168,
-        "quantization": "8-bit"
-    },
-    "llama-2-13b-chat-dutch": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "BramVanroy/Llama-2-13b-chat-dutch"
-    },
-    "llama-2-13b-chat-hf": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "meta-llama/Llama-2-13b-chat-hf"
-    },
-    "llama-2-13b-hf": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "meta-llama/Llama-2-13b-hf"
-    },
-    "llama-2-7b-chat-hf": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "meta-llama/Llama-2-7b-chat-hf"
-    },
-    "llama-2-7b-hf": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "meta-llama/Llama-2-7b-hf"
-    },
-    "llama-7b": {
-        "args": "pretrained=/sensei-fs/users/daclai/uoChatGPT/llama-7B",
-        "model_name": "pretrained=/sensei-fs/users/daclai/uoChatGPT/llama-7B"
-    },
-    "llama2-13b-ft-mc4": {
-        "compute_dtype": "bfloat16",
-        "model_name": "BramVanroy/llama2-13b-ft-mc4_nl_cleaned_tiny",
-        "num_parameters": 13015864320,
-        "quantization": "8-bit"
-    },
-    "llama2-13b-ft-mc4_nl_cleaned_tiny": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "BramVanroy/llama2-13b-ft-mc4_nl_cleaned_tiny"
-    },
-    "mistral-7b-v0.1": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "mistralai/Mistral-7B-v0.1"
-    },
-    "neural-chat-7b-v3-1": {
-        "compute_dtype": "bfloat16",
-        "model_name": "Intel/neural-chat-7b-v3-1",
-        "num_parameters": 7241732096,
-        "quantization": "8-bit"
-    },
-    "orca-2-13b": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "microsoft/Orca-2-13b"
-    },
-    "orca-2-7b": {
-        "args": "use_accelerate=True,device_map_option=auto,dtype=bfloat16,load_in_8bit=True",
-        "model_name": "microsoft/Orca-2-7b"
-    },
-    "zephyr-7b-beta": {
-        "compute_dtype": "bfloat16",
-        "model_name": "HuggingFaceH4/zephyr-7b-beta",
-        "num_parameters": 7241732096,
-        "quantization": "8-bit"
-    }
-}
generate_overview_json.py CHANGED

@@ -16,7 +16,7 @@ def main():
    for pfin in evals_dir.rglob("*.json"):
        if pfin.stem == "models":
            continue
-       short_name = pfin.stem.split("_")[2]
+       short_name = pfin.stem.split("_")[2].lower()

        data = json.loads(pfin.read_text(encoding="utf-8"))
        if "config" not in data:
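A minimal sketch of what the lowercasing does, assuming a results file that follows a <date>_<time>_<model>_... naming pattern (the exact filename below is hypothetical):

    from pathlib import Path

    pfin = Path("evals/2023-12-01_12-00-00_Llama-2-13b-chat-dutch_arc.json")  # hypothetical filename
    short_name = pfin.stem.split("_")[2].lower()
    # short_name == "llama-2-13b-chat-dutch"

With the lowercasing in place, differently cased variants such as "Llama-2-13b-chat-dutch" and "llama-2-13b-chat-dutch" (both of which appeared in the deleted evals/models.json above) resolve to a single short name.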