Commit db6f218: "added public option"
edbeeching committed
Parent(s): 5cb1426
app.py CHANGED

@@ -8,11 +8,12 @@ import json
 from apscheduler.schedulers.background import BackgroundScheduler
 import pandas as pd
 import datetime
-from utils import get_eval_results_dicts, make_clickable_model
+from utils import get_eval_results_dicts, make_clickable_model
 
 # clone / pull the lmeh eval data
 H4_TOKEN = os.environ.get("H4_TOKEN", None)
 LMEH_REPO = "HuggingFaceH4/lmeh_evaluations"
+IS_PUBLIC = bool(True) # add secret here
 
 repo=None
 if H4_TOKEN:

@@ -46,8 +47,12 @@ def load_results(model, benchmark, metric):
     return mean_acc, data["config"]["model_args"]
 
 
-COLS = ["base_model", "revision", "
-TYPES = ["markdown","str", "
+COLS = ["base_model", "revision", "total ⬆️", "ARC (25-shot) ⬆️", "HellaSwag (10-shot) ⬆️", "MMLU (5-shot) ⬆️", "TruthQA (0-shot) ⬆️"]
+TYPES = ["markdown","str", "number", "number", "number", "number", "number", ]
+
+if not IS_PUBLIC:
+    COLS.insert(2, "8bit")
+    TYPES.insert(2, "bool")
 
 EVAL_COLS = ["model", "revision", "private", "8bit_eval", "is_delta_weight", "status"]
 EVAL_TYPES = ["markdown","str", "bool", "bool", "bool", "str"]
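The "# add secret here" note on IS_PUBLIC suggests the flag is meant to come from a Space secret rather than the hard-coded bool(True). A minimal sketch of that wiring, assuming a hypothetical IS_PUBLIC environment variable (the name mirrors how H4_TOKEN is read and is not part of this commit):

import os

# Hypothetical Space secret / env var; the commit hard-codes bool(True) and
# leaves a "# add secret here" note, so reading the environment is an assumption.
IS_PUBLIC = os.environ.get("IS_PUBLIC", "True").lower() in ("1", "true", "yes")

COLS = ["base_model", "revision", "total ⬆️", "ARC (25-shot) ⬆️",
        "HellaSwag (10-shot) ⬆️", "MMLU (5-shot) ⬆️", "TruthQA (0-shot) ⬆️"]
TYPES = ["markdown", "str", "number", "number", "number", "number", "number"]

if not IS_PUBLIC:
    # The private view also exposes the 8-bit flag column, as in the hunk above.
    COLS.insert(2, "8bit")
    TYPES.insert(2, "bool")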
@@ -56,7 +61,31 @@ def get_leaderboard():
     print("pulling changes")
     repo.git_pull()
 
-    all_data = get_eval_results_dicts()
+    all_data = get_eval_results_dicts(IS_PUBLIC)
+
+    gpt4_values = {
+        "base_model":f'<a target="_blank" href=https://arxiv.org/abs/2303.08774 style="color: blue; text-decoration: underline;text-decoration-style: dotted;">gpt4</a>',
+        "revision":"tech report",
+        "8bit":None,
+        "total ⬆️":84.3,
+        "ARC (25-shot) ⬆️":96.3,
+        "HellaSwag (10-shot) ⬆️":95.3,
+        "MMLU (5-shot) ⬆️":86.4,
+        "TruthQA (0-shot) ⬆️":59.0,
+    }
+    all_data.append(gpt4_values)
+    gpt35_values = {
+        "base_model":f'<a target="_blank" href=https://arxiv.org/abs/2303.08774 style="color: blue; text-decoration: underline;text-decoration-style: dotted;">gpt3.5</a>',
+        "revision":"tech report",
+        "8bit":None,
+        "total ⬆️":71.9,
+        "ARC (25-shot) ⬆️":85.2,
+        "HellaSwag (10-shot) ⬆️":85.5,
+        "MMLU (5-shot) ⬆️":70.0,
+        "TruthQA (0-shot) ⬆️":47.0,
+    }
+    all_data.append(gpt35_values)
+
     dataframe = pd.DataFrame.from_records(all_data)
     dataframe = dataframe.sort_values(by=['total ⬆️'], ascending=False)
     print(dataframe)

@@ -77,7 +106,7 @@ def get_eval_table():
         with open(file_path) as fp:
             data = json.load(fp)
 
-        data["# params"] =
+        data["# params"] = "unknown"
         data["model"] = make_clickable_model(data["model"])
         data["revision"] = data.get("revision", "main")
 
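The GPT-4 and GPT-3.5 rows above are appended as fixed reference entries before the DataFrame is built, so they sort into the table alongside submitted models. A small illustrative sketch of that flow with made-up rows (the model names and scores below, other than the gpt4 total taken from the hunk, are placeholders):

import pandas as pd

rows = [
    {"base_model": "model-a", "revision": "main", "total ⬆️": 51.4},      # placeholder
    {"base_model": "model-b", "revision": "main", "total ⬆️": 47.2},      # placeholder
    {"base_model": "gpt4", "revision": "tech report", "total ⬆️": 84.3},  # reference row from the diff
]
df = pd.DataFrame.from_records(rows)
df = df.sort_values(by=["total ⬆️"], ascending=False)
print(df)  # the reference row sorts to the top, as in get_leaderboard()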
@@ -171,8 +200,16 @@ block = gr.Blocks()
 with block:
     with gr.Row():
         gr.Markdown(f"""
-
-
+# 🤗 Open Chatbot Leaderboard
+<font size="4">With the plethora of chatbot LLMs being released week upon week, often with grandiose claims of their performance, it can be hard to filter out the genuine progress that is being made by the open-source community and which chatbot is the current state of the art. The 🤗 Open Chatbot Leaderboard aims to track, rank and evaluate chatbot models as they are released. We evaluate models of 4 key benchmarks from the <a href="https://github.com/EleutherAI/lm-evaluation-harness" target="_blank"> Eleuther AI Language Model Evaluation Harness </a>, a unified framework to test generative language models on a large number of different evaluation tasks. A key advantage of this leaderboard is that anyone from the community can submit a model for automated evaluation on the 🤗 research cluster. As long as it is Transformers model with weights on the 🤗 hub. We also support delta-weights for non-commercial licensed models, such as llama.
+<p>
+Evaluation is performed against 4 popular benchmarks:
+- <a href="https://arxiv.org/abs/1803.05457" target="_blank"> AI2 Reasoning Challenge </a> (25-shot) - a set of grade-school science questions.
+- <a href="https://arxiv.org/abs/1905.07830" target="_blank"> HellaSwag </a> (10-shot) - a test of commonsense inference, which is easy for humans (~95%) but challenging for SOTA models.
+- <a href="https://arxiv.org/abs/2009.03300" target="_blank"> MMLU </a> (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more.
+- <a href="https://arxiv.org/abs/2109.07958" target="_blank"> Truthful QA MC </a> (0-shot) - a benchmark to measure whether a language model is truthful in generating answers to questions.
+<p>
+We chose these benchmarks as they test a variety of reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings. </font>
 """)
 
     with gr.Row():

@@ -186,10 +223,10 @@ with block:
 # Evaluation Queue for the LMEH benchmarks, these models will be automatically evaluated on the 🤗 cluster
 
 """)
-
-
-
-
+    with gr.Accordion("Evaluation Queue", open=False):
+        with gr.Row():
+            eval_table = gr.components.Dataframe(value=eval_queue, headers=EVAL_COLS,
+                                                 datatype=EVAL_TYPES, max_rows=5)
 
     with gr.Row():
         refresh_button = gr.Button("Refresh")
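The queue widget added above lives in a collapsed accordion. A minimal self-contained sketch of the same layout, using the gradio 3.x Dataframe arguments from the hunk and an empty stand-in for eval_queue (in the app it is built by get_eval_table() from the submission files):

import gradio as gr
import pandas as pd

EVAL_COLS = ["model", "revision", "private", "8bit_eval", "is_delta_weight", "status"]
EVAL_TYPES = ["markdown", "str", "bool", "bool", "bool", "str"]

# Stand-in for the real queue; the app fills this from the submission repo.
eval_queue = pd.DataFrame(columns=EVAL_COLS)

with gr.Blocks() as demo:
    with gr.Accordion("Evaluation Queue", open=False):
        with gr.Row():
            eval_table = gr.components.Dataframe(value=eval_queue, headers=EVAL_COLS,
                                                 datatype=EVAL_TYPES, max_rows=5)

demo.launch()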
@@ -202,11 +239,12 @@ with block:
         with gr.Column():
             model_name_textbox = gr.Textbox(label="Model name")
             revision_name_textbox = gr.Textbox(label="revision", placeholder="main")
-
+
         with gr.Column():
-            is_8bit_toggle = gr.Checkbox(False, label="8 bit eval")
-            private = gr.Checkbox(False, label="Private")
+            is_8bit_toggle = gr.Checkbox(False, label="8 bit eval", visible=not IS_PUBLIC)
+            private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC)
             is_delta_weight = gr.Checkbox(False, label="Delta weights")
+            base_model_name_textbox = gr.Textbox(label="base model (for delta)")
 
     with gr.Row():
         submit_button = gr.Button("Submit Eval")

@@ -220,7 +258,8 @@ with block:
     print("adding refresh leaderboard")
     def refresh_leaderboard():
         leaderboard_table = get_leaderboard()
-
+        eval_table = get_eval_table()
+        print("refreshing leaderboard")
 
     scheduler = BackgroundScheduler()
     scheduler.add_job(func=refresh_leaderboard, trigger="interval", seconds=300) # refresh every 5 mins
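The refresh job above runs on APScheduler's BackgroundScheduler; the hunk only shows add_job, so here is a minimal sketch of the full pattern, assuming the scheduler is started once after the app is defined (the callback body is a placeholder for the real table rebuild):

from apscheduler.schedulers.background import BackgroundScheduler

def refresh_leaderboard():
    # In the app this calls get_leaderboard() and get_eval_table()
    # to pull the latest eval results from the Hub.
    print("refreshing leaderboard")

scheduler = BackgroundScheduler()
scheduler.add_job(func=refresh_leaderboard, trigger="interval", seconds=300)  # every 5 minutes
scheduler.start()
# In the Space, block.launch() keeps the process alive so the background job keeps firing.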
utils.py CHANGED

@@ -23,28 +23,13 @@ BENCH_TO_NAME = {
     "hendrycks":"MMLU (5-shot) ⬆️",
     "truthfulqa_mc":"TruthQA (0-shot) ⬆️",
 }
-def make_clickable_model(model_name):
+def make_clickable_model(model_name):
     # remove user from model name
     #model_name_show = ' '.join(model_name.split('/')[1:])
 
     link = "https://huggingface.co/" + model_name
     return f'<a target="_blank" href="{link}" style="color: blue; text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
 
-def get_n_params(base_model):
-    return "unknown"
-
-    # WARNING: High memory usage
-
-    # Retrieve the number of parameters from the configuration
-    try:
-        config = AutoConfig.from_pretrained(base_model, use_auth_token=True, low_cpu_mem_usage=True)
-        n_params = AutoModel.from_config(config).num_parameters()
-    except Exception as e:
-        print(f"Error:{e} The number of parameters is not available in the config for the model '{base_model}'.")
-        return "unknown"
-
-    return str(n_params)
-
 @dataclass
 class EvalResult:
     eval_name : str

@@ -66,8 +51,8 @@ class EvalResult:
         data_dict["8bit"] = self.is_8bit
         data_dict["base_model"] = make_clickable_model(base_model)
         data_dict["revision"] = self.revision
-        data_dict["total ⬆️"] = round(sum([v for k,v in self.results.items()]),
-        data_dict["# params"] = get_n_params(base_model)
+        data_dict["total ⬆️"] = round(sum([v for k,v in self.results.items()])/4.0,1)
+        #data_dict["# params"] = get_n_params(base_model)
 
         for benchmark in BENCHMARKS:
             if not benchmark in self.results.keys():

@@ -90,7 +75,7 @@ def parse_eval_result(json_filepath: str) -> Tuple[str, dict]:
     model = path_split[-4]
     is_8bit = path_split[-2] == "8bit"
     revision = path_split[-3]
-    if len(path_split)==
+    if len(path_split)== 7:
         # handles gpt2 type models that don't have an org
         result_key = f"{path_split[-4]}_{path_split[-3]}_{path_split[-2]}"
     else:
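The len(path_split) == 7 special case above distinguishes result files for models without an org prefix. A short illustration with hypothetical file paths, assuming path_split = json_filepath.split("/") as the surrounding code implies (the key format in the else branch is not shown in the hunk; here it simply prepends the org):

# Hypothetical result paths; the real files live under evals/eval_results/.
no_org = "evals/eval_results/public/gpt2/main/16bit/results.json"
with_org = "evals/eval_results/public/EleutherAI/gpt-neox-20b/main/16bit/results.json"

for json_filepath in (no_org, with_org):
    path_split = json_filepath.split("/")
    model = path_split[-4]        # "gpt2" or "gpt-neox-20b"
    revision = path_split[-3]     # "main"
    is_8bit = path_split[-2] == "8bit"
    if len(path_split) == 7:
        # gpt2-style models without an org: the key omits the org
        result_key = f"{path_split[-4]}_{path_split[-3]}_{path_split[-2]}"
    else:
        org = path_split[-5]      # "EleutherAI"
        result_key = f"{org}_{path_split[-4]}_{path_split[-3]}_{path_split[-2]}"
    print(result_key)  # gpt2_main_16bit, EleutherAI_gpt-neox-20b_main_16bit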
@@ -101,7 +86,7 @@ def parse_eval_result(json_filepath: str) -> Tuple[str, dict]:
     for benchmark, metric in zip(BENCHMARKS, METRICS):
         if benchmark in json_filepath:
             accs = np.array([v[metric] for k, v in data["results"].items()])
-            mean_acc = round(np.mean(accs),
+            mean_acc = round(np.mean(accs)*100.0,1)
             eval_result = EvalResult(result_key, org, model, revision, is_8bit, {benchmark:mean_acc})
 
     return result_key, eval_result

@@ -109,8 +94,12 @@ def parse_eval_result(json_filepath: str) -> Tuple[str, dict]:
 
 
 
-def get_eval_results() -> List[EvalResult]:
-    json_filepaths = glob.glob("evals/eval_results
+def get_eval_results(is_public) -> List[EvalResult]:
+    json_filepaths = glob.glob("evals/eval_results/public/**/16bit/*.json", recursive=True)
+    if not is_public:
+        json_filepaths += glob.glob("evals/eval_results/private/**/*.json", recursive=True)
+        json_filepaths += glob.glob("evals/eval_results/private/**/*.json", recursive=True)
+        json_filepaths += glob.glob("evals/eval_results/public/**/8bit/*.json", recursive=True) # include the 8bit evals of public models
     eval_results = {}
 
     for json_filepath in json_filepaths:

@@ -125,8 +114,8 @@ def get_eval_results() -> List[EvalResult]:
 
     return eval_results
 
-def get_eval_results_dicts() -> List[Dict]:
-    eval_results = get_eval_results()
+def get_eval_results_dicts(is_public=True) -> List[Dict]:
+    eval_results = get_eval_results(is_public)
 
     return [e.to_dict() for e in eval_results]
 
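Taken together, the two rounding changes above mean each benchmark is stored as an accuracy percentage and the "total ⬆️" column is the plain mean of the four benchmark scores. A small worked example with made-up numbers (the dict keys are illustrative, not the exact BENCHMARKS identifiers):

import numpy as np

# Made-up per-task accuracies for one benchmark (e.g. a set of MMLU subtasks).
accs = np.array([0.52, 0.58])
mean_acc = round(np.mean(accs) * 100.0, 1)     # 55.0, stored as a percentage

# Made-up per-benchmark scores for one model.
results = {"arc": 55.0, "hellaswag": 72.4, "mmlu": 40.2, "truthfulqa_mc": 38.0}
total = round(sum(results.values()) / 4.0, 1)  # 51.4, the "total ⬆️" column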