edbeeching committed
Commit f90ad24 · Parent(s): a7919f0

refactoring leaderboard
app.py
CHANGED
@@ -8,6 +8,7 @@ import json
 from apscheduler.schedulers.background import BackgroundScheduler
 import pandas as pd
 import datetime
+from utils import get_eval_results_dicts, make_clickable_model
 
 # clone / pull the lmeh eval data
 H4_TOKEN = os.environ.get("H4_TOKEN", None)
@@ -29,21 +30,9 @@ if H4_TOKEN:
 
 # parse the results
 BENCHMARKS = ["arc_challenge", "hellaswag", "hendrycks", "truthfulqa_mc"]
-BENCH_TO_NAME = {
-    "arc_challenge":"ARC (25-shot) ⬆️",
-    "hellaswag":"HellaSwag (10-shot) ⬆️",
-    "hendrycks":"MMLU (5-shot) ⬆️",
-    "truthfulqa_mc":"TruthQA (0-shot) ⬆️",
-}
-METRICS = ["acc_norm", "acc_norm", "acc_norm", "mc2"]
-
 
-
-    # remove user from model name
-    #model_name_show = ' '.join(model_name.split('/')[1:])
+METRICS = ["acc_norm", "acc_norm", "acc_norm", "mc2"]
 
-    link = "https://huggingface.co/" + model_name
-    return f'<a target="_blank" href="{link}" style="color: blue; text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
 
 def load_results(model, benchmark, metric):
     file_path = os.path.join("evals", model, f"{model}-eval_{benchmark}.json")
@@ -82,28 +71,29 @@ def get_leaderboard():
     if repo:
         print("pulling changes")
         repo.git_pull()
-    entries = [entry for entry in os.listdir("evals") if not (entry.startswith('.') or entry=="eval_requests" or entry=="evals")]
-    model_directories = [entry for entry in entries if os.path.isdir(os.path.join("evals", entry))]
-    all_data = []
-    for model in model_directories:
-        model_data = {"base_model": None, "eval_name": model}
+    # entries = [entry for entry in os.listdir("evals") if not (entry.startswith('.') or entry=="eval_requests" or entry=="evals")]
+    # model_directories = [entry for entry in entries if os.path.isdir(os.path.join("evals", entry))]
+    # all_data = []
+    # for model in model_directories:
+    #     model_data = {"base_model": None, "eval_name": model}
 
-        for benchmark, metric in zip(BENCHMARKS, METRICS):
-            value, base_model = load_results(model, benchmark, metric)
-            model_data[BENCH_TO_NAME[benchmark]] = round(value,3)
-            if base_model is not None: # in case the last benchmark failed
-                model_data["base_model"] = base_model
+    #     for benchmark, metric in zip(BENCHMARKS, METRICS):
+    #         value, base_model = load_results(model, benchmark, metric)
+    #         model_data[BENCH_TO_NAME[benchmark]] = round(value,3)
+    #         if base_model is not None: # in case the last benchmark failed
+    #             model_data["base_model"] = base_model
 
-        model_data["total ⬆️"] = round(sum(model_data[benchmark] for benchmark in BENCH_TO_NAME.values()),3)
+    #     model_data["total ⬆️"] = round(sum(model_data[benchmark] for benchmark in BENCH_TO_NAME.values()),3)
 
-        if model_data["base_model"] is not None:
-            model_data["base_model"] = make_clickable_model(model_data["base_model"])
+    #     if model_data["base_model"] is not None:
+    #         model_data["base_model"] = make_clickable_model(model_data["base_model"])
 
-        model_data["# params"] = get_n_params(model_data["base_model"])
+    #     model_data["# params"] = get_n_params(model_data["base_model"])
 
-        if model_data["base_model"] is not None:
-            all_data.append(model_data)
+    #     if model_data["base_model"] is not None:
+    #         all_data.append(model_data)
 
+    all_data = get_eval_results_dicts()
     dataframe = pd.DataFrame.from_records(all_data)
     dataframe = dataframe.sort_values(by=['total ⬆️'], ascending=False)
 
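Net effect of the three hunks: get_leaderboard no longer walks the evals/ directory and assembles rows by hand; it pulls the dataset repo and delegates row construction to the new utils module. Below is a minimal sketch (not part of the commit) of the resulting flow, assuming repo is the Repository handle created elsewhere in app.py and passed as a parameter here only to keep the sketch self-contained:

import pandas as pd
from utils import get_eval_results_dicts

def get_leaderboard(repo=None):
    # refresh the local clone of the evaluation dataset, if available
    if repo:
        print("pulling changes")
        repo.git_pull()

    # one dict per evaluated model, keyed by the BENCH_TO_NAME display columns
    all_data = get_eval_results_dicts()

    dataframe = pd.DataFrame.from_records(all_data)
    dataframe = dataframe.sort_values(by=["total ⬆️"], ascending=False)
    return dataframe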
utils.py
ADDED
@@ -0,0 +1,119 @@
import os
import shutil
import numpy as np
import gradio as gr
from huggingface_hub import Repository, HfApi
from transformers import AutoConfig
import json
from apscheduler.schedulers.background import BackgroundScheduler
import pandas as pd
import datetime
import glob
from dataclasses import dataclass
from typing import List, Tuple, Dict

# clone / pull the lmeh eval data
H4_TOKEN = os.environ.get("H4_TOKEN", None)
LMEH_REPO = "HuggingFaceH4/lmeh_evaluations"

# repo=None
# if H4_TOKEN:
#     print("pulling repo")
#     # try:
#     #     shutil.rmtree("./evals/")
#     # except:
#     #     pass

#     repo = Repository(
#         local_dir="./evals/", clone_from=LMEH_REPO, use_auth_token=H4_TOKEN, repo_type="dataset"
#     )
#     repo.git_pull()

METRICS = ["acc_norm", "acc_norm", "acc_norm", "mc2"]
BENCHMARKS = ["arc_challenge", "hellaswag", "hendrycks", "truthfulqa_mc"]
BENCH_TO_NAME = {
    "arc_challenge":"ARC (25-shot) ⬆️",
    "hellaswag":"HellaSwag (10-shot) ⬆️",
    "hendrycks":"MMLU (5-shot) ⬆️",
    "truthfulqa_mc":"TruthQA (0-shot) ⬆️",
}

def make_clickable_model(model_name):
    # remove user from model name
    #model_name_show = ' '.join(model_name.split('/')[1:])

    link = "https://huggingface.co/" + model_name
    return f'<a target="_blank" href="{link}" style="color: blue; text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'

@dataclass
class EvalResult:
    org : str
    model : str
    is_8bit : bool
    results : dict

    def to_dict(self):
        data_dict = {}
        data_dict["base_model"] = make_clickable_model(f"{self.org}/{self.model}")
        data_dict["total ⬆️"] = sum([v for k,v in self.results.items()])
        data_dict["# params"] = "unknown (todo)"

        for benchmark in BENCHMARKS:
            if not benchmark in self.results.keys():
                self.results[benchmark] = None

        for k,v in BENCH_TO_NAME.items():
            data_dict[v] = self.results[k]

        return data_dict


def parse_eval_result(json_filepath: str) -> Tuple[str, dict]:
    with open(json_filepath) as fp:
        data = json.load(fp)

    path_split = json_filepath.split("/")
    org = None
    model = path_split[-3]
    is_8bit = path_split[-2] == "8bit"
    if len(path_split)== 5:
        # handles gpt2 type models that don't have an org
        result_key = f"{path_split[-3]}_{path_split[-2]}"
    else:
        result_key = f"{path_split[-4]}_{path_split[-3]}_{path_split[-2]}"
        org = path_split[-4]

    eval_result = None
    for benchmark, metric in zip(BENCHMARKS, METRICS):
        if benchmark in json_filepath:
            accs = np.array([v[metric] for k, v in data["results"].items()])
            mean_acc = np.mean(accs)
            eval_result = EvalResult(org, model, is_8bit, {benchmark:mean_acc})

    return result_key, eval_result


def get_eval_results() -> List[EvalResult]:
    json_filepaths = glob.glob("evals/eval_results/**/*.json", recursive=True)
    eval_results = {}

    for json_filepath in json_filepaths:
        result_key, eval_result = parse_eval_result(json_filepath)
        if result_key in eval_results.keys():
            eval_results[result_key].results.update(eval_result.results)
        else:
            eval_results[result_key] = eval_result

    eval_results = [v for k,v in eval_results.items()]

    return eval_results

def get_eval_results_dicts() -> List[Dict]:
    eval_results = get_eval_results()

    return [e.to_dict() for e in eval_results]

eval_results_dict = get_eval_results_dicts()
print(eval_results_dict)
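The path parsing in parse_eval_result implies a layout of roughly evals/eval_results/<org>/<model>/<precision>/<model>-eval_<benchmark>.json, one JSON file per benchmark, with org-less models (e.g. gpt2) one level shallower. A hypothetical walk-through under that assumption follows; the paths, the "16bit" directory name, and the scores are illustrative, not taken from the repo:

from utils import parse_eval_result, get_eval_results_dicts

# Assume these two files exist and hold lm-eval-harness style {"results": {...}} payloads:
#   evals/eval_results/some-org/some-model/16bit/some-model-eval_arc_challenge.json
#   evals/eval_results/some-org/some-model/16bit/some-model-eval_hellaswag.json
key_a, res_a = parse_eval_result(
    "evals/eval_results/some-org/some-model/16bit/some-model-eval_arc_challenge.json"
)
key_b, res_b = parse_eval_result(
    "evals/eval_results/some-org/some-model/16bit/some-model-eval_hellaswag.json"
)
# Both files map to the same key, so get_eval_results() merges their
# per-benchmark scores into a single EvalResult:
assert key_a == key_b == "some-org_some-model_16bit"

rows = get_eval_results_dicts()
# e.g. [{"base_model": '<a ...>some-org/some-model</a>', "total ⬆️": 1.23,
#        "# params": "unknown (todo)", "ARC (25-shot) ⬆️": 0.52,
#        "HellaSwag (10-shot) ⬆️": 0.71, "MMLU (5-shot) ⬆️": None, ...}]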