Omartificial-Intelligence-Space committed
Commit 9f383fe
1 Parent(s): c1ddcde

update app.py
app.py CHANGED
@@ -14,16 +14,16 @@ from src.about import (
 )
 from src.display.css_html_js import custom_css
 from src.display.utils import (
-    BENCHMARK_COLS,
+    COLUMNS,
     COLS,
+    BENCHMARK_COLS,
     EVAL_COLS,
     EVAL_TYPES,
-    COLUMNS, # Added this line
-    AutoEvalColumn,
     ModelType,
     WeightType,
     Precision
 )
+
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
@@ -58,21 +58,18 @@ LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS,
 
 def init_leaderboard(dataframe):
     if dataframe is None or dataframe.empty:
-        # Instead of raising an error, display an empty leaderboard with a message
         print("Leaderboard DataFrame is empty. No models have been evaluated yet.")
-
-        dataframe = pd.DataFrame(columns=[c.name for c in COLUMNS])
-        # Optionally, you can add a message to the interface to inform users
+        dataframe = pd.DataFrame(columns=[col.name for col in COLUMNS])
     return Leaderboard(
         value=dataframe,
-        datatype=[
+        datatype=[col.type for col in COLUMNS],
         select_columns=SelectColumns(
-            default_selection=[
-            cant_deselect=[
+            default_selection=[col.name for col in COLUMNS if col.displayed_by_default],
+            cant_deselect=[col.name for col in COLUMNS if col.never_hidden],
             label="Select Columns to Display:",
         ),
-        search_columns=[
-        hide_columns=[
+        search_columns=[col.name for col in COLUMNS if col.name in ["model", "license"]],
+        hide_columns=[col.name for col in COLUMNS if col.hidden],
         filter_columns=[
             ColumnFilter("model_type", type="checkboxgroup", label="Model types"),
             ColumnFilter("precision", type="checkboxgroup", label="Precision"),
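The second hunk makes init_leaderboard tolerate an empty results set: instead of the app failing before any model has been evaluated, the function logs a message and hands Leaderboard an empty DataFrame that still carries the full column schema. A minimal check of that fallback in isolation, reusing the hypothetical COLUMNS sketch above (pandas only; the real function additionally wraps the frame in the Leaderboard component):

# Isolated sketch of the patched fallback path, assuming the
# hypothetical COLUMNS definition above.
import pandas as pd

def empty_leaderboard_frame():
    # One (empty) column per descriptor, so column selection,
    # search, and filters still see the expected schema.
    return pd.DataFrame(columns=[col.name for col in COLUMNS])

df = empty_leaderboard_frame()
print(df.empty)          # True: no evaluated models yet
print(list(df.columns))  # ["model", "average", "license", "model_type", "precision"]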