Commit 9f383fe (parent: c1ddcde)

update app.py

Files changed (1): app.py (+9, -12)
app.py CHANGED
@@ -14,16 +14,16 @@ from src.about import (
 )
 from src.display.css_html_js import custom_css
 from src.display.utils import (
-    BENCHMARK_COLS,
+    COLUMNS,
     COLS,
+    BENCHMARK_COLS,
     EVAL_COLS,
     EVAL_TYPES,
-    COLUMNS,  # Added this line
-    AutoEvalColumn,
     ModelType,
     WeightType,
     Precision
 )
+
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
@@ -58,21 +58,18 @@ LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS,
 
 def init_leaderboard(dataframe):
     if dataframe is None or dataframe.empty:
-        # Instead of raising an error, display an empty leaderboard with a message
         print("Leaderboard DataFrame is empty. No models have been evaluated yet.")
-        # Create an empty DataFrame with the necessary columns
-        dataframe = pd.DataFrame(columns=[c.name for c in COLUMNS])
-        # Optionally, you can add a message to the interface to inform users
+        dataframe = pd.DataFrame(columns=[col.name for col in COLUMNS])
     return Leaderboard(
         value=dataframe,
-        datatype=[c.type for c in COLUMNS],
+        datatype=[col.type for col in COLUMNS],
         select_columns=SelectColumns(
-            default_selection=[c.name for c in COLUMNS if c.displayed_by_default],
-            cant_deselect=[c.name for c in COLUMNS if c.never_hidden],
+            default_selection=[col.name for col in COLUMNS if col.displayed_by_default],
+            cant_deselect=[col.name for col in COLUMNS if col.never_hidden],
             label="Select Columns to Display:",
         ),
-        search_columns=[c.name for c in COLUMNS if c.name in ["model", "license"]],
-        hide_columns=[c.name for c in COLUMNS if c.hidden],
+        search_columns=[col.name for col in COLUMNS if col.name in ["model", "license"]],
+        hide_columns=[col.name for col in COLUMNS if col.hidden],
         filter_columns=[
             ColumnFilter("model_type", type="checkboxgroup", label="Model types"),
             ColumnFilter("precision", type="checkboxgroup", label="Precision"),