import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    EVALUATION_QUEUE_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    BENCHMARK_COLS,
    COLS,
    EVAL_COLS,
    EVAL_TYPES,
    AutoEvalColumn,
    ModelType,
    fields,
    WeightType,
    Precision,
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
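

# Restarting the Space re-runs this module, so the snapshot downloads below are retried.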
def restart_space():
    API.restart_space(repo_id=REPO_ID)
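

# Download the evaluation queue and results datasets from the Hub at startup.
# If either snapshot cannot be fetched, restart the Space so the download is retried.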
try:
    print(EVAL_REQUESTS_PATH)
    snapshot_download(
        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception:
    restart_space()
try:
    print(EVAL_RESULTS_PATH)
    snapshot_download(
        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception:
    restart_space()
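

# Build the leaderboard DataFrame and the finished/running/pending evaluation queue
# DataFrames from the downloaded snapshots.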
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
print(f"The leaderboard is {LEADERBOARD_DF}")
(
    finished_eval_queue_df,
    running_eval_queue_df,
    pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
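

# Wrap the results DataFrame in a Leaderboard component with column selection,
# search, and filter controls.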
def init_leaderboard(dataframe):
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(AutoEvalColumn)],
        select_columns=SelectColumns(
            default_selection=[
                c.name
                for c in fields(AutoEvalColumn)
                if c.displayed_by_default
                and c.name not in ['params', 'available_on_hub', 'hub', 'Model sha', 'Hub License']
            ],
            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
        filter_columns=[
            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
            ColumnFilter(
                AutoEvalColumn.params.name,
                type="slider",
                min=0.01,
                max=500,
                label="Select the number of parameters (B)",
            ),
            ColumnFilter(
                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=False
            ),
        ],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )
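

# Assemble the Gradio UI: leaderboard, about, and submission tabs, plus a citation block.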
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard = init_leaderboard(LEADERBOARD_DF)
            print(f"FINAL LEADERBOARD 1 {LEADERBOARD_DF}")
with gr.TabItem("π About", elem_id="llm-benchmark-tab-table", id=2): |
|
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text") |
|
|
|
with gr.TabItem("π Submit here! ", elem_id="llm-benchmark-tab-table", id=3): |
|
with gr.Column(): |
|
with gr.Row(): |
|
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text") |
|
|
|
with gr.Column(): |
|
with gr.Accordion( |
|
f"β
Finished Evaluations ({len(finished_eval_queue_df)})", |
|
open=False, |
|
): |
|
with gr.Row(): |
|
finished_eval_table = gr.components.Dataframe( |
|
value=finished_eval_queue_df, |
|
headers=EVAL_COLS, |
|
datatype=EVAL_TYPES, |
|
row_count=5, |
|
) |
|
with gr.Accordion( |
|
f"π Running Evaluation Queue ({len(running_eval_queue_df)})", |
|
open=False, |
|
): |
|
with gr.Row(): |
|
running_eval_table = gr.components.Dataframe( |
|
value=running_eval_queue_df, |
|
headers=EVAL_COLS, |
|
datatype=EVAL_TYPES, |
|
row_count=5, |
|
) |
|
|
|
with gr.Accordion( |
|
f"β³ Pending Evaluation Queue ({len(pending_eval_queue_df)})", |
|
open=False, |
|
): |
|
with gr.Row(): |
|
pending_eval_table = gr.components.Dataframe( |
|
value=pending_eval_queue_df, |
|
headers=EVAL_COLS, |
|
datatype=EVAL_TYPES, |
|
row_count=5, |
|
) |
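
            # Submissions are collected by email (see below) rather than through an
            # automated submit form; the add_new_eval helper imported above is not
            # wired up in this UI.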
            with gr.Row():
                gr.Markdown("# ✉️✨ Submit your model outputs!", elem_classes="markdown-text")
                gr.Markdown("Generate your model outputs for all the datasets using the ContextualBench code and email them to us at xnguyen@salesforce.com.", elem_classes="markdown-text")
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )
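
# Restart the Space every 30 minutes (1800 s) so newly uploaded results are
# re-downloaded on reboot, then launch the app.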
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()