# Leaderboard display constants and task definitions for the
# Khayyam Challenge (PersianMMLU) Hugging Face Space.
from dataclasses import dataclass
from enum import Enum
@dataclass
class Task:
    """One leaderboard task.

    `Tasks` below instantiates this as `Task(benchmark, metric, col_name)`,
    so the `@dataclass` decorator (imported at the top of the file) is
    required to generate `__init__`; without it every member of `Tasks`
    raises TypeError at import time.
    """
    benchmark: str  # task key in the results json file
    metric: str     # metric key in the results json file
    col_name: str   # column name displayed in the leaderboard
# Init: update the Tasks enum below with your leaderboard's specific keys.
class Tasks(Enum):
    """All evaluated Khayyam Challenge (PersianMMLU) tasks.

    Each member's value is Task(benchmark, metric, col_name):
    - benchmark: task key in the results json file
    - metric: metric key in the results json file ("Acc" for all tasks)
    - col_name: column header displayed in the leaderboard
    Suffixes denote school level: LPS/UPS = lower/upper primary school,
    LSS/USS = lower/upper secondary school (assumption from the naming
    pattern — TODO confirm against the dataset card).
    """
    Analytical_Talent_LSS = Task("Analytical Talent LSS", "Acc", "Analytical Talent LSS")
    Calculus_USS = Task("Calculus USS", "Acc", "Calculus USS")
    Chemistry_USS = Task("Chemistry USS", "Acc", "Chemistry USS")
    Discrete_Mathematics_USS = Task("Discrete Mathematics USS", "Acc", "Discrete Mathematics USS")
    Economy_USS = Task("Economy USS", "Acc", "Economy USS")
    Geography_USS = Task("Geography USS", "Acc", "Geography USS")
    Geology_USS = Task("Geology USS", "Acc", "Geology USS")
    Geometry_USS = Task("Geometry USS", "Acc", "Geometry USS")
    History_USS = Task("History USS", "Acc", "History USS")
    Logic_USS = Task("Logic USS", "Acc", "Logic USS")
    Mathematical_and_Logical_Intelligence_UPS = Task("Mathematical and Logical Intelligence UPS", "Acc", "Mathematical and Logical Intelligence UPS")
    Mathematics_LPS = Task("Mathematics LPS", "Acc", "Mathematics LPS")
    Mathematics_LSS = Task("Mathematics LSS", "Acc", "Mathematics LSS")
    Mathematics_UPS = Task("Mathematics UPS", "Acc", "Mathematics UPS")
    Mathematics_USS = Task("Mathematics USS", "Acc", "Mathematics USS")
    Mathematics_and_Statistics_USS = Task("Mathematics and Statistics USS", "Acc", "Mathematics and Statistics USS")
    Natural_Sciences_LPS = Task("Natural Sciences LPS", "Acc", "Natural Sciences LPS")
    Natural_Sciences_LSS = Task("Natural Sciences LSS", "Acc", "Natural Sciences LSS")
    Natural_Sciences_UPS = Task("Natural Sciences UPS", "Acc", "Natural Sciences UPS")
    Persian_Literature_LPS = Task("Persian Literature LPS", "Acc", "Persian Literature LPS")
    Persian_Literature_LSS = Task("Persian Literature LSS", "Acc", "Persian Literature LSS")
    Persian_Literature_UPS = Task("Persian Literature UPS", "Acc", "Persian Literature UPS")
    Persian_Literature_USS = Task("Persian Literature USS", "Acc", "Persian Literature USS")
    Philosophy_USS = Task("Philosophy USS", "Acc", "Philosophy USS")
    Physics_USS = Task("Physics USS", "Acc", "Physics USS")
    Probability_and_Statistics_USS = Task("Probability and Statistics USS", "Acc", "Probability and Statistics USS")
    Psychology_USS = Task("Psychology USS", "Acc", "Psychology USS")
    Social_Studies_LPS = Task("Social Studies LPS", "Acc", "Social Studies LPS")
    Social_Studies_LSS = Task("Social Studies LSS", "Acc", "Social Studies LSS")
    Social_Studies_UPS = Task("Social Studies UPS", "Acc", "Social Studies UPS")
    Sociology_USS = Task("Sociology USS", "Acc", "Sociology USS")
    Speed_and_Accuracy_UPS = Task("Speed and Accuracy UPS", "Acc", "Speed and Accuracy UPS")
    Theology_LPS = Task("Theology LPS", "Acc", "Theology LPS")
    Theology_LSS = Task("Theology LSS", "Acc", "Theology LSS")
    Theology_UPS = Task("Theology UPS", "Acc", "Theology UPS")
    Theology_USS = Task("Theology USS", "Acc", "Theology USS")
    Verbal_and_Linguistic_Intelligence_UPS = Task("Verbal and Linguistic Intelligence UPS", "Acc", "Verbal and Linguistic Intelligence UPS")
    Biology_USS = Task("Biology USS", "Acc", "Biology USS")
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">Khayyam Challenge (PersianMMLU)</h1>"""

# What does your leaderboard evaluate? (left empty for now)
INTRODUCTION_TEXT = """"""
# Intro text
# """

# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f""""""
# ## How it works
# ## Reproducibility
# To reproduce our results, here is the commands you can run:
# """

EVALUATION_QUEUE_TEXT = """In progress"""
# ## Some good practices before submitting a model
# ### 1) Make sure you can load your model and tokenizer using AutoClasses:
# ```python
# from transformers import AutoConfig, AutoModel, AutoTokenizer
# config = AutoConfig.from_pretrained("your model name", revision=revision)
# model = AutoModel.from_pretrained("your model name", revision=revision)
# tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
# ```
# If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
# Note: make sure your model is public!
# Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
# ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
# It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
# ### 3) Make sure your model has an open license!
# This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
# ### 4) Fill up your model card
# When we add extra information about models to the leaderboard, it will be automatically taken from the model card
# ## In case of model failure
# If your model is displayed in the `FAILED` category, its execution stopped.
# Make sure you have followed the above steps first.
# If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
# """

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
# BibTeX entry shown when the user clicks the citation button.
CITATION_BUTTON_TEXT = r"""
@article{ghahroodi2024khayyam,
  title={Khayyam Challenge (PersianMMLU): Is Your LLM Truly Wise to The Persian Language?},
  author={Ghahroodi, Omid and Nouri, Marzia and Sanian, Mohammad Vali and Sahebi, Alireza and Dastgheib, Doratossadat and Asgari, Ehsaneddin and Baghshah, Mahdieh Soleymani and Rohban, Mohammad Hossein},
  journal={arXiv preprint arXiv:2404.06644},
  year={2024}
}
"""