from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str

# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    Persian_MMLU = Task("Persian MMLU", "Exact Match", "Persian MMLU")
    Persian_Math = Task("Persian Math", "Math Equivalence", "Persian Math")
    ParsiNLU_Entailment = Task("ParsiNLU Entailment", "Exact Match", "ParsiNLU Entailment")
    FarsTail_Entailment = Task("FarsTail Entailment", "Exact Match", "FarsTail Entailment")
    ParsiNLU_Machine_Translation_Fa_En = Task("ParsiNLU Machine Translation Fa En", "English Sentence Bleu", "ParsiNLU Machine Translation Fa-En")
    ParsiNLU_Machine_Translation_En_Fa = Task("ParsiNLU Machine Translation En Fa", "Persian Sentence Bleu", "ParsiNLU Machine Translation En-Fa")
    ParsiNLU_Reading_Comprehension = Task("ParsiNLU Reading Comprehension", "Common Tokens", "ParsiNLU Reading Comprehension")
    Persian_News_Summary = Task("Persian News Summary", "Persian Rouge", "Persian News Summary")
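
# A minimal sketch of how these Task entries might be consumed when parsing a
# results file. The JSON layout assumed here (a top-level "results" object keyed
# by benchmark name, then by metric name) is hypothetical; adjust it to the
# actual output format of your ParsBench runs.
def load_scores(path: str) -> dict:
    """Map each leaderboard column name to its score from a results JSON file."""
    import json  # local import keeps this sketch self-contained

    with open(path, encoding="utf-8") as f:
        results = json.load(f)["results"]  # hypothetical top-level key

    # Each enum member's value is a Task dataclass instance, so its fields
    # select the benchmark entry and the metric within it.
    return {
        task.value.col_name: results[task.value.benchmark][task.value.metric]
        for task in Tasks
    }
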
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">ParsBench Leaderboard</h1>"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
<p>This leaderboard was created with the <a href="https://github.com/shahriarshm/parsbench">ParsBench</a> benchmarking toolkit.</p>
<p>It evaluates open-weight LLMs on Persian (Farsi) language tasks.</p>
<p><strong>Note:</strong> Evaluations of the GPT and Claude 3.5 Sonnet models are included only to allow a comparison between open LLMs and SoTA proprietary models.</p>
"""
# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
We evaluated the models on the following datasets:
- [Persian MMLU (Khayyam Challenge)](https://huggingface.co/datasets/raia-center/khayyam-challenge)
- [Persian Math](https://github.com/Ipouyall/Benchmarking_ChatGPT_for_Persian)
- [ParsiNLU Entailment](https://huggingface.co/datasets/persiannlp/parsinlu_entailment)
- [FarsTail Entailment](https://github.com/dml-qom/FarsTail)
- [ParsiNLU Machine Translation Fa-En](https://huggingface.co/datasets/persiannlp/parsinlu_translation_fa_en)
- [ParsiNLU Machine Translation En-Fa](https://huggingface.co/datasets/persiannlp/parsinlu_translation_en_fa)
- [ParsiNLU Reading Comprehension](https://huggingface.co/datasets/persiannlp/parsinlu_reading_comprehension)
- [Persian News Summary](https://huggingface.co/datasets/HooshvareLab/pn_summary)
"""
EVALUATION_QUEUE_TEXT = """
For now, please email shahriarshm81@gmail.com to submit a new evaluation request.
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@misc{ParsBench,
    title = {ParsBench Leaderboard},
    author = {Shahriar Shariati},
    howpublished = {\url{https://huggingface.co/spaces/ParsBench/leaderboard}},
    year = {2024}
}
"""