from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # Each Task maps a benchmark (the task key in the results json) and a metric
    # (the metric key in the results json) to a column name displayed in the
    # leaderboard; see the illustrative results layout sketched below.
    task0 = Task("0-shot", "acc", "0-shot")
    task1 = Task("1-shot", "acc", "1-shot")
    task5 = Task("5-shot", "acc", "5-shot")

NUM_FEWSHOT = 0  # Change to match your few-shot setting
# ---------------------------------------------------
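# Illustrative sketch of the expected results layout (an assumption based on the
# standard Hugging Face demo-leaderboard format, not verified against this repo):
# each results json is expected to contain the benchmark keys declared above, e.g.
#
#   {
#     "config": {"model_name": "org/model", ...},
#     "results": {
#       "0-shot": {"acc": 0.62},
#       "1-shot": {"acc": 0.65},
#       "5-shot": {"acc": 0.71}
#     }
#   }
#
# Task.benchmark selects the entry under "results", Task.metric selects the value
# inside it, and Task.col_name is the column header shown in the leaderboard table.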



# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">PsychoLex leaderboard</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
Accuracy of LLMs on the PsychoLexEval dataset in Persian.
"""

# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
"""

EVALUATION_QUEUE_TEXT = """
"""

CITATION_BUTTON_LABEL = ""
CITATION_BUTTON_TEXT = r"""
"""