import streamlit as st
import pandas as pd

# Set the page title and main heading
st.set_page_config(page_title="AEOLLM", page_icon="👋")
st.title("NTCIR-18 Automatic Evaluation of LLMs (AEOLLM) Task")

# Create the navigation menu in the sidebar
st.sidebar.title("Navigation")
page = st.sidebar.radio("Go to", ["Introduction", "Methodology", "Datasets", "Important Dates", "Evaluation Measures", "Data and File format", "Submit", "LeaderBoard", "Organisers", "References"])

# Display different content depending on the selected page
if page == "Introduction":
    st.header("Introduction")
    st.markdown("""
The Automatic Evaluation of LLMs (AEOLLM) task is a new core task in [NTCIR-18](http://research.nii.ac.jp/ntcir/ntcir-18) to support in-depth research on the evaluation of large language models (LLMs). As LLMs become increasingly popular in both academia and industry, how to effectively evaluate their capabilities becomes a critical but still challenging issue. Existing methods fall into two types: manual evaluation, which is expensive, and automatic evaluation, which faces many limitations in task format (most tasks are multiple-choice questions) and evaluation criteria (dominated by reference-based metrics). To advance innovation in automatic evaluation, we propose the AEOLLM task, which focuses on generative tasks and encourages reference-free methods. In addition, we set up diverse subtasks such as summary generation, non-factoid question answering, text expansion, and dialogue generation to comprehensively test different methods. We believe that the AEOLLM task will facilitate the development of the LLM community.
    """)

elif page == "Methodology":
    st.header("Methodology")
    st.image("asserts/method.svg", use_column_width=True)
    st.markdown("""
<ol>
  <li>First, we choose four subtasks as shown in the table below:</li>
  <table>
    <thead>
      <tr>
        <th style="text-align: left">Task</th>
        <th style="text-align: left">Description</th>
        <th style="text-align: left">Dataset</th>
      </tr>
    </thead>
    <tbody>
      <tr>
        <td style="text-align: left">Summary Generation (SG)</td>
        <td style="text-align: left">write a summary for the specified text</td>
        <td style="text-align: left">XSum: over 226k news articles</td>
      </tr>
      <tr>
        <td style="text-align: left">Non-Factoid QA (NFQA)</td>
        <td style="text-align: left">construct long-form answers to open-ended non-factoid questions</td>
        <td style="text-align: left">NF_CATS: 12k non-factoid questions</td>
      </tr>
      <tr>
        <td style="text-align: left">Text Expansion (TE)</td>
        <td style="text-align: left">given a theme, participants need to generate stories related to the theme</td>
        <td style="text-align: left">WritingPrompts: 303k story themes2</td>
      </tr>
      <tr>
        <td style="text-align: left">Dialogue Generation (DG)</td>
        <td style="text-align: left">generate human-like responses to numerous topics in daily conversation contexts</td>
        <td style="text-align: left">DailyDialog: 13k daily conversation contexts</td>
      </tr>
    </tbody>
  </table>
  <li>Second, during the competition, we choose a series of popular LLMs to generate answers.</li>
  <li>Third, we manually annotate the answer sets for each question, which will be used as gold standards for evaluating the performance of different evaluation methods.</li>
  <li>Last, we will collect evaluation results from participants and calculate consistency with manually annotated results. We will use Accuracy, Kendall’s tau and Spearman correlation coefficient as the evaluation metrics.</li>
</ol>
    """,unsafe_allow_html=True)

elif page == "Datasets":
    st.header("Datasets")
    st.markdown("""
<p>A brief description of the specific dataset we used, along with the original download link, is provided below:</p>
<ul>
  <li><strong>Summary Generation (SG): <a href="https://huggingface.co/datasets/EdinburghNLP/xsum">Xsum</a></strong>: A real-world single-document news summarization dataset collected from online articles by the British Broadcasting Corporation (BBC), containing over 220 thousand news documents.</li>
  <li><strong>Non-Factoid QA (NFQA): <a href="https://github.com/Lurunchik/NF-CATS">NF_CATS</a></strong>: A dataset containing 12k natural questions divided into eight categories.</li>
  <li><strong>Text Expansion (TE): <a href="https://huggingface.co/datasets/euclaise/writingprompts">WritingPrompts</a></strong>: A large dataset of 300K human-written stories paired with writing prompts from an online forum.</li>
  <li><strong>Dialogue Generation (DG): <a href="https://huggingface.co/datasets/daily_dialog">DailyDialog</a></strong>: A high-quality dataset of 13k multi-turn dialogues. The language is human-written and less noisy.</li>
</ul>
<p>For your convenience, we have released <strong>the training set</strong> (with human-annotated results) and <strong>the test set</strong> (without human-annotated results) on <a href="https://huggingface.co/datasets/THUIR/AEOLLM">https://huggingface.co/datasets/THUIR/AEOLLM</a>, which you can easily download.</p>
    """,unsafe_allow_html=True)

elif page == "Important Dates":
    st.header("Important Dates")
    st.markdown("""
<p><em>All deadlines are at 11:59pm in the Anywhere on Earth (AOE) timezone.</em><br />
<span class="event"><strong>Kickoff Event</strong>:</span> <span class="date">March 29, 2024</span><br />
<span class="event"><strong>Dataset Release</strong>:</span> <span class="date">👉May 1, 2024</span><br />
<span class="event"><strong>System Output Submission Deadline</strong>:</span> <span class="date">Jan 15, 2025</span><br />
<span class="event"><strong>Evaluation Results Release</strong>:</span> <span class="date">Feb 1, 2025</span>  <br />
<span class="event"><strong>Task overview release (draft)</strong>:</span> <span class="date">Feb 1, 2025</span><br />
<span class="event"><strong>Submission Due of Participant Papers (draft)</strong>:</span> <span class="date">March 1, 2025</span><br />
<span class="event"><strong>Camera-Ready Participant Paper Due</strong>:</span> <span class="date">May 1, 2025</span><br />
<span class="event"><strong>NTCIR-18 Conference</strong>:</span> <span class="date">Jun 10-13 2025</span><br /></p>
    """,unsafe_allow_html=True)
elif page == "Evaluation Measures":
    st.header("Evaluation Measures")
    st.markdown("""
- **Acc (Accuracy):** The proportion of pairwise preference results on which the evaluation method agrees with the human annotations. Specifically, we first convert individual scores (ranks) into pairwise preferences and then calculate consistency with the human annotations.
- **Kendall's tau:** Measures the ordinal association between two ranked variables.
  
  $$
  \\tau=\\frac{C-D}{\\frac{1}{2}n(n-1)}
  $$

  where:
  - C is the number of concordant pairs,
  - D is the number of discordant pairs,
  - n is the number of items being ranked.
- **Spearman's Rank Correlation Coefficient:** Measures the strength and direction of the association between two ranked variables. 
    $$
        \\rho = 1 - \\frac{6 \\sum d_i^2}{n(n^2 - 1)}
    $$
    where:
    - $d_i$ is the difference between the ranks of corresponding elements in the two lists,
    - $n$ is the number of elements.
    """,unsafe_allow_html=True)
elif page == "Data and File format":
    st.header("Data and File format")
    st.markdown("""
<p>We follow a format similar to the one used by most <strong>TREC submissions</strong>, which is described below. White space is used to separate columns. The width of the columns is not important, but it is important to have exactly five columns per line with at least one space between the columns.</p>
<p><strong>taskId  questionId  answerId  score  rank</strong></p>
<ol>
  <li>the first column is taskId (indexes the different tasks)</li>
  <li>the second column is questionId (indexes the different questions within the same task)</li>
  <li>the third column is answerId (indexes the answers provided by different LLMs to the same question)</li>
  <li>the fourth column is score (the score given to the answer by the participant)</li>
  <li>the fifth column is rank (the rank of the answer among all answers to the same question)</li>
</ol>
    """,unsafe_allow_html=True)
elif page == "Submit":
    st.header("Submit")
    st.markdown("""
TAB
    """)
elif page == "LeaderBoard":
    st.header("LeaderBoard")
    # Description
    st.markdown("""
This leaderboard shows the performance of the **automatic evaluation methods of LLMs** submitted by the **AEOLLM participating teams** on four tasks:
- Dialogue Generation (DG)
- Text Expansion (TE)
- Summary Generation (SG)
- Non-Factoid QA (NFQA)
    """, unsafe_allow_html=True)
    # Create example data

    # teamId is the unique identifier
    DG = {
        "teamId": ["baseline1", "baseline2", "baseline3", "baseline4"],
        "methods": ["chatglm3-6b", "baichuan2-13b", "chatglm-pro", "gpt-4o-mini"],
        "accuracy": [0.5806, 0.5483, 0.6001, 0.6472],
        "kendall's tau": [0.3243, 0.1739, 0.3042, 0.4167],
        "spearman": [0.3505, 0.1857, 0.3264, 0.4512]
    }

    df1 = pd.DataFrame(DG)

    TE = {
        "teamId": ["baseline1", "baseline2", "baseline3", "baseline4"],
        "methods": ["chatglm3-6b", "baichuan2-13b", "chatglm-pro", "gpt-4o-mini"],
        "accuracy": [0.5107, 0.5050, 0.5461, 0.5581],
        "kendall's tau": [0.1281, 0.0635, 0.2716, 0.3864],
        "spearman": [0.1352, 0.0667, 0.2867, 0.4157]
    }
    df2 = pd.DataFrame(TE)

    SG = {
        "teamId": ["baseline1", "baseline2", "baseline3", "baseline4"],
        "methods": ["chatglm3-6b", "baichuan2-13b", "chatglm-pro", "gpt-4o-mini"],
        "accuracy": [0.6504, 0.6014, 0.7162, 0.7441],
        "kendall's tau": [0.3957, 0.2688, 0.5092, 0.5001],
        "spearman": [0.4188, 0.2817, 0.5403, 0.5405],
    }
    df3 = pd.DataFrame(SG)

    NFQA = {
        "teamId": ["baseline1", "baseline2", "baseline3", "baseline4"],
        "methods": ["chatglm3-6b", "baichuan2-13b", "chatglm-pro", "gpt-4o-mini"],
        "accuracy": [0.5935, 0.5817, 0.7000, 0.7203],
        "kendall's tau": [0.2332, 0.2389, 0.4440, 0.4235],
        "spearman": [0.2443, 0.2492, 0.4630, 0.4511]
    }
    df4 = pd.DataFrame(NFQA)

    dfs = [df1, df2, df3, df4]
    for d in dfs:
        for col in d.select_dtypes(include=['float64', 'int64']).columns:
            d[col] = d[col].apply(lambda x: f"{x:.4f}")

    # Create tabs for the four subtasks
    tab1, tab2, tab3, tab4 = st.tabs(["DG", "TE", "SG", "NFQA"])

    with tab1:
        st.markdown("""Task: Dialogue Generation; Dataset: DialyDialog""", unsafe_allow_html=True)
        st.dataframe(df1, use_container_width=True)

    with tab2:
        st.markdown("""Task: Text Expansion; Dataset: WritingPrompts""", unsafe_allow_html=True)
        st.dataframe(df2, use_container_width=True)

    with tab3:
        st.markdown("""Task: Summary Generation; Dataset: Xsum""", unsafe_allow_html=True)
        st.dataframe(df3, use_container_width=True)

    with tab4:
        st.markdown("""Task: Non-Factoid QA; Dataset: NF_CATS""", unsafe_allow_html=True)
        st.dataframe(df4, use_container_width=True)
elif page == "Organisers":
    st.header("Organisers")
    st.markdown("""
<em>Yiqun Liu</em> [yiqunliu@tsinghua.edu.cn] (Tsinghua University)<br />
<em>Qingyao Ai</em> [aiqy@tsinghua.edu.cn] (Tsinghua University)<br />
<em>Junjie Chen</em> [chenjj826@gmail.com] (Tsinghua University) <br />
<em>Zhumin Chu</em> [chuzm19@mails.tsinghua.edu.cn] (Tsinghua University)<br />
<em>Haitao Li</em> [liht22@mails.tsinghua.edu.cn] (Tsinghua University)""",unsafe_allow_html=True)
elif page == "References":
    st.header("References")
    st.markdown("""TAB""")