import streamlit as st
import pandas as pd
import datetime
from huggingface_hub import HfApi
import json

# Global parameters
MAX_SUBMISSIONS_PER_DAY = 3  # maximum number of submissions allowed per day
submissions_log = {}  # records each user's submission count for the current day

# Get the user's unique ID (via Hugging Face OAuth2 authentication)
def get_user_id():
    api = HfApi()
    user_info = api.whoami(token=st.secrets["hf_api_token"])
    return user_info["name"]

# Check whether the user is still under today's submission limit
def check_submission_limit(user_id):
    today = datetime.date.today()
    if user_id in submissions_log:
        if submissions_log[user_id]["date"] == today:
            return submissions_log[user_id]["count"] < MAX_SUBMISSIONS_PER_DAY
        else:
            # A new day: reset the counter
            submissions_log[user_id] = {"date": today, "count": 0}
            return True
    else:
        submissions_log[user_id] = {"date": today, "count": 0}
        return True

# Update the user's submission count
def update_submission_count(user_id):
    submissions_log[user_id]["count"] += 1

# CSS styles
st.markdown("""
""", unsafe_allow_html=True)

# Title
st.title('🏆AEOLLM Leaderboard')

# Description
st.markdown("""
This leaderboard shows the performance of the **automatic evaluation methods of LLMs** submitted by the **AEOLLM team** on four tasks:
- Summary Generation (SG)
- Non-Factoid QA (NFQA)
- Dialogue Generation (DG)
- Text Expansion (TE)

Details of AEOLLM can be found at the link: [https://cjj826.github.io/AEOLLM/](https://cjj826.github.io/AEOLLM/)

Submit your result here (.json):
""", unsafe_allow_html=True)

# user_id = get_user_id()
# st.write(f"Welcome, {user_id}!")

# # Check the user's submission limit
# if check_submission_limit(user_id):
#     st.write(f"You can submit {MAX_SUBMISSIONS_PER_DAY - submissions_log[user_id]['count']} more time(s) today.")

#     # File uploader widget
#     uploaded_file = st.file_uploader("Choose a file", type=["json"])

#     # Submit button
#     if st.button("Submit file"):
#         if uploaded_file is not None:
#             # Read the file content
#             file_content = uploaded_file.read().decode("utf-8")
#             # Parse the content as JSON
#             try:
#                 json_data = json.loads(file_content)
#                 st.success("File submitted successfully!")
#                 st.json(json_data)  # display the uploaded JSON data
#             except json.JSONDecodeError:
#                 st.error("Could not parse the JSON file. Please make sure the format is correct.")
#         else:
#             st.warning("Please upload a file first!")
# else:
#     st.error("You have reached today's submission limit. Please try again tomorrow.")

# Example data
SG = {
    "methods": ["Model A", "Model B", "Model C"],
    "team": ["U1", "U2", "U3"],
    "acc": [0.75, 0.64, 0.83],
    "tau": [0.05, 0.28, 0.16],
    "s": [0.12, 0.27, 0.18],
}
df1 = pd.DataFrame(SG)

NFQA = {
    "methods": ["Model A", "Model B", "Model C"],
    "team": ["U1", "U2", "U3"],
    "acc": [0.75, 0.64, 0.83],
    "tau": [0.05, 0.28, 0.16],
    "s": [0.12, 0.27, 0.18],
}
df2 = pd.DataFrame(NFQA)

DG = {
    "methods": ["Model A", "Model B", "Model C"],
    "team": ["U1", "U2", "U3"],
    "acc": [0.75, 0.64, 0.83],
    "tau": [0.05, 0.28, 0.16],
    "s": [0.12, 0.27, 0.18],
}
df3 = pd.DataFrame(DG)

TE = {
    "methods": ["Model A", "Model B", "Model C"],
    "team": ["U1", "U2", "U3"],
    "acc": [0.75, 0.64, 0.83],
    "tau": [0.05, 0.28, 0.16],
    "s": [0.12, 0.27, 0.18],
}
df4 = pd.DataFrame(TE)

# Create tabs
tab1, tab2, tab3, tab4 = st.tabs(["SG", "NFQA", "DG", "TE"])

# Tab 1: Summary Generation
with tab1:
    st.header("Summary Generation")
    st.dataframe(df1, use_container_width=True)

# Tab 2: Non-Factoid QA
with tab2:
    st.header("Non-Factoid QA")
    st.dataframe(df2, use_container_width=True)

# Tab 3: Dialogue Generation
with tab3:
    st.header("Dialogue Generation")
    st.dataframe(df3, use_container_width=True)

# Tab 4: Text Expansion
with tab4:
    st.header("Text Expansion")
    st.dataframe(df4, use_container_width=True)
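
# ---------------------------------------------------------------------------
# Note (sketch, not wired into the page above): `update_submission_count` is
# defined but never called, so the per-day counter in `submissions_log` would
# never advance even if the commented-out upload flow were re-enabled. A
# minimal way to close that gap, assuming the upload flow above is restored,
# is to record the submission right after the JSON parses successfully:
#
#     json_data = json.loads(file_content)
#     update_submission_count(user_id)  # count this upload against today's quota
#     st.success("File submitted successfully!")
#
# Also note that `submissions_log` is an in-memory dict, so counts reset
# whenever the Streamlit process restarts; persistent storage would be needed
# to enforce the limit reliably.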