aaditya committed on
Commit 8903d17 · 1 Parent(s): 9862d77

Update src/display/about.py

Files changed (1)
  1. src/display/about.py +2 -3
src/display/about.py CHANGED
@@ -1,15 +1,14 @@
 from src.display.utils import ModelType
 
-TITLE = """<h1 align="center" id="space-title">🧬 Biomedical Knowledge Probing Leaderboard 🧬</h1>"""
+TITLE = """<h1 align="center" id="space-title">🩺 Open Medical LLM Leaderboard 🩺 </h1>"""
 
 INTRODUCTION_TEXT = """
-📐 This LB aims to track, rank and evaluate biomedical factual knowledge probing results in LLMs.
+📐 This LB aims to track, rank and evaluate Medical Domain LLMs
 """
 
 # Submit a model for automated evaluation on the [Edinburgh International Data Facility](https://www.epcc.ed.ac.uk/hpc-services/edinburgh-international-data-facility) (EIDF) GPU cluster on the "Submit" page.
 # The leaderboard's backend runs the great [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) - more details in the "About" page.
 # """
-
 # About Tab
 LLM_BENCHMARKS_TEXT = f"""
 # Context