kushal-10 commited on
Commit
badc551
β€’
1 Parent(s): ad85ee9

single header, split models list (#3)

Browse files
Files changed (5) hide show
  1. app.py +35 -17
  2. src/assets/text_content.py +2 -1
  3. src/utils.py +122 -122
  4. versions/v0.9.csv +2 -4
  5. versions/v1.0.csv +8 -4
app.py CHANGED
@@ -1,25 +1,25 @@
1
  import gradio as gr
2
 
3
  from src.assets.text_content import TITLE, INTRODUCTION_TEXT
4
- from src.utils import get_data, compare_plots, filter_search
5
 
6
  ############################ For Leaderboards #############################
7
- DATA_PATH = 'versions'
8
- latest_flag = True #Set flag to include latest data in Details and Versions Tab
9
- latest_df, latest_vname, previous_df, previous_vname = get_data(DATA_PATH, latest_flag)
10
 
11
  global prev_df
12
- prev_df = previous_df[0]
13
  def select_prev_df(name):
14
- ind = previous_vname.index(name)
15
- prev_df = previous_df[ind]
16
  return prev_df
17
 
18
  ############################ For Plots ####################################
19
- global plot_df, MODEL_COLS
20
  plot_df = latest_df[0]
21
  MODEL_COLS = list(plot_df['Model'].unique())
22
-
23
 
24
  ############# MAIN APPLICATION ######################
25
  demo = gr.Blocks()
@@ -35,7 +35,7 @@ with demo:
35
  show_label=False,
36
  elem_id="search-bar",
37
  )
38
-
39
  leaderboard_table = gr.components.Dataframe(
40
  value=latest_df[0],
41
  elem_id="leaderboard-table",
@@ -57,16 +57,26 @@ with demo:
57
  leaderboard_table,
58
  queue=True
59
  )
 
60
  with gr.TabItem("πŸ“ˆ Plot", id=3):
61
  with gr.Row():
62
- model_cols = gr.CheckboxGroup(
63
- MODEL_COLS,
64
- label="Select Models πŸ€–",
65
  value=[],
66
  elem_id="column-select",
67
  interactive=True,
68
  )
69
 
 
 
 
 
 
 
 
 
 
70
  with gr.Row():
71
  plot_grdf = gr.DataFrame(
72
  value=plot_df,
@@ -76,9 +86,16 @@ with demo:
76
  # Output block for the plot
77
  plot_output = gr.Plot()
78
 
79
- model_cols.change(
80
  compare_plots,
81
- [plot_grdf, model_cols],
 
 
 
 
 
 
 
82
  plot_output,
83
  queue=True
84
  )
@@ -86,7 +103,7 @@ with demo:
86
  with gr.TabItem("πŸ”„ Versions and Details", elem_id="details", id=2):
87
  with gr.Row():
88
  ver_selection = gr.Dropdown(
89
- previous_vname, label="Select Version πŸ•ΉοΈ", value=previous_vname[0]
90
  )
91
  with gr.Row():
92
  search_bar_prev = gr.Textbox(
@@ -122,7 +139,8 @@ with demo:
122
  prev_table,
123
  queue=True
124
  )
125
-
126
  demo.load()
 
127
  demo.queue()
 
128
  demo.launch()
 
1
  import gradio as gr
2
 
3
  from src.assets.text_content import TITLE, INTRODUCTION_TEXT
4
+ from src.utils import compare_plots, filter_search, get_csv_data, split_models
5
 
6
  ############################ For Leaderboards #############################
7
+ # Get CSV data
8
+ global latest_df, all_dfs, all_vnames
9
+ latest_df, all_dfs, all_vnames = get_csv_data()
10
 
11
  global prev_df
12
+ prev_df = all_dfs[0]
13
  def select_prev_df(name):
14
+ ind = all_vnames.index(name)
15
+ prev_df = all_dfs[ind]
16
  return prev_df
17
 
18
  ############################ For Plots ####################################
19
+ global plot_df, MODEL_COLS, OPEN_MODELS, COMM_MODELS
20
  plot_df = latest_df[0]
21
  MODEL_COLS = list(plot_df['Model'].unique())
22
+ OPEN_MODELS, COMM_MODELS = split_models(MODEL_COLS)
23
 
24
  ############# MAIN APPLICATION ######################
25
  demo = gr.Blocks()
 
35
  show_label=False,
36
  elem_id="search-bar",
37
  )
38
+
39
  leaderboard_table = gr.components.Dataframe(
40
  value=latest_df[0],
41
  elem_id="leaderboard-table",
 
57
  leaderboard_table,
58
  queue=True
59
  )
60
+
61
  with gr.TabItem("πŸ“ˆ Plot", id=3):
62
  with gr.Row():
63
+ open_model_cols = gr.CheckboxGroup(
64
+ OPEN_MODELS,
65
+ label="Select Models - Open Weight 🌐",
66
  value=[],
67
  elem_id="column-select",
68
  interactive=True,
69
  )
70
 
71
+ with gr.Row():
72
+ comm_model_cols = gr.CheckboxGroup(
73
+ COMM_MODELS,
74
+ label="Select Models - Closed Weight πŸ’Ό",
75
+ value=[],
76
+ elem_id="column-select-2",
77
+ interactive=True,
78
+ )
79
+
80
  with gr.Row():
81
  plot_grdf = gr.DataFrame(
82
  value=plot_df,
 
86
  # Output block for the plot
87
  plot_output = gr.Plot()
88
 
89
+ open_model_cols.change(
90
  compare_plots,
91
+ [plot_grdf, open_model_cols, comm_model_cols],
92
+ plot_output,
93
+ queue=True
94
+ )
95
+
96
+ comm_model_cols.change(
97
+ compare_plots,
98
+ [plot_grdf, open_model_cols, comm_model_cols],
99
  plot_output,
100
  queue=True
101
  )
 
103
  with gr.TabItem("πŸ”„ Versions and Details", elem_id="details", id=2):
104
  with gr.Row():
105
  ver_selection = gr.Dropdown(
106
+ all_vnames, label="Select Version πŸ•ΉοΈ", value=all_vnames[0]
107
  )
108
  with gr.Row():
109
  search_bar_prev = gr.Textbox(
 
139
  prev_table,
140
  queue=True
141
  )
 
142
  demo.load()
143
+
144
  demo.queue()
145
+
146
  demo.launch()
src/assets/text_content.py CHANGED
@@ -1,13 +1,14 @@
1
  TITLE = """<h1 align="center" id="space-title"> πŸ† CLEM Leaderboard</h1>"""
2
 
3
  INTRODUCTION_TEXT = """
 
4
The CLEM Leaderboard aims to track, rank and evaluate current cLLMs (chat-optimized Large Language Models) with the suggested pronunciation “clems”.
5
 
6
  The benchmarking approach is described in [Clembench: Using Game Play to Evaluate Chat-Optimized Language Models as Conversational Agents](https://arxiv.org/abs/2305.13455).
7
 
8
  Source code benchmarking "clems" is available here: [Clembench](https://github.com/clembench/clembench)
9
 
10
- All generated files and results from the benchmark runs are available here: [clembench-runs](https://github.com/clembench/clembench-runs)
11
  """
12
 
13
  SHORT_NAMES = {
 
1
  TITLE = """<h1 align="center" id="space-title"> πŸ† CLEM Leaderboard</h1>"""
2
 
3
  INTRODUCTION_TEXT = """
4
+ <h6 align="center">
5
  The CLEM Leaderboard aims to track, rank and evaluate current cLLMs (chat-optimized Large Language Models) with the suggested pronounciation β€œclems”.
6
 
7
  The benchmarking approach is described in [Clembench: Using Game Play to Evaluate Chat-Optimized Language Models as Conversational Agents](https://arxiv.org/abs/2305.13455).
8
 
9
  Source code benchmarking "clems" is available here: [Clembench](https://github.com/clembench/clembench)
10
 
11
+ All generated files and results from the benchmark runs are available here: [clembench-runs](https://github.com/clembench/clembench-runs) </h6>
12
  """
13
 
14
  SHORT_NAMES = {
src/utils.py CHANGED
@@ -2,46 +2,58 @@ import os
2
  import pandas as pd
3
  import matplotlib.pyplot as plt
4
  import numpy as np
5
-
6
  from src.assets.text_content import SHORT_NAMES
7
 
8
- def update_cols(df: pd.DataFrame) -> pd.DataFrame:
 
 
 
 
9
  '''
10
- Change three header rows to a single header row
11
  Args:
12
- df: Raw dataframe containing 3 separate header rows
13
- Remove this function if the dataframe has only one header row
14
-
15
- Returns:
16
- df: Updated dataframe which has only 1 header row instead of 3
17
  '''
18
- default_cols = list(df.columns)
19
-
20
- # First 4 columns are initalised in 'update', Append additional columns for games Model, Clemscore, ALL(PLayed) and ALL(Main Score)
21
- update = ['Model', 'Clemscore', 'Played', 'Quality Score']
22
- game_metrics = default_cols[4:]
23
-
24
- # Change columns Names for each Game
25
- for i in range(len(game_metrics)):
26
- if i%3 == 0:
27
- game = game_metrics[i]
28
- update.append(str(game).capitalize() + "(Played)")
29
- update.append(str(game).capitalize() + "(Quality Score)")
30
- update.append(str(game).capitalize() + "(Quality Score[std])")
31
-
32
- # Create a dict to change names of the columns
33
- map_cols = {}
34
- for i in range(len(default_cols)):
35
- map_cols[default_cols[i]] = str(update[i])
36
-
37
- df = df.rename(columns=map_cols)
38
- df = df.iloc[2:]
39
-
40
- return df
 
 
 
 
 
 
41
 
42
  def process_df(df: pd.DataFrame) -> pd.DataFrame:
43
  '''
44
- Process dataframe - Remove repetition in model names, convert datatypes to sort by "float" instead of "str"
 
 
 
45
  Args:
46
  df: Unprocessed Dataframe (after using update_cols)
47
  Returns:
@@ -66,91 +78,77 @@ def process_df(df: pd.DataFrame) -> pd.DataFrame:
66
  else:
67
  models_list.append(splits[0] + "--" + splits[1])
68
  df[model_col_name] = models_list
 
 
 
 
 
 
 
 
69
 
 
 
 
 
 
70
  return df
71
 
72
- def get_data(path: str, flag: bool):
73
  '''
74
- Get a list of all version names and respective Dataframes
75
- Args:
76
- path: Path to the directory containing CSVs of different versions -> v0.9.csv, v1.0.csv, ....
77
- flag: Set this flag to include the latest version in Details and Versions tab
78
- Returns:
79
- latest_df: singular list containing dataframe of the latest version of the leaderboard with only 4 columns
80
- latest_vname: list of the name of latest version
81
- previous_df: list of dataframes for previous versions (can skip latest version if required)
82
- previous_vname: list of the names for the previous versions (INCLUDED IN Details and Versions Tab)
83
-
84
  '''
85
- # Check if Directory is empty
86
- list_versions = os.listdir(path)
87
- if not list_versions:
88
- print("Directory is empty")
 
 
 
 
 
 
 
 
89
 
90
- else:
91
- files = [file for file in list_versions if file.endswith('.csv')]
92
- files.sort(reverse=True)
93
- file_names = [os.path.splitext(file)[0] for file in files]
94
-
95
- DFS = []
96
- for file in files:
97
- df = pd.read_csv(os.path.join(path, file))
98
- df = update_cols(df) # Remove if by default there is only one header row
99
- df = process_df(df) # Process Dataframe
100
- df = df.sort_values(by=list(df.columns)[1], ascending=False) # Sort by clemscore
101
- DFS.append(df)
102
-
103
- # Only keep relevant columns for the main leaderboard
104
- latest_df_dummy = DFS[0]
105
- all_columns = list(latest_df_dummy.columns)
106
- keep_columns = all_columns[0:4]
107
- latest_df_dummy = latest_df_dummy.drop(columns=[c for c in all_columns if c not in keep_columns])
108
-
109
- latest_df = [latest_df_dummy]
110
- latest_vname = [file_names[0]]
111
- previous_df = []
112
- previous_vname = []
113
- for df, name in zip(DFS, file_names):
114
- previous_df.append(df)
115
- previous_vname.append(name)
116
-
117
- if not flag:
118
- previous_df.pop(0)
119
- previous_vname.pop(0)
120
-
121
- return latest_df, latest_vname, previous_df, previous_vname
122
-
123
- return None
124
 
 
 
125
 
126
- # ['Model', 'Clemscore', 'All(Played)', 'All(Quality Score)']
127
- def compare_plots(df: pd.DataFrame, LIST: list):
 
 
 
128
  '''
129
- Quality Score v/s % Played plot by selecting models
130
  Args:
131
- LIST: The list of models to show in the plot, updated from frontend
 
132
  Returns:
133
- fig: The plot
134
  '''
135
  short_names = label_map(LIST)
136
-
137
  list_columns = list(df.columns)
 
138
  df = df[df[list_columns[0]].isin(LIST)]
139
 
140
  X = df[list_columns[2]]
141
  fig, ax = plt.subplots()
142
  for model in LIST:
143
  short = short_names[model]
144
- # same_flag = short_names[model][1]
145
  model_df = df[df[list_columns[0]] == model]
146
  x = model_df[list_columns[2]]
147
  y = model_df[list_columns[3]]
148
  color = plt.cm.rainbow(x / max(X)) # Use a colormap for different colors
149
  plt.scatter(x, y, color=color)
150
- # if same_flag:
151
  plt.annotate(f'{short}', (x, y), textcoords="offset points", xytext=(0, -15), ha='center', rotation=0)
152
- # else:
153
- # plt.annotate(f'{short}', (x, y), textcoords="offset points", xytext=(20, -3), ha='center', rotation=0)
154
  ax.grid(which='both', color='grey', linewidth=1, linestyle='-', alpha=0.2)
155
  ax.set_xticks(np.arange(0,110,10))
156
  plt.xlim(-10, 110)
@@ -162,6 +160,23 @@ def compare_plots(df: pd.DataFrame, LIST: list):
162
 
163
  return fig
164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
  def shorten_model_name(full_name):
166
  # Split the name into parts
167
  parts = full_name.split('-')
@@ -191,13 +206,6 @@ def label_map(model_list: list) -> dict:
191
  '''
192
  short_names = {}
193
  for model_name in model_list:
194
- # splits = model_name.split('--')
195
- # if len(splits) != 1:
196
- # splits[0] = SHORT_NAMES[splits[0] + '-']
197
- # splits[1] = SHORT_NAMES[splits[1] + '-']
198
- # # Define the short name and indicate there are two different models
199
- # short_names[model_name] = [splits[0] + '--' + splits[1], 0]
200
- # else:
201
  if model_name in SHORT_NAMES:
202
  short_name = SHORT_NAMES[model_name]
203
  else:
@@ -207,32 +215,24 @@ def label_map(model_list: list) -> dict:
207
  short_names[model_name] = short_name
208
 
209
  return short_names
210
-
211
- def filter_search(df: pd.DataFrame, query: str) -> pd.DataFrame:
212
  '''
213
- Filter the dataframe based on the search query
214
- Args:
215
- df: Unfiltered dataframe
216
- query: a string of queries separated by ";"
217
- Return:
218
- filtered_df: Dataframe containing searched queries in the 'Model' column
219
  '''
220
- queries = query.split(';')
221
- list_cols = list(df.columns)
222
- df_len = len(df)
223
- filtered_models = []
224
- models_list = list(df[list_cols[0]])
225
- for q in queries:
226
- q = q.lower()
227
- for i in range(df_len):
228
- model_name = models_list[i]
229
- if q in model_name.lower():
230
- filtered_models.append(model_name) # Append model names containing query q
 
 
231
 
232
- filtered_df = df[df[list_cols[0]].isin(filtered_models)]
233
 
234
- if query == "":
235
- return df
236
 
237
- return filtered_df
238
-
 
2
  import pandas as pd
3
  import matplotlib.pyplot as plt
4
  import numpy as np
 
5
  from src.assets.text_content import SHORT_NAMES
6
 
7
+ # Set the folder name to save csv files
8
+ global csvs_path
9
+ csvs_path = 'versions'
10
+
11
+ def get_csv_data():
12
  '''
13
+ Get data from csv files saved locally
14
  Args:
15
+ None
16
+ Returns:
17
+ latest_df: singular list containing dataframe of the latest version of the leaderboard with only 4 columns
18
+ all_dfs: list of dataframes for previous versions + latest version including columns for all games
19
+ all_vnames: list of the names for the previous versions + latest version (For Details and Versions Tab Dropdown)
20
  '''
21
+ list_vers = os.listdir(csvs_path)
22
+ list_vers = [s.split('.csv')[0] for s in list_vers]
23
+ # Sort by latest version
24
+ float_content = [float(s[1:]) for s in list_vers]
25
+ float_content.sort(reverse=True)
26
+ list_vers = ['v'+str(s) for s in float_content]
27
+
28
+ DFS = []
29
+ for csv in list_vers:
30
+ read_path = os.path.join(csvs_path, csv + '.csv')
31
+ df = pd.read_csv(read_path)
32
+ df = process_df(df)
33
+ df = df.sort_values(by=list(df.columns)[1], ascending=False) # Sort by clemscore
34
+ DFS.append(df)
35
+
36
+ # Only keep relavant columns for the main leaderboard
37
+ latest_df_dummy = DFS[0]
38
+ all_columns = list(latest_df_dummy.columns)
39
+ keep_columns = all_columns[0:4]
40
+ latest_df_dummy = latest_df_dummy.drop(columns=[c for c in all_columns if c not in keep_columns])
41
+
42
+ latest_df = [latest_df_dummy]
43
+ all_dfs = []
44
+ all_vnames = []
45
+ for df, name in zip(DFS, list_vers):
46
+ all_dfs.append(df)
47
+ all_vnames.append(name)
48
+
49
+ return latest_df, all_dfs, all_vnames
50
 
51
  def process_df(df: pd.DataFrame) -> pd.DataFrame:
52
  '''
53
+ Process dataframe
54
+ - Remove repetition in model names
55
+ - Convert datatypes to sort by "float" instead of "str" for sorting
56
+ - Update column names
57
  Args:
58
  df: Unprocessed Dataframe (after using update_cols)
59
  Returns:
 
78
  else:
79
  models_list.append(splits[0] + "--" + splits[1])
80
  df[model_col_name] = models_list
81
+
82
+ # Update column names
83
+ update = ['Model', 'Clemscore', '% Played', 'Quality Score']
84
+ game_metrics = list_column_names[4:]
85
+
86
+ for col in game_metrics:
87
+ splits = col.split(',')
88
+ update.append(splits[0].capitalize() + "" + splits[1])
89
 
90
+ map_cols = {}
91
+ for i in range(len(update)):
92
+ map_cols[list_column_names[i]] = str(update[i])
93
+
94
+ df = df.rename(columns=map_cols)
95
  return df
96
 
97
+ def filter_search(df: pd.DataFrame, query: str) -> pd.DataFrame:
98
  '''
99
+ Filter the dataframe based on the search query
100
+ Args:
101
+ df: Unfiltered dataframe
102
+ query: a string of queries separated by ";"
103
+ Return:
104
+ filtered_df: Dataframe containing searched queries in the 'Model' column
 
 
 
 
105
  '''
106
+ queries = query.split(';')
107
+ list_cols = list(df.columns)
108
+ df_len = len(df)
109
+ filtered_models = []
110
+ models_list = list(df[list_cols[0]])
111
+ for q in queries:
112
+ q = q.lower()
113
+ q = q.strip()
114
+ for i in range(df_len):
115
+ model_name = models_list[i]
116
+ if q in model_name.lower():
117
+ filtered_models.append(model_name) # Append model names containing query q
118
 
119
+ filtered_df = df[df[list_cols[0]].isin(filtered_models)]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
+ if query == "":
122
+ return df
123
 
124
+ return filtered_df
125
+
126
+ ###################################FOR PLOTS##################################################
127
+
128
+ def plot_graph(df:pd.DataFrame, LIST:list):
129
  '''
130
+ Takes in a list of models to plot
131
  Args:
132
+ df: A dummy dataframe of latest version
133
+ LIST: List of models to plot
134
  Returns:
135
+ Fig: figure to plot
136
  '''
137
  short_names = label_map(LIST)
 
138
  list_columns = list(df.columns)
139
+
140
  df = df[df[list_columns[0]].isin(LIST)]
141
 
142
  X = df[list_columns[2]]
143
  fig, ax = plt.subplots()
144
  for model in LIST:
145
  short = short_names[model]
 
146
  model_df = df[df[list_columns[0]] == model]
147
  x = model_df[list_columns[2]]
148
  y = model_df[list_columns[3]]
149
  color = plt.cm.rainbow(x / max(X)) # Use a colormap for different colors
150
  plt.scatter(x, y, color=color)
 
151
  plt.annotate(f'{short}', (x, y), textcoords="offset points", xytext=(0, -15), ha='center', rotation=0)
 
 
152
  ax.grid(which='both', color='grey', linewidth=1, linestyle='-', alpha=0.2)
153
  ax.set_xticks(np.arange(0,110,10))
154
  plt.xlim(-10, 110)
 
160
 
161
  return fig
162
 
163
+
164
+ # ['Model', 'Clemscore', 'All(Played)', 'All(Quality Score)']
165
+ def compare_plots(df: pd.DataFrame, LIST1: list, LIST2: list):
166
+ '''
167
+ Quality Score v/s % Played plot by selecting models
168
+ Args:
169
+ df: A dummy dataframe of latest version
170
+ LIST1: The list of open source models to show in the plot, updated from frontend
171
+ LIST2: The list of commercial models to show in the plot, updated from frontend
172
+ Returns:
173
+ fig: The plot
174
+ '''
175
+ # Combine lists for Open source and commercial models
176
+ LIST = LIST1 + LIST2
177
+ fig = plot_graph(df, LIST)
178
+ return fig
179
+
180
  def shorten_model_name(full_name):
181
  # Split the name into parts
182
  parts = full_name.split('-')
 
206
  '''
207
  short_names = {}
208
  for model_name in model_list:
 
 
 
 
 
 
 
209
  if model_name in SHORT_NAMES:
210
  short_name = SHORT_NAMES[model_name]
211
  else:
 
215
  short_names[model_name] = short_name
216
 
217
  return short_names
218
+
219
+ def split_models(MODEL_LIST: list):
220
  '''
221
+ Split the models into open source and commercial
 
 
 
 
 
222
  '''
223
+ open_models = []
224
+ comm_models = []
225
+
226
+ for model in MODEL_LIST:
227
+ if model.startswith(('gpt-', 'claude-', 'command')):
228
+ comm_models.append(model)
229
+ else:
230
+ open_models.append(model)
231
+
232
+ open_models.sort(key=lambda o: o.upper())
233
+ comm_models.sort(key=lambda c: c.upper())
234
+ return open_models, comm_models
235
+
236
 
 
237
 
 
 
238
 
 
 
versions/v0.9.csv CHANGED
@@ -1,6 +1,4 @@
1
- ,-,all,all,imagegame,imagegame,imagegame,privateshared,privateshared,privateshared,referencegame,referencegame,referencegame,taboo,taboo,taboo,wordle,wordle,wordle,wordle_withclue,wordle_withclue,wordle_withclue,wordle_withcritic,wordle_withcritic,wordle_withcritic
2
- ,clemscore,Average % Played,Average Quality Score,% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std)
3
- model,,,,,,,,,,,,,,,,,,,,,,,,
4
  claude-v1.3-t0.0--claude-v1.3-t0.0,37.07,74.76,49.58,0.0,,,100.0,84.87,18.87,100.0,82.5,38.48,76.92,68.75,38.71,100.0,0.0,0.0,100.0,30.56,40.13,46.43,30.77,48.04
5
  falcon-40b-t0.0--falcon-40b-t0.0,0.71,0.95,75.0,0.0,,,0.0,,,0.0,,,0.0,,,0.0,,,3.33,50.0,,3.33,100.0,
6
  gpt-3.5-turbo-t0.0--gpt-3.5-turbo-t0.0,37.02,85.86,43.12,97.5,60.28,25.95,64.0,72.83,13.07,100.0,55.0,50.38,69.49,71.95,44.79,100.0,0.0,0.0,93.33,28.57,46.0,76.67,13.19,30.16
@@ -11,4 +9,4 @@ koala-13b-t0.0--koala-13b-t0.0,1.48,14.76,10.0,0.0,,,0.0,,,0.0,,,0.0,,,86.67,0.0
11
  luminous-supreme-t0.0--luminous-supreme-t0.0,0.0,16.24,0.0,0.0,,,0.0,,,0.0,,,0.0,,,100.0,0.0,0.0,3.33,0.0,,10.34,0.0,0.0
12
  oasst-12b-t0.0--oasst-12b-t0.0,1.74,20.85,8.33,0.0,,,0.0,,,15.0,33.33,51.64,0.0,,,100.0,0.0,0.0,16.67,0.0,0.0,14.29,0.0,0.0
13
  text-davinci-003-t0.0--text-davinci-003-t0.0,15.78,44.5,35.46,57.5,38.7,27.78,16.0,14.1,25.21,82.5,36.36,48.85,28.81,76.47,43.72,66.67,1.25,5.59,36.67,31.36,38.99,23.33,50.0,50.0
14
- vicuna-13b-t0.0--vicuna-13b-t0.0,4.24,13.58,31.25,0.0,,,0.0,,,0.0,,,5.08,100.0,0.0,56.67,0.0,0.0,13.33,25.0,50.0,20.0,0.0,0.0
 
1
+ ,"-, clemscore","all, Average % Played","all, Average Quality Score","imagegame, % Played","imagegame, Quality Score","imagegame, Quality Score (std)","privateshared, % Played","privateshared, Quality Score","privateshared, Quality Score (std)","referencegame, % Played","referencegame, Quality Score","referencegame, Quality Score (std)","taboo, % Played","taboo, Quality Score","taboo, Quality Score (std)","wordle, % Played","wordle, Quality Score","wordle, Quality Score (std)","wordle_withclue, % Played","wordle_withclue, Quality Score","wordle_withclue, Quality Score (std)","wordle_withcritic, % Played","wordle_withcritic, Quality Score","wordle_withcritic, Quality Score (std)"
 
 
2
  claude-v1.3-t0.0--claude-v1.3-t0.0,37.07,74.76,49.58,0.0,,,100.0,84.87,18.87,100.0,82.5,38.48,76.92,68.75,38.71,100.0,0.0,0.0,100.0,30.56,40.13,46.43,30.77,48.04
3
  falcon-40b-t0.0--falcon-40b-t0.0,0.71,0.95,75.0,0.0,,,0.0,,,0.0,,,0.0,,,0.0,,,3.33,50.0,,3.33,100.0,
4
  gpt-3.5-turbo-t0.0--gpt-3.5-turbo-t0.0,37.02,85.86,43.12,97.5,60.28,25.95,64.0,72.83,13.07,100.0,55.0,50.38,69.49,71.95,44.79,100.0,0.0,0.0,93.33,28.57,46.0,76.67,13.19,30.16
 
9
  luminous-supreme-t0.0--luminous-supreme-t0.0,0.0,16.24,0.0,0.0,,,0.0,,,0.0,,,0.0,,,100.0,0.0,0.0,3.33,0.0,,10.34,0.0,0.0
10
  oasst-12b-t0.0--oasst-12b-t0.0,1.74,20.85,8.33,0.0,,,0.0,,,15.0,33.33,51.64,0.0,,,100.0,0.0,0.0,16.67,0.0,0.0,14.29,0.0,0.0
11
  text-davinci-003-t0.0--text-davinci-003-t0.0,15.78,44.5,35.46,57.5,38.7,27.78,16.0,14.1,25.21,82.5,36.36,48.85,28.81,76.47,43.72,66.67,1.25,5.59,36.67,31.36,38.99,23.33,50.0,50.0
12
+ vicuna-13b-t0.0--vicuna-13b-t0.0,4.24,13.58,31.25,0.0,,,0.0,,,0.0,,,5.08,100.0,0.0,56.67,0.0,0.0,13.33,25.0,50.0,20.0,0.0,0.0
versions/v1.0.csv CHANGED
@@ -1,9 +1,8 @@
1
- ,-,all,all,imagegame,imagegame,imagegame,privateshared,privateshared,privateshared,referencegame,referencegame,referencegame,taboo,taboo,taboo,wordle,wordle,wordle,wordle_withclue,wordle_withclue,wordle_withclue,wordle_withcritic,wordle_withcritic,wordle_withcritic
2
- ,clemscore,Average % Played,Average Quality Score,% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std)
3
- model,,,,,,,,,,,,,,,,,,,,,,,,
4
  CodeLlama-34b-Instruct-hf-t0.0--CodeLlama-34b-Instruct-hf-t0.0,10.34,23.96,43.15,7.5,37.67,36.83,0.0,,,15.0,83.33,40.82,74.58,29.55,44.87,53.33,0.0,0.0,13.33,8.33,16.66,4.0,100.0,
5
  Mistral-7B-Instruct-v0.1-t0.0--Mistral-7B-Instruct-v0.1-t0.0,2.72,17.5,15.56,,,,20.0,0.0,0.0,,,,50.85,46.67,49.01,0.0,,,16.67,0.0,0.0,0.0,,
6
  Wizard-Vicuna-13B-Uncensored-HF-t0.0--Wizard-Vicuna-13B-Uncensored-HF-t0.0,2.06,9.49,21.71,0.0,,,0.0,,,7.5,66.67,57.74,22.03,30.77,48.04,3.33,0.0,,30.0,11.11,33.33,3.57,0.0,
 
7
  WizardLM-70b-v1.0-t0.0--WizardLM-70b-v1.0-t0.0,16.7,51.65,32.34,0.0,,,24.0,62.3,19.84,100.0,32.5,47.43,62.71,67.57,47.46,60.0,0.0,0.0,70.0,21.43,40.53,44.83,10.26,28.49
8
  claude-2-t0.0--claude-2-t0.0,33.71,82.12,41.05,0.0,,,100.0,75.89,18.57,100.0,45.0,50.38,91.53,73.15,39.71,100.0,0.0,0.0,90.0,21.6,36.56,93.33,30.65,43.22
9
  claude-2.1-t0.0--claude-2.1-t0.0,36.38,83.08,43.79,0.0,,,100.0,73.01,23.8,100.0,55.0,50.38,94.92,69.35,41.41,100.0,0.67,3.65,93.33,29.29,40.95,93.33,35.42,41.74
@@ -16,6 +15,7 @@ gpt-3.5-turbo-1106-t0.0--gpt-3.5-turbo-1106-t0.0,30.45,77.12,39.49,40.0,51.25,34
16
  gpt-4-0314-t0.0--gpt-4-0314-t0.0,58.81,93.79,62.7,65.0,88.92,16.24,100.0,91.12,7.48,100.0,77.5,42.29,91.53,79.63,32.48,100.0,2.78,7.37,100.0,50.78,41.69,100.0,48.17,41.42
17
  gpt-4-0613-t0.0--gpt-4-0613-t0.0,60.9,97.22,62.64,97.5,97.28,10.38,100.0,97.34,5.02,100.0,80.0,40.51,83.05,81.97,29.03,100.0,4.25,8.62,100.0,46.11,41.85,100.0,31.5,38.1
18
  gpt-4-1106-preview-t0.0--gpt-4-1106-preview-t0.0,60.33,97.95,61.59,97.5,94.15,14.85,100.0,83.25,12.51,100.0,90.0,30.38,88.14,83.97,29.88,100.0,7.5,13.01,100.0,49.11,42.93,100.0,23.17,37.74
 
19
  koala-13B-HF-t0.0--koala-13B-HF-t0.0,1.25,23.22,5.38,0.0,,,0.0,,,0.0,,,52.54,16.13,37.39,100.0,0.0,0.0,10.0,0.0,0.0,0.0,,
20
  llama-2-13b-chat-hf-t0.0--llama-2-13b-chat-hf-t0.0,1.89,3.43,55.09,0.0,,,24.0,55.09,33.68,0.0,,,0.0,,,0.0,,,0.0,,,0.0,,
21
  llama-2-70b-chat-hf-t0.0--llama-2-70b-chat-hf-t0.0,1.39,3.79,36.74,0.0,,,14.0,13.48,11.98,12.5,60.0,54.77,0.0,,,0.0,,,0.0,,,0.0,,
@@ -23,4 +23,8 @@ llama-2-7b-chat-hf-t0.0--llama-2-7b-chat-hf-t0.0,0.24,6.05,4.0,0.0,,,0.0,,,0.0,,
23
  oasst-sft-4-pythia-12b-epoch-3.5-t0.0--oasst-sft-4-pythia-12b-epoch-3.5-t0.0,0.0,14.76,0.0,0.0,,,0.0,,,0.0,,,0.0,,,100.0,0.0,0.0,3.33,0.0,,0.0,,
24
  sheep-duck-llama-2-13b-t0.0--sheep-duck-llama-2-13b-t0.0,6.74,34.86,19.34,0.0,,,0.0,,,97.5,33.33,47.76,89.83,0.0,0.0,0.0,,,23.33,19.05,37.8,33.33,25.0,42.49
25
  sheep-duck-llama-2-70b-v1.1-t0.0--sheep-duck-llama-2-70b-v1.1-t0.0,17.12,40.82,41.93,40.0,23.19,28.06,0.0,,,35.0,57.14,51.36,59.32,74.29,44.34,34.78,0.0,0.0,63.33,43.86,43.82,53.33,53.12,49.9
26
- vicuna-33b-v1.3-t0.0--vicuna-33b-v1.3-t0.0,9.15,17.47,52.36,15.0,23.67,24.34,40.0,34.58,26.47,0.0,,,37.29,50.0,51.18,0.0,,,23.33,53.57,46.61,6.67,100.0,0.0
 
 
 
 
 
1
+ ,"-, clemscore","all, Average % Played","all, Average Quality Score","imagegame, % Played","imagegame, Quality Score","imagegame, Quality Score (std)","privateshared, % Played","privateshared, Quality Score","privateshared, Quality Score (std)","referencegame, % Played","referencegame, Quality Score","referencegame, Quality Score (std)","taboo, % Played","taboo, Quality Score","taboo, Quality Score (std)","wordle, % Played","wordle, Quality Score","wordle, Quality Score (std)","wordle_withclue, % Played","wordle_withclue, Quality Score","wordle_withclue, Quality Score (std)","wordle_withcritic, % Played","wordle_withcritic, Quality Score","wordle_withcritic, Quality Score (std)"
 
 
2
  CodeLlama-34b-Instruct-hf-t0.0--CodeLlama-34b-Instruct-hf-t0.0,10.34,23.96,43.15,7.5,37.67,36.83,0.0,,,15.0,83.33,40.82,74.58,29.55,44.87,53.33,0.0,0.0,13.33,8.33,16.66,4.0,100.0,
3
  Mistral-7B-Instruct-v0.1-t0.0--Mistral-7B-Instruct-v0.1-t0.0,2.72,17.5,15.56,,,,20.0,0.0,0.0,,,,50.85,46.67,49.01,0.0,,,16.67,0.0,0.0,0.0,,
4
  Wizard-Vicuna-13B-Uncensored-HF-t0.0--Wizard-Vicuna-13B-Uncensored-HF-t0.0,2.06,9.49,21.71,0.0,,,0.0,,,7.5,66.67,57.74,22.03,30.77,48.04,3.33,0.0,,30.0,11.11,33.33,3.57,0.0,
5
+ WizardLM-13b-v1.2-t0.0--WizardLM-13b-v1.2-t0.0,7.82,40.49,19.31,0.0,,,26.0,21.37,20.34,100.0,35.0,48.3,47.46,51.79,48.08,33.33,0.0,0.0,43.33,7.69,27.74,33.33,0.0,0.0
6
  WizardLM-70b-v1.0-t0.0--WizardLM-70b-v1.0-t0.0,16.7,51.65,32.34,0.0,,,24.0,62.3,19.84,100.0,32.5,47.43,62.71,67.57,47.46,60.0,0.0,0.0,70.0,21.43,40.53,44.83,10.26,28.49
7
  claude-2-t0.0--claude-2-t0.0,33.71,82.12,41.05,0.0,,,100.0,75.89,18.57,100.0,45.0,50.38,91.53,73.15,39.71,100.0,0.0,0.0,90.0,21.6,36.56,93.33,30.65,43.22
8
  claude-2.1-t0.0--claude-2.1-t0.0,36.38,83.08,43.79,0.0,,,100.0,73.01,23.8,100.0,55.0,50.38,94.92,69.35,41.41,100.0,0.67,3.65,93.33,29.29,40.95,93.33,35.42,41.74
 
15
  gpt-4-0314-t0.0--gpt-4-0314-t0.0,58.81,93.79,62.7,65.0,88.92,16.24,100.0,91.12,7.48,100.0,77.5,42.29,91.53,79.63,32.48,100.0,2.78,7.37,100.0,50.78,41.69,100.0,48.17,41.42
16
  gpt-4-0613-t0.0--gpt-4-0613-t0.0,60.9,97.22,62.64,97.5,97.28,10.38,100.0,97.34,5.02,100.0,80.0,40.51,83.05,81.97,29.03,100.0,4.25,8.62,100.0,46.11,41.85,100.0,31.5,38.1
17
  gpt-4-1106-preview-t0.0--gpt-4-1106-preview-t0.0,60.33,97.95,61.59,97.5,94.15,14.85,100.0,83.25,12.51,100.0,90.0,30.38,88.14,83.97,29.88,100.0,7.5,13.01,100.0,49.11,42.93,100.0,23.17,37.74
18
+ gpt4all-13b-snoozy-t0.0--gpt4all-13b-snoozy-t0.0,0.0,2.92,0.0,0.0,,,0.0,,,0.0,,,0.0,,,0.0,,,13.33,0.0,0.0,7.14,0.0,0.0
19
  koala-13B-HF-t0.0--koala-13B-HF-t0.0,1.25,23.22,5.38,0.0,,,0.0,,,0.0,,,52.54,16.13,37.39,100.0,0.0,0.0,10.0,0.0,0.0,0.0,,
20
  llama-2-13b-chat-hf-t0.0--llama-2-13b-chat-hf-t0.0,1.89,3.43,55.09,0.0,,,24.0,55.09,33.68,0.0,,,0.0,,,0.0,,,0.0,,,0.0,,
21
  llama-2-70b-chat-hf-t0.0--llama-2-70b-chat-hf-t0.0,1.39,3.79,36.74,0.0,,,14.0,13.48,11.98,12.5,60.0,54.77,0.0,,,0.0,,,0.0,,,0.0,,
 
23
  oasst-sft-4-pythia-12b-epoch-3.5-t0.0--oasst-sft-4-pythia-12b-epoch-3.5-t0.0,0.0,14.76,0.0,0.0,,,0.0,,,0.0,,,0.0,,,100.0,0.0,0.0,3.33,0.0,,0.0,,
24
  sheep-duck-llama-2-13b-t0.0--sheep-duck-llama-2-13b-t0.0,6.74,34.86,19.34,0.0,,,0.0,,,97.5,33.33,47.76,89.83,0.0,0.0,0.0,,,23.33,19.05,37.8,33.33,25.0,42.49
25
  sheep-duck-llama-2-70b-v1.1-t0.0--sheep-duck-llama-2-70b-v1.1-t0.0,17.12,40.82,41.93,40.0,23.19,28.06,0.0,,,35.0,57.14,51.36,59.32,74.29,44.34,34.78,0.0,0.0,63.33,43.86,43.82,53.33,53.12,49.9
26
+ vicuna-13b-v1.5-t0.0--vicuna-13b-v1.5-t0.0,7.21,34.74,20.74,0.0,,,24.0,0.0,0.0,80.0,28.12,45.68,49.15,37.93,49.38,26.67,0.0,0.0,36.67,30.3,45.84,26.67,28.12,45.19
27
+ vicuna-33b-v1.3-t0.0--vicuna-33b-v1.3-t0.0,9.15,17.47,52.36,15.0,23.67,24.34,40.0,34.58,26.47,0.0,,,37.29,50.0,51.18,0.0,,,23.33,53.57,46.61,6.67,100.0,0.0
28
+ vicuna-7b-v1.5-t0.0--vicuna-7b-v1.5-t0.0,3.46,12.86,26.91,0.0,,,4.0,0.0,0.0,0.0,,,62.71,24.32,43.5,0.0,,,13.33,50.0,57.74,10.0,33.33,57.74
29
+ zephyr-7b-alpha-t0.0--zephyr-7b-alpha-t0.0,0.75,7.51,10.0,0.0,,,0.0,,,0.0,,,0.0,,,0.0,,,33.33,20.0,42.16,19.23,0.0,0.0
30
+ zephyr-7b-beta-t0.0--zephyr-7b-beta-t0.0,1.23,3.95,31.25,0.0,,,0.0,,,0.0,,,0.0,,,0.0,,,13.33,25.0,50.0,14.29,37.5,47.87