Koshti10 committed
Commit 63f4228 • 1 Parent(s): ff19480

Upload 7 files

Files changed (4):
  1. app.py +48 -32
  2. src/leaderboard_utils.py +5 -0
  3. src/reload.py +75 -0
  4. src/reload_utils.py +82 -0
app.py CHANGED
@@ -4,23 +4,28 @@ from src.assets.text_content import TITLE, INTRODUCTION_TEXT
 from src.leaderboard_utils import filter_search, get_github_data
 from src.plot_utils import split_models, compare_plots
 
-# For Leaderboards
-# Get CSV data
-global primary_leaderboard_df, version_dfs, version_names
-primary_leaderboard_df, version_dfs, version_names = get_github_data()
+# from src.reload_utils import ReloadData
+from src.reload import get_primary_leaderboard, get_open_models, get_closed_models, get_plot_df, get_version_names, get_version_df, get_prev_df
 
-global prev_df
-prev_df = version_dfs[0]
-def select_prev_df(name):
-    ind = version_names.index(name)
-    prev_df = version_dfs[ind]
-    return prev_df
+reload_time = 5
 
-# For Plots
-global plot_df, OPEN_MODELS, CLOSED_MODELS
-plot_df = primary_leaderboard_df[0]
-MODELS = list(plot_df[list(plot_df.columns)[0]].unique())
-OPEN_MODELS, CLOSED_MODELS = split_models(MODELS)
+# # For Leaderboards
+# # Get CSV data
+# global primary_leaderboard_df, version_dfs, version_names
+# primary_leaderboard_df, version_dfs, version_names = get_github_data()
+
+# global prev_df
+# prev_df = version_dfs[0]
+# def select_prev_df(name):
+#     ind = version_names.index(name)
+#     prev_df = version_dfs[ind]
+#     return prev_df
+
+# # For Plots
+# global plot_df, OPEN_MODELS, CLOSED_MODELS
+# plot_df = primary_leaderboard_df[0]
+# MODELS = list(plot_df[list(plot_df.columns)[0]].unique())
+# OPEN_MODELS, CLOSED_MODELS = split_models(MODELS)
 
 
 # MAIN APPLICATION s
@@ -37,20 +42,22 @@ with main_app:
                 show_label=False,
                 elem_id="search-bar",
             )
-
-        leaderboard_table = gr.components.Dataframe(
-            value=primary_leaderboard_df[0],
+
+        leaderboard_table = gr.DataFrame(
+            value=get_primary_leaderboard,
             elem_id="leaderboard-table",
             interactive=False,
             visible=True,
-        )
+            every=reload_time
+        )
 
         # Add a dummy leaderboard to handle search queries from the primary_leaderboard_df and not update primary_leaderboard_df
-        dummy_leaderboard_table = gr.components.Dataframe(
-            value=primary_leaderboard_df[0],
+        dummy_leaderboard_table = gr.Dataframe(
+            value=get_primary_leaderboard,
             elem_id="leaderboard-table",
             interactive=False,
             visible=False,
+            every=reload_time
         )
 
         search_bar.submit(
@@ -63,20 +70,22 @@ with main_app:
     with gr.TabItem("📈 Plot", id=3):
        with gr.Row():
             open_models_selection = gr.CheckboxGroup(
-                OPEN_MODELS,
+                choices=get_open_models(),
                 label="Open-weight Models 🌐",
                 value=[],
                 elem_id="value-select",
                 interactive=True,
+                every=reload_time
             )
 
        with gr.Row():
             closed_models_selection = gr.CheckboxGroup(
-                CLOSED_MODELS,
+                choices=get_closed_models(),
                 label="Closed-weight Models 💼",
                 value=[],
                 elem_id="value-select-2",
                 interactive=True,
+                every=reload_time
             )
 
        with gr.Row():
@@ -109,8 +118,9 @@ with main_app:
 
        with gr.Row():
             dummy_plot_df = gr.DataFrame(
-                value=plot_df,
-                visible=False
+                value=get_plot_df,
+                visible=False,
+                every=reload_time
             )
 
        with gr.Row():
@@ -156,7 +166,10 @@ with main_app:
    with gr.TabItem("🔄 Versions and Details", elem_id="details", id=2):
        with gr.Row():
             version_select = gr.Dropdown(
-                version_names, label="Select Version 🕹️", value=version_names[0]
+                choices=get_version_names(),
+                label="Select Version 🕹️",
+                value=get_version_names()[0],
+                every=reload_time
             )
        with gr.Row():
             search_bar_prev = gr.Textbox(
@@ -165,18 +178,20 @@ with main_app:
                 elem_id="search-bar-2",
             )
 
-        prev_table = gr.components.Dataframe(
-            value=prev_df,
+        prev_table = gr.Dataframe(
+            value=get_prev_df,
             elem_id="leaderboard-table",
             interactive=False,
             visible=True,
+            every=reload_time
         )
 
-        dummy_prev_table = gr.components.Dataframe(
-            value=prev_df,
+        dummy_prev_table = gr.Dataframe(
+            value=get_prev_df,
             elem_id="leaderboard-table",
             interactive=False,
             visible=False,
+            every=reload_time
         )
 
         search_bar_prev.submit(
@@ -187,10 +202,11 @@ with main_app:
         )
 
         version_select.change(
-            select_prev_df,
+            get_prev_df,
             [version_select],
             prev_table,
-            queue=True
+            queue=True,
+            every=reload_time
         )
 main_app.load()
 
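Note on the pattern: replacing static values with callables (value=get_primary_leaderboard) and adding every=reload_time uses Gradio's polling refresh, where a component whose value is a zero-argument callable is re-evaluated on a timer for each open session. A minimal sketch of that pattern, assuming Gradio 3.x (load_scores is a hypothetical stand-in for get_primary_leaderboard):

# Minimal sketch of the polling refresh adopted in this commit.
import gradio as gr
import pandas as pd

def load_scores():
    # Stand-in loader; in app.py this re-reads the CSVs from GitHub.
    return pd.DataFrame({"model": ["m1", "m2"], "score": [0.7, 0.9]})

with gr.Blocks() as demo:
    # A callable value plus every=<seconds> re-runs load_scores periodically.
    gr.DataFrame(value=load_scores, every=5)

demo.queue().launch()  # `every` relies on the queue being enabled

One caveat: only a callable value is polled. Arguments evaluated once at build time, such as choices=get_open_models(), stay fixed until the app restarts, and depending on the Gradio version, setting every on a component whose value is not callable may be rejected outright.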
src/leaderboard_utils.py CHANGED
@@ -3,7 +3,12 @@ import pandas as pd
 import requests, json
 from io import StringIO
 
+from datetime import datetime
+
+
 def get_github_data():
+    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    print(f"LOADING GITHUB DATAAA.... at time = {current_time}")
     '''
     Get data from csv files on Github
     Args:
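The timestamped print makes the reload cadence visible in the logs: with reload_time = 5, every open session re-runs get_github_data() every five seconds. If that turns out to be too many GitHub requests, a small TTL cache in front of the fetch would bound the rate. A sketch under that assumption (the decorator and the 300-second window are illustrative, not part of this commit):

# Hypothetical TTL cache around get_github_data(); not part of the commit.
import time
from functools import wraps

from src.leaderboard_utils import get_github_data

def ttl_cache(seconds):
    '''Cache a zero-argument function's result for `seconds`.'''
    def decorator(fn):
        state = {"at": 0.0, "result": None}

        @wraps(fn)
        def wrapper():
            now = time.monotonic()
            if state["result"] is None or now - state["at"] > seconds:
                state["result"] = fn()
                state["at"] = now
            return state["result"]

        return wrapper
    return decorator

@ttl_cache(seconds=300)
def get_github_data_cached():
    # Polled every reload_time seconds, but hits GitHub at most every 5 minutes.
    return get_github_data()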
src/reload.py ADDED
@@ -0,0 +1,75 @@
+# Isolated functions to reload the leaderboard data and plot the results
+
+from src.leaderboard_utils import filter_search, get_github_data
+from src.plot_utils import split_models, compare_plots
+
+def get_primary_leaderboard():
+    '''
+    Returns
+        primary_leaderboard_df[0]: Dataframe containing the primary leaderboard (latest version of the benchmark results)
+    '''
+    primary_leaderboard_df, _, _ = get_github_data()
+    return primary_leaderboard_df[0]
+
+def get_open_models():
+    '''
+    Returns
+        open_models: Checkbox group containing the open models
+    '''
+    primary_leaderboard_df, _, _ = get_github_data()
+    temp_df = primary_leaderboard_df[0]
+    models = list(temp_df[list(temp_df.columns)[0]].unique())
+    open_models, _ = split_models(models)
+    return open_models
+
+def get_closed_models():
+    '''
+    Returns
+        closed_models: Checkbox group containing the closed models
+    '''
+    primary_leaderboard_df, _, _ = get_github_data()
+    temp_df = primary_leaderboard_df[0]
+    models = list(temp_df[list(temp_df.columns)[0]].unique())
+    _, closed_models = split_models(models)
+    return closed_models
+
+def get_plot_df():
+    '''
+    Returns
+        plot_df: Dataframe containing the results of the latest version for plotting
+    '''
+    primary_leaderboard_df, _, _ = get_github_data()
+    plot_df = primary_leaderboard_df[0]
+    return plot_df
+
+def get_version_names():
+    '''
+    Returns
+        version_names: List containing the versions of the benchmark results for dropdown selection
+    '''
+    _, _, version_names = get_github_data()
+    return version_names
+
+def get_version_df():
+    '''
+    Returns
+        version_dfs: Dataframes containing the benchmark results for all versions
+    '''
+    _, version_dfs, _ = get_github_data()
+    return version_dfs
+
+def get_prev_df(name='initial'):
+    '''
+    Returns
+        prev_df: Dataframe containing the benchmark results for the selected previous version (default = latest version)
+    '''
+    _, version_dfs, version_names = get_github_data()
+
+    if name == 'initial':
+        name = version_names[0]
+
+    ind = version_names.index(name)
+    prev_df = version_dfs[ind]
+    return prev_df
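Each helper refetches via get_github_data() and returns exactly one component value, which is what lets app.py pass them bare as callable values. get_prev_df additionally doubles as the dropdown's change handler: polled with no arguments it falls back to the latest version, while the change event passes the selected name. Roughly how the two roles combine (a sketch mirroring app.py, not additional committed code):

# Sketch: get_prev_df as both a polled value and an event handler.
import gradio as gr

from src.reload import get_prev_df, get_version_names

with gr.Blocks() as demo:
    version_select = gr.Dropdown(
        choices=get_version_names(),
        value=get_version_names()[0],
    )
    # Polled as get_prev_df() -> name defaults to 'initial' -> latest version.
    prev_table = gr.Dataframe(value=get_prev_df, every=5)
    # Fired as get_prev_df(selected_name) when the dropdown changes.
    version_select.change(get_prev_df, [version_select], prev_table)

demo.queue().launch()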
src/reload_utils.py ADDED
@@ -0,0 +1,82 @@
+# Isolated functions to reload the leaderboard data and plot the results
+
+from src.leaderboard_utils import filter_search, get_github_data
+from src.plot_utils import split_models, compare_plots
+
+# COMPONENTS TO RELOAD EVERY TIME:
+# leaderboard_table, dummy_leaderboard_table,
+# open_models_selection, closed_models_selection, show_all, show_names, show_legend,
+# version_select, prev_table, dummy_prev_table
+
+class ReloadData():
+    '''
+    A class containing methods to reload the leaderboard data and plot the results
+    The methods return individual component values directly, to use the 'every' arg in the component
+    '''
+
+    def __init__(self):
+        print("Initializing Reload...........")
+        self.primary_leaderboard_df, self.version_dfs, self.version_names = get_github_data()
+        self.plot_df = self.primary_leaderboard_df[0]
+        self.models = list(self.plot_df[list(self.plot_df.columns)[0]].unique())
+        print("Reload completed ....... Here's a reloaded dataframe for the latest version")
+        print(self.primary_leaderboard_df)
+
+    def get_primary_leaderboard(self):
+        '''
+        Returns
+            self.primary_leaderboard_df[0]: Dataframe containing the primary leaderboard (latest version of the benchmark results)
+        '''
+        return self.primary_leaderboard_df[0]
+
+    def get_open_models(self):
+        '''
+        Returns
+            open_models: Checkbox group containing the open models
+        '''
+        self.open_models, _ = split_models(self.models)
+        return self.open_models
+
+    def get_closed_models(self):
+        '''
+        Returns
+            closed_models: Checkbox group containing the closed models
+        '''
+        _, self.closed_models = split_models(self.models)
+        return self.closed_models
+
+    def get_plot_df(self):
+        '''
+        Returns
+            plot_df: Dataframe containing the results of the latest version for plotting
+        '''
+        return self.plot_df
+
+    def get_version_names(self):
+        '''
+        Returns
+            version_names: List containing the versions of the benchmark results for dropdown selection
+        '''
+        return self.version_names
+
+    def get_version_df(self):
+        '''
+        Returns
+            version_dfs: Dataframes containing the benchmark results for all versions
+        '''
+        return self.version_dfs
+
+    def get_prev_df(self, name='initial'):
+        '''
+        Returns
+            prev_df: Dataframe containing the benchmark results for the selected previous version (default = latest version)
+        '''
+        if name == 'initial':
+            name = self.version_names[0]
+
+        ind = self.version_names.index(name)
+        self.prev_df = self.version_dfs[ind]
+        return self.prev_df
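This class-based variant is kept but unused: app.py imports the module-level functions from src/reload.py and leaves `from src.reload_utils import ReloadData` commented out. The practical difference is where the data lives. A bound method plugs into `every` just like a plain function, but the instance fetches everything once in __init__, so repeated polls would keep returning those cached frames, which is presumably why app.py uses the stateless functions instead. Illustrative wiring (not in the commit):

# Illustrative only: polling a bound method of the class-based variant.
import gradio as gr

from src.reload_utils import ReloadData

reload_data = ReloadData()  # fetches the GitHub data once, here

with gr.Blocks() as demo:
    # A valid callable value, but every poll returns the frames cached in
    # __init__ until a new ReloadData() is constructed.
    gr.DataFrame(value=reload_data.get_primary_leaderboard, every=5)

demo.queue().launch()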