Spaces:
Runtime error
Runtime error
Add filters for bad models
Browse files - background_task.py +12 -5
background_task.py
CHANGED
@@ -7,6 +7,7 @@ from huggingface_hub import HfApi, Repository
|
|
7 |
|
8 |
DATASET_REPO_URL = "https://huggingface.co/datasets/huggingface-projects/bot-fight-data"
|
9 |
DATASET_TEMP_REPO_URL = "https://huggingface.co/datasets/huggingface-projects/temp-match-results"
|
|
|
10 |
ELO_FILENAME = "soccer_elo.csv"
|
11 |
HISTORY_FILENAME = "soccer_history.csv"
|
12 |
TEMP_FILENAME = "results.csv"
|
@@ -103,6 +104,8 @@ class Matchmaking:
|
|
103 |
self.matches["model2"].append(model2.author + "/" + model2.name)
|
104 |
self.matches["result"].append(result)
|
105 |
self.matches["timestamp"].append(row["timestamp"])
|
|
|
|
|
106 |
data_dict = {"model1": [], "model2": [], "timestamp": [], "result": []}
|
107 |
df = pd.DataFrame(data_dict)
|
108 |
print(df.head())
|
@@ -182,11 +185,9 @@ def match(model1, model2):
|
|
182 |
model2_id = model2.author + "/" + model2.name
|
183 |
subprocess.run(["./SoccerTows.x86_64", "-model1", model1_id, "-model2", model2_id, "-nographics", "-batchmode"])
|
184 |
print(f"Match {model1_id} against {model2_id} ended.")
|
185 |
-
model1.games_played += 1
|
186 |
-
model2.games_played += 1
|
187 |
|
188 |
|
189 |
-
def get_models_list() -> list:
|
190 |
"""
|
191 |
Get the list of models from the hub and the ELO file.
|
192 |
|
@@ -197,9 +198,14 @@ def get_models_list() -> list:
|
|
197 |
data = pd.read_csv(os.path.join(DATASET_REPO_URL, "resolve", "main", ELO_FILENAME))
|
198 |
models_on_hub = api.list_models(filter=["reinforcement-learning", "ml-agents", "ML-Agents-SoccerTwos"])
|
199 |
for i, row in data.iterrows():
|
|
|
|
|
|
|
200 |
models.append(Model(row["author"], row["model"], row["elo"], row["games_played"]))
|
201 |
-
models_ids.append(
|
202 |
for model in models_on_hub:
|
|
|
|
|
203 |
author, name = model.modelId.split("/")[0], model.modelId.split("/")[1]
|
204 |
if model.modelId not in models_ids:
|
205 |
models.append(Model(author, name))
|
@@ -228,7 +234,8 @@ def init_matchmaking():
|
|
228 |
4. Compute the new ELO rating for each model
|
229 |
5. Save the results to the hub
|
230 |
"""
|
231 |
-
|
|
|
232 |
matchmaking = Matchmaking(models)
|
233 |
matchmaking.run()
|
234 |
matchmaking.to_csv()
|
|
|
7 |
|
8 |
DATASET_REPO_URL = "https://huggingface.co/datasets/huggingface-projects/bot-fight-data"
|
9 |
DATASET_TEMP_REPO_URL = "https://huggingface.co/datasets/huggingface-projects/temp-match-results"
|
10 |
+
FILTER_FILE = "https://huggingface.co/datasets/huggingface-projects/filter-bad-models/raw/main/bad_models.csv"
|
11 |
ELO_FILENAME = "soccer_elo.csv"
|
12 |
HISTORY_FILENAME = "soccer_history.csv"
|
13 |
TEMP_FILENAME = "results.csv"
|
|
|
104 |
self.matches["model2"].append(model2.author + "/" + model2.name)
|
105 |
self.matches["result"].append(result)
|
106 |
self.matches["timestamp"].append(row["timestamp"])
|
107 |
+
model1.games_played += 1
|
108 |
+
model2.games_played += 1
|
109 |
data_dict = {"model1": [], "model2": [], "timestamp": [], "result": []}
|
110 |
df = pd.DataFrame(data_dict)
|
111 |
print(df.head())
|
|
|
185 |
model2_id = model2.author + "/" + model2.name
|
186 |
subprocess.run(["./SoccerTows.x86_64", "-model1", model1_id, "-model2", model2_id, "-nographics", "-batchmode"])
|
187 |
print(f"Match {model1_id} against {model2_id} ended.")
|
|
|
|
|
188 |
|
189 |
|
190 |
+
def get_models_list(filter_bad_models) -> list:
|
191 |
"""
|
192 |
Get the list of models from the hub and the ELO file.
|
193 |
|
|
|
198 |
data = pd.read_csv(os.path.join(DATASET_REPO_URL, "resolve", "main", ELO_FILENAME))
|
199 |
models_on_hub = api.list_models(filter=["reinforcement-learning", "ml-agents", "ML-Agents-SoccerTwos"])
|
200 |
for i, row in data.iterrows():
|
201 |
+
model_id = row["author"] + "/" + row["model"]
|
202 |
+
if model_id in filter_bad_models:
|
203 |
+
continue
|
204 |
models.append(Model(row["author"], row["model"], row["elo"], row["games_played"]))
|
205 |
+
models_ids.append(model_id)
|
206 |
for model in models_on_hub:
|
207 |
+
if model.modelId in filter_bad_models:
|
208 |
+
continue
|
209 |
author, name = model.modelId.split("/")[0], model.modelId.split("/")[1]
|
210 |
if model.modelId not in models_ids:
|
211 |
models.append(Model(author, name))
|
|
|
234 |
4. Compute the new ELO rating for each model
|
235 |
5. Save the results to the hub
|
236 |
"""
|
237 |
+
filter_bad_models = pd.read_csv(FILTER_FILE)["model"].tolist()
|
238 |
+
models = get_models_list(filter_bad_models)
|
239 |
matchmaking = Matchmaking(models)
|
240 |
matchmaking.run()
|
241 |
matchmaking.to_csv()
|