recover backend-cli
backend-cli.py  CHANGED  (+0 -15)
@@ -265,21 +265,6 @@ def process_pending_requests() -> bool:
 if __name__ == "__main__":
     wait = True
     hard_task_lst = None
-    local_debug = False
-    # debug specific task by ping
-    if local_debug:
-        debug_model_names = ['TinyLlama/TinyLlama-1.1B-Chat-v0.6']
-        # debug_task_name = 'ifeval'
-        debug_task_name = 'selfcheckgpt'
-        task_lst = TASKS_HARNESS.copy()
-        for task in task_lst:
-            for debug_model_name in debug_model_names:
-                task_name = task.benchmark
-                if task_name != debug_task_name:
-                    continue
-                eval_request = EvalRequest(model=debug_model_name, private=False, status='', json_filepath='', precision='float16')
-                results = process_evaluation(task, eval_request)
-
     if socket.gethostname() in {'hamburg', 'neuromancer'} or os.path.isdir("/home/pminervi"):
         wait = False
         hard_task_lst = ['nq', 'trivia', 'tqa']
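Note: the removed block was a local debug path that ran a single benchmark (e.g. 'selfcheckgpt') for a fixed list of models by building an EvalRequest and calling process_evaluation directly. If that workflow is still useful after this commit, it can live in a separate scratch script instead of the CLI entry point. The sketch below is reconstructed from the removed lines only; the import paths and the debug_single_task helper are assumptions for illustration, not part of the repository.

# Minimal local-debug sketch based on the removed block.
# The import locations below are assumptions; adjust them to the Space's actual layout
# (backend-cli.py itself is not importable as a module because of the hyphen).
from src.backend.manage_requests import EvalRequest            # assumed module path
from src.backend.run_eval_suite import process_evaluation      # assumed module path
from src.backend.tasks import TASKS_HARNESS                    # assumed module path


def debug_single_task(debug_task_name: str, debug_model_names: list[str]) -> None:
    """Run one benchmark locally for a few models (hypothetical helper)."""
    for task in TASKS_HARNESS.copy():
        # Same filter the removed code applied: only the requested benchmark.
        if task.benchmark != debug_task_name:
            continue
        for model_name in debug_model_names:
            # Same EvalRequest fields the removed code used.
            eval_request = EvalRequest(model=model_name, private=False, status='',
                                       json_filepath='', precision='float16')
            results = process_evaluation(task, eval_request)
            print(task.benchmark, model_name, results)


if __name__ == "__main__":
    debug_single_task('selfcheckgpt', ['TinyLlama/TinyLlama-1.1B-Chat-v0.6'])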