import os
import time
import traceback
from typing import Optional
from config_store import (
    get_process_config,
    get_inference_config,
    get_openvino_config,
    get_pytorch_config,
)
import gradio as gr
from huggingface_hub import whoami
from huggingface_hub.errors import GatedRepoError
from gradio_huggingfacehub_search import HuggingfaceHubSearch
from optimum_benchmark.launchers.device_isolation_utils import * # noqa
from optimum_benchmark.backends.openvino.utils import (
    TASKS_TO_OVMODELS,
    TASKS_TO_OVPIPELINES,
)
from optimum_benchmark.backends.transformers_utils import (
    TASKS_TO_AUTO_MODEL_CLASS_NAMES,
)
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    ProcessConfig,
    PyTorchConfig,
    OVConfig,
)
from optimum_benchmark.logging_utils import setup_logging
from optimum_benchmark.task_utils import infer_task_from_model_name_or_path
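
# This Space always benchmarks on CPU, with the process launcher and the
# inference scenario; results are pushed to BENCHMARKS_REPO_ID using the token
# read from the BENCHMARKS_HF_TOKEN environment variable.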
DEVICE = "cpu"
LAUNCHER = "process"
SCENARIO = "inference"
BACKENDS = ["pytorch", "openvino"]
BENCHMARKS_HF_TOKEN = os.getenv("BENCHMARKS_HF_TOKEN")
BENCHMARKS_REPO_ID = "optimum-benchmark/OpenVINO-Benchmarks"
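
# Only expose tasks that are supported both by OpenVINO (OVModel or OVPipeline
# classes) and by the transformers auto-model mapping.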
TASKS = set(TASKS_TO_OVMODELS.keys() | TASKS_TO_OVPIPELINES) & set(
    TASKS_TO_AUTO_MODEL_CLASS_NAMES.keys()
)
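

# Because the components are passed to `button.click` as a set, Gradio hands the
# callback a single dict mapping each component to its current value; components
# are therefore matched here by their `label`.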
def parse_configs(inputs):
    configs = {"process": {}, "inference": {}, "pytorch": {}, "openvino": {}}

    for key, value in inputs.items():
        if key.label == "model":
            model = value
        elif key.label == "task":
            task = value
        elif key.label == "openvino_model":
            openvino_model = value
        elif "." in key.label:
            backend, argument = key.label.split(".")
            configs[backend][argument] = value
        else:
            continue

    for key in configs.keys():
        for k, v in configs[key].items():
            if k in ["input_shapes", "generate_kwargs", "numactl_kwargs"]:
                # these text fields hold Python literals (e.g. dicts), so parse them
                configs[key][k] = eval(v)

    configs["process"] = ProcessConfig(**configs.pop("process"))
    configs["inference"] = InferenceConfig(**configs.pop("inference"))
    configs["pytorch"] = PyTorchConfig(
        task=task,
        model=model,
        device=DEVICE,
        **{k: v for k, v in configs["pytorch"].items() if v},
    )
    configs["openvino"] = OVConfig(
        task=task,
        # fall back to the PyTorch model id if no OpenVINO model was selected
        model=openvino_model or model,
        device=DEVICE,
        **{k: v for k, v in configs["openvino"].items() if v},
    )

    return configs
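

# Generator callback for the "Run Benchmark" button: it yields one markdown
# string per backend so the report panels update as each benchmark finishes.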
def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken]):
    if oauth_token is None:
        raise gr.Error("Please login to be able to run the benchmark.")

    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
    username = whoami(oauth_token.token)["name"]
    folder = f"{username}/{timestamp}"

    gr.Info(f"📩 Benchmark will be saved under {BENCHMARKS_REPO_ID}/{folder}")

    outputs = {backend: "Running..." for backend in BACKENDS}
    configs = parse_configs(inputs)

    yield tuple(outputs[b] for b in BACKENDS)
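
    # benchmark each backend in turn, pushing its config and report to the Hub
    # and streaming the updated (pytorch, openvino) report markdown at every step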
    for backend in BACKENDS:
        try:
            benchmark_name = f"{folder}/{backend}"
            benchmark_config = BenchmarkConfig(
                name=benchmark_name,
                backend=configs[backend],
                launcher=configs[LAUNCHER],
                scenario=configs[SCENARIO],
            )
            benchmark_report = Benchmark.launch(benchmark_config)
            benchmark_config.push_to_hub(
                repo_id=BENCHMARKS_REPO_ID,
                subfolder=benchmark_name,
                token=BENCHMARKS_HF_TOKEN,
            )
            benchmark_report.push_to_hub(
                repo_id=BENCHMARKS_REPO_ID,
                subfolder=benchmark_name,
                token=BENCHMARKS_HF_TOKEN,
            )
        except GatedRepoError:
            outputs[backend] = f"🔒 Model {configs[backend].model} is gated."
            yield tuple(outputs[b] for b in BACKENDS)
            gr.Info("🔒 Gated Repo Error while trying to access the model.")
        except Exception:
            outputs[backend] = f"\n```python-traceback\n{traceback.format_exc()}```\n"
            yield tuple(outputs[b] for b in BACKENDS)
            gr.Info(f"❌ Error while running benchmark for {backend} backend.")
        else:
            outputs[backend] = f"\n{benchmark_report.to_markdown_text()}\n"
            yield tuple(outputs[b] for b in BACKENDS)
            gr.Info(f"✅ Benchmark for {backend} backend ran successfully.")
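

# Triggered when a model is submitted in the search box: infer the task from the
# Hub metadata and pre-fill the task dropdown, or ask the user to pick one.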
def update_task(model_id):
    try:
        inferred_task = infer_task_from_model_name_or_path(model_id)
    except GatedRepoError:
        raise gr.Error(
            f"Model {model_id} is gated, please use optimum-benchmark locally to benchmark it."
        )
    except Exception:
        raise gr.Error(
            f"Error while inferring task for {model_id}, please select a task manually."
        )

    if inferred_task not in TASKS:
        raise gr.Error(
            f"Task {inferred_task} is not supported by OpenVINO, please select a task manually."
        )

    return inferred_task
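

# Gradio UI: login button, header, model/task selection, per-backend configs,
# run button, and report panels.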
with gr.Blocks() as demo:
    # add login button
    gr.LoginButton()

    # add image
    gr.HTML(
        """<img src="https://huggingface.co/spaces/optimum/optimum-benchmark-ui/resolve/main/huggy_bench.png" style="display: block; margin-left: auto; margin-right: auto; width: 30%;">"""
        "<h1 style='text-align: center'>🤗 Optimum-Benchmark Interface 🏋️</h1>"
        "<p style='text-align: center'>"
        "This Space uses <a href='https://github.com/huggingface/optimum-benchmark.git'>Optimum-Benchmark</a> to automatically benchmark a model from the Hub on different backends."
        "<br>The results (config and report) will be pushed under your namespace in a benchmark repository on the Hub."
        "</p>"
    )
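
    # model selection: the PyTorch model to benchmark, an optional pre-exported
    # OpenVINO model, and the task to benchmark on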
with gr.Column(variant="panel"):
model = HuggingfaceHubSearch(
placeholder="Search for a PyTorch model",
search_type="model",
label="model",
)
openvino_model = HuggingfaceHubSearch(
placeholder="Search for an OpenVINO model (optional)",
search_type="model",
label="openvino_model",
)
with gr.Row():
task = gr.Dropdown(
info="Task to run the benchmark on.",
elem_id="task-dropdown",
choices=TASKS,
label="task",
)
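
    # launcher, scenario, and per-backend config widgets built by config_store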
with gr.Column(variant="panel"):
with gr.Accordion(label="Process Config", open=False, visible=True):
process_config = get_process_config()
with gr.Accordion(label="Inference Config", open=False, visible=True):
inference_config = get_inference_config()
with gr.Row() as backend_configs:
with gr.Accordion(label="PyTorch Config", open=False, visible=True):
pytorch_config = get_pytorch_config()
with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
openvino_config = get_openvino_config()
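
    # run button and one report panel per backend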
    with gr.Row():
        button = gr.Button(value="Run Benchmark", variant="primary")

    with gr.Row():
        with gr.Accordion(label="PyTorch Report", open=True, visible=True):
            pytorch_report = gr.Markdown()
        with gr.Accordion(label="OpenVINO Report", open=True, visible=True):
            openvino_report = gr.Markdown()
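
    # wire the events: submitting a model pre-fills the task dropdown, and the
    # button streams run_benchmark's yields into the report panels (outputs are
    # ordered to match the tuples yielded by run_benchmark)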
    model.submit(inputs=model, outputs=task, fn=update_task)

    button.click(
        fn=run_benchmark,
        inputs={
            task,
            model,
            openvino_model,
            # backends,
            *process_config.values(),
            *inference_config.values(),
            *pytorch_config.values(),
            *openvino_config.values(),
        },
        outputs=[
            pytorch_report,
            openvino_report,
        ],
        concurrency_limit=1,
    )
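

# Local entrypoint: configure optimum-benchmark logging and launch the app with
# a small request queue.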
if __name__ == "__main__":
os.environ["LOG_TO_FILE"] = "0"
os.environ["LOG_LEVEL"] = "INFO"
setup_logging(level="INFO", prefix="MAIN-PROCESS")
demo.queue(max_size=10).launch()