|
import gradio as gr |
|
|
|
|
|
def get_process_config():
    """Build the Gradio inputs for the ``process.*`` benchmark options.

    Returns:
        dict: maps each ``process.*`` config key to the Gradio component
        used to edit it in the UI.
    """
    numactl_checkbox = gr.Checkbox(
        value=True,
        label="process.numactl",
        info="Runs the model with numactl",
    )
    # Value is a python-dict-shaped string; presumably parsed downstream — verify against caller.
    numactl_kwargs_textbox = gr.Textbox(
        value="{'cpunodebind': 0, 'membind': 0}",
        label="process.numactl_kwargs",
        info="Additional python dict of kwargs to pass to numactl",
    )
    return {
        "process.numactl": numactl_checkbox,
        "process.numactl_kwargs": numactl_kwargs_textbox,
    }
|
|
|
|
|
def get_inference_config():
    """Build the Gradio inputs for the ``inference.*`` benchmark options.

    Returns:
        dict: maps each ``inference.*`` config key to the Gradio component
        used to edit it in the UI.
    """
    # The three integer sliders share the exact same range and default.
    slider_kwargs = {"step": 1, "value": 10, "minimum": 0, "maximum": 10}

    components = {}
    components["inference.warmup_runs"] = gr.Slider(
        label="inference.warmup_runs",
        info="Number of warmup runs",
        **slider_kwargs,
    )
    components["inference.duration"] = gr.Slider(
        label="inference.duration",
        info="Minimum duration of the benchmark in seconds",
        **slider_kwargs,
    )
    components["inference.iterations"] = gr.Slider(
        label="inference.iterations",
        info="Minimum number of iterations of the benchmark",
        **slider_kwargs,
    )
    components["inference.latency"] = gr.Checkbox(
        value=True,
        label="inference.latency",
        info="Measures the latency of the model",
    )
    components["inference.memory"] = gr.Checkbox(
        value=True,
        label="inference.memory",
        info="Measures the peak memory consumption",
    )
    # Dict-shaped string defaults; presumably parsed downstream — verify against caller.
    components["inference.input_shapes"] = gr.Textbox(
        value="{'batch_size': 2, 'sequence_length': 16}",
        label="inference.input_shapes",
        info="Input shapes to use for the benchmark",
    )
    components["inference.generate_kwargs"] = gr.Textbox(
        value="{'max_new_tokens': 32, 'min_new_tokens': 32}",
        label="inference.generate_kwargs",
        info="Additional python dict of kwargs to pass to the generate function",
    )
    return components
|
|
|
|
|
def get_pytorch_config():
    """Build the Gradio inputs for the ``pytorch.*`` backend options.

    Returns:
        dict: maps each ``pytorch.*`` config key to the Gradio component
        used to edit it in the UI.
    """
    dtype_dropdown = gr.Dropdown(
        value="float32",
        label="pytorch.torch_dtype",
        choices=["bfloat16", "float16", "float32", "auto"],
        info="The dtype to use for the model",
    )
    compile_checkbox = gr.Checkbox(
        value=False,
        label="pytorch.torch_compile",
        info="Compiles the model with torch.compile",
    )
    return {
        "pytorch.torch_dtype": dtype_dropdown,
        "pytorch.torch_compile": compile_checkbox,
    }
|
|
|
|
|
def get_openvino_config():
    """Build the Gradio inputs for the ``openvino.*`` backend options.

    Returns:
        dict: maps each ``openvino.*`` config key to the Gradio checkbox
        used to toggle it in the UI.
    """
    # (config key, default value, help text) for each boolean option;
    # the config key doubles as the checkbox label.
    checkbox_specs = [
        ("openvino.export", True, "Exports the model to OpenVINO"),
        ("openvino.use_cache", True, "Uses the decoder with cache if available"),
        ("openvino.use_merged", True, "Uses merged model if available"),
        ("openvino.reshape", False, "Reshapes the model to the input shape"),
        ("openvino.half", False, "Converts model to half precision"),
    ]
    return {
        key: gr.Checkbox(value=default, label=key, info=info)
        for key, default, info in checkbox_specs
    }
|
|