# Source: hf-transformers-bot — "Upload folder using huggingface_hub"
# Commit: 8993785 (verified); file size: 5.55 kB
# (HuggingFace Hub page chrome converted to comments so this file parses as YAML.)
hydra:
run:
dir: _benchmark/tmp8s70exus/commit=d806fa3e92289876e01ab19c9e19e9264ea1c1a1/${hydra.job.override_dirname}
sweep:
dir: _benchmark/tmp8s70exus/commit=d806fa3e92289876e01ab19c9e19e9264ea1c1a1
subdir: ${hydra.job.override_dirname}
launcher:
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
sweeper:
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
max_batch_size: null
params: null
help:
app_name: ${hydra.job.name}
header: '${hydra.help.app_name} is powered by Hydra.
'
footer: 'Powered by Hydra (https://hydra.cc)
Use --hydra-help to view Hydra specific help
'
template: '${hydra.help.header}
== Configuration groups ==
Compose your configuration from those groups (group=option)
$APP_CONFIG_GROUPS
== Config ==
Override anything in the config (foo.bar=value)
$CONFIG
${hydra.help.footer}
'
hydra_help:
template: 'Hydra (${hydra.runtime.version})
See https://hydra.cc for more info.
== Flags ==
$FLAGS_HELP
== Configuration groups ==
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
to command line)
$HYDRA_CONFIG_GROUPS
Use ''--cfg hydra'' to Show the Hydra config.
'
hydra_help: ???
hydra_logging:
version: 1
root:
level: ERROR
disable_existing_loggers: true
job_logging:
version: 1
root:
level: ERROR
disable_existing_loggers: true
env: {}
mode: MULTIRUN
searchpath: []
callbacks: {}
output_subdir: .hydra
overrides:
hydra:
- hydra/job_logging=disabled
- hydra/hydra_logging=disabled
- hydra.sweep.dir=_benchmark/tmp8s70exus/commit\=d806fa3e92289876e01ab19c9e19e9264ea1c1a1
- hydra.run.dir=_benchmark/tmp8s70exus/commit\=d806fa3e92289876e01ab19c9e19e9264ea1c1a1/${hydra.job.override_dirname}
- hydra.mode=MULTIRUN
task:
- backend.model=google/gemma-2b
- backend.cache_implementation=null,static
- backend.torch_compile=false,true
job:
name: cli
chdir: true
override_dirname: backend.cache_implementation=null,static,backend.model=google/gemma-2b,backend.torch_compile=false,true
id: ???
num: ???
config_name: generation
env_set:
OVERRIDE_BENCHMARKS: '1'
LOG_LEVEL: WARN
env_copy: []
config:
override_dirname:
kv_sep: '='
item_sep: ','
exclude_keys: []
runtime:
version: 1.3.2
version_base: '1.3'
cwd: /transformers
config_sources:
- path: hydra.conf
schema: pkg
provider: hydra
- path: optimum_benchmark
schema: pkg
provider: main
- path: /transformers/benchmark/config
schema: file
provider: command-line
- path: ''
schema: structured
provider: schema
output_dir: ???
choices:
backend: pytorch
launcher: process
scenario: inference
hydra/env: default
hydra/callbacks: null
hydra/job_logging: disabled
hydra/hydra_logging: disabled
hydra/hydra_help: default
hydra/help: default
hydra/sweeper: basic
hydra/launcher: basic
hydra/output: default
verbose: false
name: pytorch_generate
backend:
name: pytorch
version: 2.4.0+cu121
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
task: null
library: null
model_type: null
model: google/gemma-2b
processor: null
device: cuda
device_ids: '0'
seed: 42
inter_op_num_threads: null
intra_op_num_threads: null
model_kwargs: {}
processor_kwargs: {}
no_weights: true
device_map: null
torch_dtype: float16
eval_mode: true
to_bettertransformer: false
low_cpu_mem_usage: null
attn_implementation: null
cache_implementation: static
autocast_enabled: false
autocast_dtype: null
torch_compile: true
torch_compile_target: forward
torch_compile_config:
backend: inductor
mode: reduce-overhead
fullgraph: true
quantization_scheme: null
quantization_config: {}
deepspeed_inference: false
deepspeed_inference_config: {}
peft_type: null
peft_config: {}
scenario:
name: inference
_target_: optimum_benchmark.scenarios.inference.scenario.InferenceScenario
iterations: 2
duration: 0
warmup_runs: 10
input_shapes:
batch_size: 1
sequence_length: 7
new_tokens: null
memory: true
latency: true
energy: false
forward_kwargs: {}
generate_kwargs:
max_new_tokens: 128
min_new_tokens: 128
do_sample: false
call_kwargs: {}
launcher:
name: process
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher
device_isolation: true
device_isolation_action: warn
numactl: false
numactl_kwargs: {}
start_method: spawn
environment:
cpu: ' AMD EPYC 7R32'
cpu_count: 16
cpu_ram_mb: 66697.261056
system: Linux
machine: x86_64
platform: Linux-5.10.223-211.872.amzn2.x86_64-x86_64-with-glibc2.29
processor: x86_64
python_version: 3.8.10
gpu:
- NVIDIA A10G
gpu_count: 1
gpu_vram_mb: 24146608128
optimum_benchmark_version: 0.4.0
optimum_benchmark_commit: null
transformers_version: 4.45.0.dev0
transformers_commit: d806fa3e92289876e01ab19c9e19e9264ea1c1a1
accelerate_version: 0.34.0.dev0
accelerate_commit: null
diffusers_version: null
diffusers_commit: null
optimum_version: 1.22.0.dev0
optimum_commit: null
timm_version: 0.9.16
timm_commit: null
peft_version: 0.12.1.dev0
peft_commit: null