hf_public_repos/datasets/src/datasets/packaged_modules/webdataset/webdataset.py

import io
import json
from itertools import islice
from typing import Any, Callable, Dict, List
import numpy as np
import pyarrow as pa
import datasets
logger = datasets.utils.logging.get_logger(__name__)
class WebDataset(datasets.GeneratorBasedBuilder):
DEFAULT_WRITER_BATCH_SIZE = 100
IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script
AUDIO_EXTENSIONS: List[str] # definition at the bottom of the script
DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script
NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5
@classmethod
def _get_pipeline_from_tar(cls, tar_path, tar_iterator):
current_example = {}
for filename, f in tar_iterator:
if "." in filename:
example_key, field_name = filename.split(".", 1)
if current_example and current_example["__key__"] != example_key:
yield current_example
current_example = {}
current_example["__key__"] = example_key
current_example["__url__"] = tar_path
current_example[field_name.lower()] = f.read()
if field_name in cls.DECODERS:
current_example[field_name] = cls.DECODERS[field_name](current_example[field_name])
if current_example:
yield current_example
def _info(self) -> datasets.DatasetInfo:
return datasets.DatasetInfo()
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
# Download the data files
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
tar_paths = data_files
if isinstance(tar_paths, str):
tar_paths = [tar_paths]
tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
splits = [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
)
]
else:
splits = []
for split_name, tar_paths in data_files.items():
if isinstance(tar_paths, str):
tar_paths = [tar_paths]
tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
splits.append(
datasets.SplitGenerator(
name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
)
)
if not self.info.features:
# Get one example to get the feature types
pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0])
first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE))
if any(example.keys() != first_examples[0].keys() for example in first_examples):
raise ValueError(
"The TAR archives of the dataset should be in WebDataset format, "
"but the files in the archive don't share the same prefix or the same types."
)
pa_tables = [pa.Table.from_pylist([example]) for example in first_examples]
if datasets.config.PYARROW_VERSION.major < 14:
inferred_arrow_schema = pa.concat_tables(pa_tables, promote=True).schema
else:
inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema
features = datasets.Features.from_arrow_schema(inferred_arrow_schema)
# Set Image types
for field_name in first_examples[0]:
extension = field_name.rsplit(".", 1)[-1]
if extension in self.IMAGE_EXTENSIONS:
features[field_name] = datasets.Image()
# Set Audio types
for field_name in first_examples[0]:
extension = field_name.rsplit(".", 1)[-1]
if extension in self.AUDIO_EXTENSIONS:
features[field_name] = datasets.Audio()
self.info.features = features
return splits
def _generate_examples(self, tar_paths, tar_iterators):
image_field_names = [
field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image)
]
audio_field_names = [
field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio)
]
for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)):
for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)):
for field_name in image_field_names + audio_field_names:
example[field_name] = {"path": example["__key__"] + "." + field_name, "bytes": example[field_name]}
yield f"{tar_idx}_{example_idx}", example
# Obtained with:
# ```
# import PIL.Image
# IMAGE_EXTENSIONS = []
# PIL.Image.init()
# for ext, format in PIL.Image.EXTENSION.items():
# if format in PIL.Image.OPEN:
# IMAGE_EXTENSIONS.append(ext[1:])
# ```
# We intentionally do not run this code on launch because:
# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
IMAGE_EXTENSIONS = [
"blp",
"bmp",
"dib",
"bufr",
"cur",
"pcx",
"dcx",
"dds",
"ps",
"eps",
"fit",
"fits",
"fli",
"flc",
"ftc",
"ftu",
"gbr",
"gif",
"grib",
"h5",
"hdf",
"png",
"apng",
"jp2",
"j2k",
"jpc",
"jpf",
"jpx",
"j2c",
"icns",
"ico",
"im",
"iim",
"tif",
"tiff",
"jfif",
"jpe",
"jpg",
"jpeg",
"mpg",
"mpeg",
"msp",
"pcd",
"pxr",
"pbm",
"pgm",
"ppm",
"pnm",
"psd",
"bw",
"rgb",
"rgba",
"sgi",
"ras",
"tga",
"icb",
"vda",
"vst",
"webp",
"wmf",
"emf",
"xbm",
"xpm",
]
WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
"aiff",
"au",
"avr",
"caf",
"flac",
"htk",
"svx",
"mat4",
"mat5",
"mpc2k",
"ogg",
"paf",
"pvf",
"raw",
"rf64",
"sd2",
"sds",
"ircam",
"voc",
"w64",
"wav",
"nist",
"wavex",
"wve",
"xi",
"mp3",
"opus",
]
WebDataset.AUDIO_EXTENSIONS = AUDIO_EXTENSIONS
def text_loads(data: bytes):
return data.decode("utf-8")
def tenbin_loads(data: bytes):
from . import _tenbin
return _tenbin.decode_buffer(data)
def msgpack_loads(data: bytes):
import msgpack
return msgpack.unpackb(data)
def npy_loads(data: bytes):
import numpy.lib.format
stream = io.BytesIO(data)
return numpy.lib.format.read_array(stream, allow_pickle=False)
def npz_loads(data: bytes):
return np.load(io.BytesIO(data), allow_pickle=False)
def cbor_loads(data: bytes):
import cbor
return cbor.loads(data)
# Obtained by checking `decoders` in `webdataset.autodecode`
# and removing unsafe extension decoders.
# Removed Pickle decoders:
# - "pyd": lambda data: pickle.loads(data)
# - "pickle": lambda data: pickle.loads(data)
# Removed Torch decoders:
# - "pth": lambda data: torch_loads(data)
# Modified NumPy decoders to fix CVE-2019-6446 (add allow_pickle=False):
# - "npy": npy_loads,
# - "npz": lambda data: np.load(io.BytesIO(data)),
DECODERS = {
"txt": text_loads,
"text": text_loads,
"transcript": text_loads,
"cls": int,
"cls2": int,
"index": int,
"inx": int,
"id": int,
"json": json.loads,
"jsn": json.loads,
"ten": tenbin_loads,
"tb": tenbin_loads,
"mp": msgpack_loads,
"msg": msgpack_loads,
"npy": npy_loads,
"npz": npz_loads,
"cbor": cbor_loads,
}
WebDataset.DECODERS = DECODERS
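# Usage sketch (editor's addition, not part of the original file). This builder is a
# packaged module, so it is normally reached through `load_dataset`; a minimal, hedged
# example, assuming the module is exposed under the "webdataset" name and that local
# `.tar` shards in WebDataset layout exist at the (hypothetical) path below:
# ```
# from datasets import load_dataset
#
# ds = load_dataset("webdataset", data_files={"train": "shards/train-*.tar"}, split="train")
# # Each example gets one field per file extension inside the shard, e.g.
# # ds[0]["jpg"] decoded as an Image feature and ds[0]["json"] decoded via json.loads.
# ```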

hf_public_repos/datasets/src/datasets/packaged_modules/webdataset/_tenbin.py

#
# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.
# This file comes from the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
"""
Binary tensor encodings for PyTorch and NumPy.
This defines efficient binary encodings for tensors. The format is 8 byte
aligned and can be used directly for computations when transmitted, say,
via RDMA. The format is supported by WebDataset with the `.ten` filename
extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used
for fast tensor storage with LMDB and in disk files (which can be memory
mapped).
Data is encoded as a series of chunks:
- magic number (int64)
- length in bytes (int64)
- bytes (multiple of 64 bytes long)
Arrays are a header chunk followed by a data chunk.
Header chunks have the following structure:
- dtype (int64)
- 8 byte array name
- ndim (int64)
- dim[0]
- dim[1]
- ...
"""
import struct
import sys
import numpy as np
def bytelen(a):
"""Determine the length of a in bytes."""
if hasattr(a, "nbytes"):
return a.nbytes
elif isinstance(a, (bytearray, bytes)):
return len(a)
else:
raise ValueError(a, "cannot determine nbytes")
def bytedata(a):
"""Return a the raw data corresponding to a."""
if isinstance(a, (bytearray, bytes, memoryview)):
return a
elif hasattr(a, "data"):
return a.data
else:
raise ValueError(a, "cannot return bytedata")
# tables for converting between long/short NumPy dtypes
long_to_short = """
float16 f2
float32 f4
float64 f8
int8 i1
int16 i2
int32 i4
int64 i8
uint8 u1
uint16 u2
uint32 u4
uint64 u8
""".strip()
long_to_short = [x.split() for x in long_to_short.split("\n")]
long_to_short = {x[0]: x[1] for x in long_to_short}
short_to_long = {v: k for k, v in long_to_short.items()}
def check_acceptable_input_type(data, allow64):
"""Check that the data has an acceptable type for tensor encoding.
:param data: array
:param allow64: allow 64 bit types
"""
for a in data:
if a.dtype.name not in long_to_short:
raise ValueError("unsupported dataypte")
if not allow64 and a.dtype.name in ["float64", "int64", "uint64"]:
raise ValueError("64 bit datatypes not allowed unless explicitly enabled")
def str64(s):
"""Convert a string to an int64."""
s = s + "\0" * (8 - len(s))
s = s.encode("ascii")
return struct.unpack("@q", s)[0]
def unstr64(i):
"""Convert an int64 to a string."""
b = struct.pack("@q", i)
return b.decode("ascii").strip("\0")
def check_infos(data, infos, required_infos=None):
"""Verify the info strings."""
if required_infos is False or required_infos is None:
return data
if required_infos is True:
return data, infos
if not isinstance(required_infos, (tuple, list)):
raise ValueError("required_infos must be tuple or list")
for required, actual in zip(required_infos, infos):
    if required != actual:
        raise ValueError(f"actual info {actual} doesn't match required info {required}")
return data
def encode_header(a, info=""):
"""Encode an array header as a byte array."""
if a.ndim >= 10:
raise ValueError("too many dimensions")
if a.nbytes != np.prod(a.shape) * a.itemsize:
raise ValueError("mismatch between size and shape")
if a.dtype.name not in long_to_short:
raise ValueError("unsupported array type")
header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape)
return bytedata(np.array(header, dtype="i8"))
def decode_header(h):
"""Decode a byte array into an array header."""
h = np.frombuffer(h, dtype="i8")
if unstr64(h[0]) not in short_to_long:
raise ValueError("unsupported array type")
dtype = np.dtype(short_to_long[unstr64(h[0])])
info = unstr64(h[1])
rank = int(h[2])
shape = tuple(h[3 : 3 + rank])
return shape, dtype, info
def encode_list(l, infos=None): # noqa: E741
"""Given a list of arrays, encode them into a list of byte arrays."""
if infos is None:
infos = [""]
else:
if len(l) != len(infos):
raise ValueError(f"length of list {l} must muatch length of infos {infos}")
result = []
for i, a in enumerate(l):
header = encode_header(a, infos[i % len(infos)])
result += [header, bytedata(a)]
return result
def decode_list(l, infos=False): # noqa: E741
"""Given a list of byte arrays, decode them into arrays."""
result = []
infos0 = []
for header, data in zip(l[::2], l[1::2]):
shape, dtype, info = decode_header(header)
a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape)
result += [a]
infos0 += [info]
return check_infos(result, infos0, infos)
magic_str = "~TenBin~"
magic = str64(magic_str)
magic_bytes = unstr64(magic).encode("ascii")
def roundup(n, k=64):
"""Round up to the next multiple of 64."""
return k * ((n + k - 1) // k)
def encode_chunks(l): # noqa: E741
"""Encode a list of chunks into a single byte array, with lengths and magics.."""
size = sum(16 + roundup(b.nbytes) for b in l)
result = bytearray(size)
offset = 0
for b in l:
result[offset : offset + 8] = magic_bytes
offset += 8
result[offset : offset + 8] = struct.pack("@q", b.nbytes)
offset += 8
result[offset : offset + bytelen(b)] = b
offset += roundup(bytelen(b))
return result
def decode_chunks(buf):
"""Decode a byte array into a list of chunks."""
result = []
offset = 0
total = bytelen(buf)
while offset < total:
if magic_bytes != buf[offset : offset + 8]:
raise ValueError("magic bytes mismatch")
offset += 8
nbytes = struct.unpack("@q", buf[offset : offset + 8])[0]
offset += 8
b = buf[offset : offset + nbytes]
offset += roundup(nbytes)
result.append(b)
return result
def encode_buffer(l, infos=None): # noqa: E741
"""Encode a list of arrays into a single byte array."""
if not isinstance(l, list):
raise ValueError("requires list")
return encode_chunks(encode_list(l, infos=infos))
def decode_buffer(buf, infos=False):
"""Decode a byte array into a list of arrays."""
return decode_list(decode_chunks(buf), infos=infos)
def write_chunk(stream, buf):
"""Write a byte chunk to the stream with magics, length, and padding."""
nbytes = bytelen(buf)
stream.write(magic_bytes)
stream.write(struct.pack("@q", nbytes))
stream.write(bytedata(buf))
padding = roundup(nbytes) - nbytes
if padding > 0:
stream.write(b"\0" * padding)
def read_chunk(stream):
"""Read a byte chunk from a stream with magics, length, and padding."""
magic = stream.read(8)
if magic == b"":
return None
if magic != magic_bytes:
raise ValueError("magic number does not match")
nbytes = stream.read(8)
nbytes = struct.unpack("@q", nbytes)[0]
if nbytes < 0:
raise ValueError("negative nbytes")
data = stream.read(nbytes)
padding = roundup(nbytes) - nbytes
if padding > 0:
stream.read(padding)
return data
def write(stream, l, infos=None): # noqa: E741
"""Write a list of arrays to a stream, with magics, length, and padding."""
for chunk in encode_list(l, infos=infos):
write_chunk(stream, chunk)
def read(stream, n=sys.maxsize, infos=False):
"""Read a list of arrays from a stream, with magics, length, and padding."""
chunks = []
for _ in range(n):
header = read_chunk(stream)
if header is None:
break
data = read_chunk(stream)
if data is None:
raise ValueError("premature EOF")
chunks += [header, data]
return decode_list(chunks, infos=infos)
def save(fname, *args, infos=None, nocheck=False):
"""Save a list of arrays to a file, with magics, length, and padding."""
if not nocheck and not fname.endswith(".ten"):
raise ValueError("file name should end in .ten")
with open(fname, "wb") as stream:
write(stream, args, infos=infos)
def load(fname, infos=False, nocheck=False):
"""Read a list of arrays from a file, with magics, length, and padding."""
if not nocheck and not fname.endswith(".ten"):
raise ValueError("file name should end in .ten")
with open(fname, "rb") as stream:
return read(stream, infos=infos)
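# Round-trip sketch (editor's addition, not part of the original file), using only the
# functions defined above:
# ```
# import numpy as np
#
# payload = encode_buffer([np.arange(12, dtype="float32").reshape(3, 4)])
# [restored] = decode_buffer(payload)
# assert restored.shape == (3, 4) and restored.dtype == np.float32
# ```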

hf_public_repos/datasets/src/datasets/features/features.py

# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" This class handle features definition in datasets and some utilities to display table type."""
import copy
import json
import re
import sys
from collections.abc import Iterable, Mapping
from collections.abc import Sequence as SequenceABC
from dataclasses import InitVar, dataclass, field, fields
from functools import reduce, wraps
from operator import mul
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
from typing import Sequence as Sequence_
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.types
import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
from pandas.api.extensions import ExtensionArray as PandasExtensionArray
from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
from .. import config
from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
from ..table import array_cast
from ..utils import logging
from ..utils.py_utils import asdict, first_non_null_value, zip_dict
from .audio import Audio
from .image import Image, encode_pil_image
from .translation import Translation, TranslationVariableLanguages
logger = logging.get_logger(__name__)
def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
"""
_arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
"""
if pyarrow.types.is_null(arrow_type):
return "null"
elif pyarrow.types.is_boolean(arrow_type):
return "bool"
elif pyarrow.types.is_int8(arrow_type):
return "int8"
elif pyarrow.types.is_int16(arrow_type):
return "int16"
elif pyarrow.types.is_int32(arrow_type):
return "int32"
elif pyarrow.types.is_int64(arrow_type):
return "int64"
elif pyarrow.types.is_uint8(arrow_type):
return "uint8"
elif pyarrow.types.is_uint16(arrow_type):
return "uint16"
elif pyarrow.types.is_uint32(arrow_type):
return "uint32"
elif pyarrow.types.is_uint64(arrow_type):
return "uint64"
elif pyarrow.types.is_float16(arrow_type):
return "float16" # pyarrow dtype is "halffloat"
elif pyarrow.types.is_float32(arrow_type):
return "float32" # pyarrow dtype is "float"
elif pyarrow.types.is_float64(arrow_type):
return "float64" # pyarrow dtype is "double"
elif pyarrow.types.is_time32(arrow_type):
return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
elif pyarrow.types.is_time64(arrow_type):
return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
elif pyarrow.types.is_timestamp(arrow_type):
if arrow_type.tz is None:
return f"timestamp[{arrow_type.unit}]"
elif arrow_type.tz:
return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
else:
raise ValueError(f"Unexpected timestamp object {arrow_type}.")
elif pyarrow.types.is_date32(arrow_type):
return "date32" # pyarrow dtype is "date32[day]"
elif pyarrow.types.is_date64(arrow_type):
return "date64" # pyarrow dtype is "date64[ms]"
elif pyarrow.types.is_duration(arrow_type):
return f"duration[{arrow_type.unit}]"
elif pyarrow.types.is_decimal128(arrow_type):
return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
elif pyarrow.types.is_decimal256(arrow_type):
return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
elif pyarrow.types.is_binary(arrow_type):
return "binary"
elif pyarrow.types.is_large_binary(arrow_type):
return "large_binary"
elif pyarrow.types.is_string(arrow_type):
return "string"
elif pyarrow.types.is_large_string(arrow_type):
return "large_string"
else:
raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
"""
string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
This is necessary because the datasets.Value() primitive type is constructed using a string dtype
Value(dtype=str)
But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema,
which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
purpose of this function.
"""
def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
if examples:
examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
msg += f"\nValid examples include: {examples}."
if urls:
urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
msg += f"\nFor more insformation, see: {urls}."
return msg
if datasets_dtype in pa.__dict__:
return pa.__dict__[datasets_dtype]()
if (datasets_dtype + "_") in pa.__dict__:
return pa.__dict__[datasets_dtype + "_"]()
timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
if timestamp_matches:
timestamp_internals = timestamp_matches.group(1)
internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
if timestamp_internals in ["s", "ms", "us", "ns"]:
return pa.timestamp(timestamp_internals)
elif internals_matches:
return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"timestamp",
examples=["timestamp[us]", "timestamp[us, tz=America/New_York"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
)
)
duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
if duration_matches:
duration_internals = duration_matches.group(1)
if duration_internals in ["s", "ms", "us", "ns"]:
return pa.duration(duration_internals)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"duration",
examples=["duration[s]", "duration[us]"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
)
)
time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
if time_matches:
time_internals_bits = time_matches.group(1)
if time_internals_bits == "32":
time_internals_unit = time_matches.group(2)
if time_internals_unit in ["s", "ms"]:
return pa.time32(time_internals_unit)
else:
raise ValueError(
f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
)
elif time_internals_bits == "64":
time_internals_unit = time_matches.group(2)
if time_internals_unit in ["us", "ns"]:
return pa.time64(time_internals_unit)
else:
raise ValueError(
f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"time",
examples=["time32[s]", "time64[us]"],
urls=[
"https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
"https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
],
)
)
decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
if decimal_matches:
decimal_internals_bits = decimal_matches.group(1)
if decimal_internals_bits == "128":
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
if decimal_internals_precision_and_scale:
precision = decimal_internals_precision_and_scale.group(1)
scale = decimal_internals_precision_and_scale.group(2)
return pa.decimal128(int(precision), int(scale))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal128",
examples=["decimal128(10, 2)", "decimal128(4, -2)"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
)
)
elif decimal_internals_bits == "256":
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
if decimal_internals_precision_and_scale:
precision = decimal_internals_precision_and_scale.group(1)
scale = decimal_internals_precision_and_scale.group(2)
return pa.decimal256(int(precision), int(scale))
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal256",
examples=["decimal256(30, 2)", "decimal256(38, -4)"],
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
)
)
else:
raise ValueError(
_dtype_error_msg(
datasets_dtype,
"decimal",
examples=["decimal128(12, 3)", "decimal256(40, 6)"],
urls=[
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
],
)
)
raise ValueError(
f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
f"Please make sure to use a correct data type, see: "
f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
)
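# Round-trip sketch (editor's addition, not part of the original file): the two helpers
# above are intended to be inverses on supported dtypes, e.g.:
# ```
# import pyarrow as pa
#
# assert string_to_arrow("int32") == pa.int32()
# assert string_to_arrow("timestamp[us, tz=UTC]") == pa.timestamp("us", tz="UTC")
# assert _arrow_to_datasets_dtype(pa.large_string()) == "large_string"
# ```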
def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
"""
Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
It works recursively.
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.
Args:
obj: the object (nested struct) to cast.
only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
Indeed Arrow only supports converting 1-dimensional array values.
optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
and if it doesn't, not checking the rest of the list elements.
Returns:
casted_obj: the casted object
has_changed (bool): True if the object has been changed, False if it is identical
"""
if config.TF_AVAILABLE and "tensorflow" in sys.modules:
import tensorflow as tf
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if config.JAX_AVAILABLE and "jax" in sys.modules:
import jax.numpy as jnp
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(obj, np.ndarray):
if obj.ndim == 0:
return obj[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj, False
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj
],
True,
)
elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
if obj.ndim == 0:
return obj.detach().cpu().numpy()[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj.detach().cpu().numpy(), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj.detach().cpu().numpy()
],
True,
)
elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
if obj.ndim == 0:
return obj.numpy()[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return obj.numpy(), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in obj.numpy()
],
True,
)
elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
if obj.ndim == 0:
return np.asarray(obj)[()], True
elif not only_1d_for_numpy or obj.ndim == 1:
return np.asarray(obj), True
else:
return (
[
_cast_to_python_objects(
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for x in np.asarray(obj)
],
True,
)
elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
return encode_pil_image(obj), True
elif isinstance(obj, pd.Series):
return (
_cast_to_python_objects(
obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0],
True,
)
elif isinstance(obj, pd.DataFrame):
return (
{
key: _cast_to_python_objects(
value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for key, value in obj.to_dict("list").items()
},
True,
)
elif isinstance(obj, pd.Timestamp):
return obj.to_pydatetime(), True
elif isinstance(obj, pd.Timedelta):
return obj.to_pytimedelta(), True
elif isinstance(obj, Mapping):
has_changed = not isinstance(obj, dict)
output = {}
for k, v in obj.items():
casted_v, has_changed_v = _cast_to_python_objects(
v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)
has_changed |= has_changed_v
output[k] = casted_v
return output if has_changed else obj, has_changed
elif hasattr(obj, "__array__"):
return (
_cast_to_python_objects(
obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0],
True,
)
elif isinstance(obj, (list, tuple)):
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt):
break
casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)
if has_changed_first_elmt or not optimize_list_casting:
return (
[
_cast_to_python_objects(
elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
for elmt in obj
],
True,
)
else:
if isinstance(obj, (list, tuple)):
return obj, False
else:
return list(obj), True
else:
return obj, False
else:
return obj, False
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
"""
Cast numpy/pytorch/tensorflow/pandas objects to python lists.
It works recursively.
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.
Args:
obj: the object (nested struct) to cast
only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
Indeed Arrow only supports converting 1-dimensional array values.
optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
and if it doesn't, not checking the rest of the list elements.
Returns:
casted_obj: the casted object
"""
return _cast_to_python_objects(
obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
)[0]
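# Usage sketch (editor's addition, not part of the original file): nested pandas/numpy
# containers are recursively converted to plain Python objects, e.g.:
# ```
# import pandas as pd
#
# cast_to_python_objects({"ids": pd.Series([1, 2, 3])})
# # -> {'ids': [1, 2, 3]}
# ```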
@dataclass
class Value:
"""
The `Value` dtypes are as follows:
- `null`
- `bool`
- `int8`
- `int16`
- `int32`
- `int64`
- `uint8`
- `uint16`
- `uint32`
- `uint64`
- `float16`
- `float32` (alias float)
- `float64` (alias double)
- `time32[(s|ms)]`
- `time64[(us|ns)]`
- `timestamp[(s|ms|us|ns)]`
- `timestamp[(s|ms|us|ns), tz=(tzstring)]`
- `date32`
- `date64`
- `duration[(s|ms|us|ns)]`
- `decimal128(precision, scale)`
- `decimal256(precision, scale)`
- `binary`
- `large_binary`
- `string`
- `large_string`
Example:
```py
>>> from datasets import Features
>>> features = Features({'stars': Value(dtype='int32')})
>>> features
{'stars': Value(dtype='int32', id=None)}
```
"""
dtype: str
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = field(default="Value", init=False, repr=False)
def __post_init__(self):
if self.dtype == "double": # fix inferred type
self.dtype = "float64"
if self.dtype == "float": # fix inferred type
self.dtype = "float32"
self.pa_type = string_to_arrow(self.dtype)
def __call__(self):
return self.pa_type
def encode_example(self, value):
if pa.types.is_boolean(self.pa_type):
return bool(value)
elif pa.types.is_integer(self.pa_type):
return int(value)
elif pa.types.is_floating(self.pa_type):
return float(value)
elif pa.types.is_string(self.pa_type):
return str(value)
else:
return value
class _ArrayXD:
def __post_init__(self):
self.shape = tuple(self.shape)
def __call__(self):
pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
return pa_type
def encode_example(self, value):
return value
@dataclass
class Array2D(_ArrayXD):
"""Create a two-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array2D", init=False, repr=False)
@dataclass
class Array3D(_ArrayXD):
"""Create a three-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array3D", init=False, repr=False)
@dataclass
class Array4D(_ArrayXD):
"""Create a four-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array4D", init=False, repr=False)
@dataclass
class Array5D(_ArrayXD):
"""Create a five-dimensional array.
Args:
shape (`tuple`):
The size of each dimension.
dtype (`str`):
The value of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array5D", init=False, repr=False)
class _ArrayXDExtensionType(pa.ExtensionType):
ndims: Optional[int] = None
def __init__(self, shape: tuple, dtype: str):
if self.ndims is None or self.ndims <= 1:
raise ValueError("You must instantiate an array type with a value for dim that is > 1")
if len(shape) != self.ndims:
raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
for dim in range(1, self.ndims):
if shape[dim] is None:
raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
self.shape = tuple(shape)
self.value_type = dtype
self.storage_dtype = self._generate_dtype(self.value_type)
pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")
def __arrow_ext_serialize__(self):
return json.dumps((self.shape, self.value_type)).encode()
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
args = json.loads(serialized)
return cls(*args)
# This was added to pa.ExtensionType in pyarrow >= 13.0.0
def __reduce__(self):
return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())
def __hash__(self):
return hash((self.__class__, self.shape, self.value_type))
def __arrow_ext_class__(self):
return ArrayExtensionArray
def _generate_dtype(self, dtype):
dtype = string_to_arrow(dtype)
for d in reversed(self.shape):
dtype = pa.list_(dtype)
# Don't specify the size of the list, since fixed length list arrays have issues
# being validated after slicing in pyarrow 0.17.1
return dtype
def to_pandas_dtype(self):
return PandasArrayExtensionDtype(self.value_type)
class Array2DExtensionType(_ArrayXDExtensionType):
ndims = 2
class Array3DExtensionType(_ArrayXDExtensionType):
ndims = 3
class Array4DExtensionType(_ArrayXDExtensionType):
ndims = 4
class Array5DExtensionType(_ArrayXDExtensionType):
ndims = 5
# Register the extension types for deserialization
pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))
def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
"""
When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.
# zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
# primitive types are types for which the physical representation in arrow and in numpy is the same
# https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
# see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
# and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
"""
def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
if pa.types.is_list(pa_type):
return _unnest_pa_type(pa_type.value_type)
return pa_type
if unnest:
pa_type = _unnest_pa_type(pa_type)
return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
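# Behavior sketch (editor's addition, not part of the original file):
# ```
# import pyarrow as pa
#
# assert _is_zero_copy_only(pa.int64())                           # fixed-width numeric: zero-copy
# assert not _is_zero_copy_only(pa.bool_())                       # booleans are bit-packed in Arrow
# assert _is_zero_copy_only(pa.list_(pa.float32()), unnest=True)  # unnest looks at the value type
# ```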
class ArrayExtensionArray(pa.ExtensionArray):
def __array__(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
return self.to_numpy(zero_copy_only=zero_copy_only)
def __getitem__(self, i):
return self.storage[i]
def to_numpy(self, zero_copy_only=True):
storage: pa.ListArray = self.storage
null_mask = storage.is_null().to_numpy(zero_copy_only=False)
if self.type.shape[0] is not None:
size = 1
null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))
for i in range(self.type.ndims):
size *= self.type.shape[i]
storage = storage.flatten()
numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)
if len(null_indices):
numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
else:
shape = self.type.shape
ndims = self.type.ndims
arrays = []
first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
for i, is_null in enumerate(null_mask):
if is_null:
arrays.append(np.nan)
else:
storage_el = storage[i : i + 1]
first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
# flatten storage
for _ in range(ndims):
storage_el = storage_el.flatten()
numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))
if len(np.unique(np.diff(first_dim_offsets))) > 1:
# ragged
numpy_arr = np.empty(len(arrays), dtype=object)
numpy_arr[:] = arrays
else:
numpy_arr = np.array(arrays)
return numpy_arr
def to_pylist(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
if self.type.shape[0] is None and numpy_arr.dtype == object:
return [arr.tolist() for arr in numpy_arr.tolist()]
else:
return numpy_arr.tolist()
class PandasArrayExtensionDtype(PandasExtensionDtype):
_metadata = "value_type"
def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
self._value_type = value_type
def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
if isinstance(array, pa.ChunkedArray):
array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
return PandasArrayExtensionArray(numpy_arr)
@classmethod
def construct_array_type(cls):
return PandasArrayExtensionArray
@property
def type(self) -> type:
return np.ndarray
@property
def kind(self) -> str:
return "O"
@property
def name(self) -> str:
return f"array[{self.value_type}]"
@property
def value_type(self) -> np.dtype:
return self._value_type
class PandasArrayExtensionArray(PandasExtensionArray):
def __init__(self, data: np.ndarray, copy: bool = False):
self._data = data if not copy else np.array(data)
self._dtype = PandasArrayExtensionDtype(data.dtype)
def __array__(self, dtype=None):
"""
Convert to NumPy Array.
Note that Pandas expects a 1D array when dtype is set to object.
But for other dtypes, the returned shape is the same as the one of ``data``.
More info about pandas 1D requirement for PandasExtensionArray here:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
"""
if dtype == object:
out = np.empty(len(self._data), dtype=object)
for i in range(len(self._data)):
out[i] = self._data[i]
return out
if dtype is None:
return self._data
else:
return self._data.astype(dtype)
def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
return PandasArrayExtensionArray(self._data, copy=True)
@classmethod
def _from_sequence(
cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
) -> "PandasArrayExtensionArray":
if len(scalars) > 1 and all(
isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
):
data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
else:
data = np.empty(len(scalars), dtype=object)
data[:] = scalars
return cls(data, copy=copy)
@classmethod
def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
if len(to_concat) > 1 and all(
va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
for va in to_concat
):
data = np.vstack([va._data for va in to_concat])
else:
data = np.empty(len(to_concat), dtype=object)
data[:] = [va._data for va in to_concat]
return cls(data, copy=False)
@property
def dtype(self) -> PandasArrayExtensionDtype:
return self._dtype
@property
def nbytes(self) -> int:
return self._data.nbytes
def isna(self) -> np.ndarray:
return np.array([pd.isna(arr).any() for arr in self._data])
def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
raise NotImplementedError()
def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
if isinstance(item, int):
return self._data[item]
return PandasArrayExtensionArray(self._data[item], copy=False)
def take(
self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
) -> "PandasArrayExtensionArray":
indices: np.ndarray = np.asarray(indices, dtype=int)
if allow_fill:
fill_value = (
self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
)
mask = indices == -1
if (indices < -1).any():
raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
elif len(self) > 0:
pass
elif not np.all(mask):
raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
else:
data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
return PandasArrayExtensionArray(data, copy=False)
took = self._data.take(indices, axis=0)
if allow_fill and mask.any():
took[mask] = [fill_value] * np.sum(mask)
return PandasArrayExtensionArray(took, copy=False)
def __len__(self) -> int:
return len(self._data)
def __eq__(self, other) -> np.ndarray:
if not isinstance(other, PandasArrayExtensionArray):
raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
return (self._data == other._data).all()
def pandas_types_mapper(dtype):
if isinstance(dtype, _ArrayXDExtensionType):
return PandasArrayExtensionDtype(dtype.value_type)
@dataclass
class ClassLabel:
"""Feature type for integer class labels.
There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
* `num_classes`: Create 0 to (num_classes-1) labels.
* `names`: List of label strings.
* `names_file`: File containing the list of labels.
Under the hood the labels are stored as integers.
You can use negative integers to represent unknown/missing labels.
Args:
num_classes (`int`, *optional*):
Number of classes. All labels must be < `num_classes`.
names (`list` of `str`, *optional*):
String names for the integer classes.
The order in which the names are provided is kept.
names_file (`str`, *optional*):
Path to a file with names for the integer classes, one per line.
Example:
```py
>>> from datasets import Features
>>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
>>> features
{'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
```
"""
num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
names: List[str] = None
names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "int64"
pa_type: ClassVar[Any] = pa.int64()
_str2int: ClassVar[Dict[str, int]] = None
_int2str: ClassVar[Dict[int, int]] = None
_type: str = field(default="ClassLabel", init=False, repr=False)
def __post_init__(self, num_classes, names_file):
self.num_classes = num_classes
self.names_file = names_file
if self.names_file is not None and self.names is not None:
raise ValueError("Please provide either names or names_file but not both.")
# Set self.names
if self.names is None:
if self.names_file is not None:
self.names = self._load_names_from_file(self.names_file)
elif self.num_classes is not None:
self.names = [str(i) for i in range(self.num_classes)]
else:
raise ValueError("Please provide either num_classes, names or names_file.")
elif not isinstance(self.names, SequenceABC):
raise TypeError(f"Please provide names as a list, is {type(self.names)}")
# Set self.num_classes
if self.num_classes is None:
self.num_classes = len(self.names)
elif self.num_classes != len(self.names):
raise ValueError(
"ClassLabel number of names do not match the defined num_classes. "
f"Got {len(self.names)} names VS {self.num_classes} num_classes"
)
# Prepare mappings
self._int2str = [str(name) for name in self.names]
self._str2int = {name: i for i, name in enumerate(self._int2str)}
if len(self._int2str) != len(self._str2int):
raise ValueError("Some label names are duplicated. Each label name should be unique.")
def __call__(self):
return self.pa_type
def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
"""Conversion class name `string` => `integer`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].str2int('neg')
0
```
"""
if not isinstance(values, str) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, str):
values = [values]
return_list = False
output = [self._strval2int(value) for value in values]
return output if return_list else output[0]
def _strval2int(self, value: str) -> int:
failed_parse = False
value = str(value)
# first attempt - raw string value
int_value = self._str2int.get(value)
if int_value is None:
# second attempt - strip whitespace
int_value = self._str2int.get(value.strip())
if int_value is None:
# third attempt - convert str to int
try:
int_value = int(value)
except ValueError:
failed_parse = True
else:
if int_value < -1 or int_value >= self.num_classes:
failed_parse = True
if failed_parse:
raise ValueError(f"Invalid string class label {value}")
return int_value
def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
"""Conversion `integer` => class name `string`.
Regarding unknown/missing labels: passing negative integers raises `ValueError`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].int2str(0)
'neg'
```
"""
if not isinstance(values, int) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, int):
values = [values]
return_list = False
for v in values:
if not 0 <= v < self.num_classes:
raise ValueError(f"Invalid integer class label {v:d}")
output = [self._int2str[int(v)] for v in values]
return output if return_list else output[0]
def encode_example(self, example_data):
if self.num_classes is None:
raise ValueError(
"Trying to use ClassLabel feature with undefined number of class. "
"Please set ClassLabel.names or num_classes."
)
# If a string is given, convert to associated integer
if isinstance(example_data, str):
example_data = self.str2int(example_data)
# Allowing -1 to mean no label.
if not -1 <= example_data < self.num_classes:
raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
return example_data
def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
"""Cast an Arrow array to the `ClassLabel` arrow storage type.
The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
- `pa.string()`
- `pa.int()`
Args:
storage (`Union[pa.StringArray, pa.IntegerArray]`):
PyArrow array to cast.
Returns:
`pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
"""
if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
min_max = pc.min_max(storage).as_py()
if min_max["max"] is not None and min_max["max"] >= self.num_classes:
raise ValueError(
f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
)
elif isinstance(storage, pa.StringArray):
storage = pa.array(
[self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
)
return array_cast(storage, self.pa_type)
@staticmethod
def _load_names_from_file(names_filepath):
with open(names_filepath, encoding="utf-8") as f:
return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
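# Usage sketch (editor's addition, not part of the original file): casting a pyarrow
# string array of label names to the ClassLabel integer storage type:
# ```
# import pyarrow as pa
#
# labels = ClassLabel(names=["neg", "pos"])
# labels.cast_storage(pa.array(["pos", "neg", None]))
# # -> pyarrow Int64Array: [1, 0, null]
# ```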
@dataclass
class Sequence:
"""Construct a list of feature from a single type or a dict of types.
Mostly here for compatibility with tfds.
Args:
feature:
A list of features of a single type or a dictionary of types.
length (`int`):
Length of the sequence.
Example:
```py
>>> from datasets import Features, Sequence, Value, ClassLabel
>>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
>>> features
{'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
```
"""
feature: Any
length: int = -1
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "list"
pa_type: ClassVar[Any] = None
_type: str = field(default="Sequence", init=False, repr=False)
FeatureType = Union[
dict,
list,
tuple,
Value,
ClassLabel,
Translation,
TranslationVariableLanguages,
Sequence,
Array2D,
Array3D,
Array4D,
Array5D,
Audio,
Image,
]
def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
"""
Check if the object is not None.
If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
"""
if obj is None:
return False
elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))):
if len(obj) > 0:
if schema is None:
pass
elif isinstance(schema, (list, tuple)):
schema = schema[0]
else:
schema = schema.feature
return _check_non_null_non_empty_recursive(obj[0], schema)
else:
return False
else:
return True
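# Behavior sketch (editor's addition, not part of the original file): only the first
# element of each nesting level is inspected, e.g.:
# ```
# assert _check_non_null_non_empty_recursive([[0, 1]])
# assert not _check_non_null_non_empty_recursive([[]])
# assert not _check_non_null_non_empty_recursive(None)
# ```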
def get_nested_type(schema: FeatureType) -> pa.DataType:
"""
get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
generate_from_arrow_type().
It performs double-duty as the implementation of Features.type and handles the conversion of
datasets.Feature->pa.struct
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, Features):
return pa.struct(
{key: get_nested_type(schema[key]) for key in schema}
) # Features is subclass of dict, and dict order is deterministic since Python 3.6
elif isinstance(schema, dict):
return pa.struct(
{key: get_nested_type(schema[key]) for key in schema}
) # however don't sort on struct types since the order matters
elif isinstance(schema, (list, tuple)):
if len(schema) != 1:
raise ValueError("When defining list feature, you should just provide one example of the inner type")
value_type = get_nested_type(schema[0])
return pa.list_(value_type)
elif isinstance(schema, Sequence):
value_type = get_nested_type(schema.feature)
# We allow to reverse list of dict => dict of list for compatibility with tfds
if isinstance(schema.feature, dict):
return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
return pa.list_(value_type, schema.length)
# Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
return schema()
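# Usage sketch (editor's addition, not part of the original file): nested feature types
# resolve to the corresponding nested pyarrow types, roughly:
# ```
# get_nested_type({"text": Value("string"), "scores": Sequence(Value("float32"))})
# # -> pa.struct({"text": pa.string(), "scores": pa.list_(pa.float32())})
# ```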
def encode_nested_example(schema, obj, level=0):
"""Encode a nested example.
This is used since some features (in particular ClassLabel) have some logic during encoding.
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
if level == 0 and obj is None:
raise ValueError("Got None but expected a dictionary instead")
return (
{k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
if obj is not None
else None
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
return list(obj)
elif isinstance(schema, Sequence):
if obj is None:
return None
# We allow to reverse list of dict => dict of list for compatibility with tfds
if isinstance(schema.feature, dict):
# dict of list to fill
list_dict = {}
if isinstance(obj, (list, tuple)):
# obj is a list of dict
for k in schema.feature:
list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
return list_dict
else:
# obj is a single dict
for k in schema.feature:
list_dict[k] = (
[encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
if k in obj
else None
)
return list_dict
# schema.feature is not a dict
if isinstance(obj, str): # don't interpret a string as a list
raise ValueError(f"Got a string but expected a list instead: '{obj}'")
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
break
# be careful when comparing tensors here
if (
not isinstance(first_elmt, list)
or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
):
return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
return list(obj)
# Object with special encoding:
# ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
return schema.encode_example(obj) if obj is not None else None
# Other objects should be directly convertible to a native Arrow type (like Translation)
return obj
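# Usage sketch (editor's addition, not part of the original file): ClassLabel strings are
# turned into integers while plain values pass through, e.g.:
# ```
# schema = {"label": ClassLabel(names=["neg", "pos"]), "tokens": [Value("string")]}
# encode_nested_example(schema, {"label": "pos", "tokens": ["a", "b"]})
# # -> {'label': 1, 'tokens': ['a', 'b']}
# ```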
def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode a nested example.
This is used since some features (in particular Audio and Image) have some logic during decoding.
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(schema, dict):
return (
{k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
if obj is not None
else None
)
elif isinstance(schema, (list, tuple)):
sub_schema = schema[0]
if obj is None:
return None
else:
if len(obj) > 0:
for first_elmt in obj:
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
break
if decode_nested_example(sub_schema, first_elmt) != first_elmt:
return [decode_nested_example(sub_schema, o) for o in obj]
return list(obj)
elif isinstance(schema, Sequence):
        # We allow converting a list of dicts into a dict of lists for compatibility with tfds
if isinstance(schema.feature, dict):
return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
else:
return decode_nested_example([schema.feature], obj)
# Object with special decoding:
elif isinstance(schema, (Audio, Image)):
# we pass the token to read and decode files from private repositories in streaming mode
if obj is not None and schema.decode:
return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
return obj
def generate_from_dict(obj: Any):
"""Regenerate the nested feature object from a deserialized dict.
We use the '_type' fields to get the dataclass name to load.
generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
:meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
that :class:`Value` automatically performs.
"""
# Nested structures: we allow dict, list/tuples, sequences
if isinstance(obj, list):
return [generate_from_dict(value) for value in obj]
# Otherwise we have a dict or a dataclass
if "_type" not in obj or isinstance(obj["_type"], dict):
return {key: generate_from_dict(value) for key, value in obj.items()}
obj = dict(obj)
class_type = globals()[obj.pop("_type")]
if class_type == Sequence:
return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))
field_names = {f.name for f in fields(class_type)}
return class_type(**{k: v for k, v in obj.items() if k in field_names})
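# Example (illustrative doctest-style sketch, not part of the original module): rebuilding feature
# objects from their serialized dict form, using the "_type" key to pick the dataclass.
# >>> generate_from_dict(
# ...     {"text": {"dtype": "string", "_type": "Value"}, "tokens": {"feature": {"dtype": "int32", "_type": "Value"}, "_type": "Sequence"}}
# ... )
# {'text': Value(dtype='string', id=None), 'tokens': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None)}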
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
"""
generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
a single field.
This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().
This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
"""
if isinstance(pa_type, pa.StructType):
return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
elif isinstance(pa_type, pa.FixedSizeListType):
return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
elif isinstance(pa_type, pa.ListType):
feature = generate_from_arrow_type(pa_type.value_type)
if isinstance(feature, (dict, tuple, list)):
return [feature]
return Sequence(feature=feature)
elif isinstance(pa_type, _ArrayXDExtensionType):
array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
elif isinstance(pa_type, pa.DictionaryType):
raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
elif isinstance(pa_type, pa.DataType):
return Value(dtype=_arrow_to_datasets_dtype(pa_type))
else:
raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
"""Build a PyArrow ListArray from a multidimensional NumPy array"""
arr = np.array(arr)
values = pa.array(arr.flatten(), type=type)
for i in range(arr.ndim - 1):
n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
step_offsets = arr.shape[arr.ndim - i - 1]
offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
values = pa.ListArray.from_arrays(offsets, values)
return values
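# Example (illustrative doctest-style sketch, not part of the original module): a 2D array becomes a
# ListArray of rows.
# >>> import numpy as np
# >>> numpy_to_pyarrow_listarray(np.arange(6, dtype=np.int32).reshape(2, 3)).to_pylist()
# [[0, 1, 2], [3, 4, 5]]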
def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
null_mask = np.array([arr is None for arr in l_arr])
null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
l_arr = [arr for arr in l_arr if arr is not None]
offsets = np.cumsum(
[0] + [len(arr) for arr in l_arr], dtype=object
) # convert to dtype object to allow None insertion
offsets = np.insert(offsets, null_indices, None)
offsets = pa.array(offsets, type=pa.int32())
values = pa.concat_arrays(l_arr)
return pa.ListArray.from_arrays(offsets, values)
def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
"""Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
if len(l_arr) > 0:
return list_of_pa_arrays_to_pyarrow_listarray(
[numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
)
else:
return pa.array([], type=type)
def contains_any_np_array(data: Any):
"""Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.
Args:
data (Any): Data.
Returns:
bool
"""
if isinstance(data, np.ndarray):
return True
elif isinstance(data, list):
return contains_any_np_array(first_non_null_value(data)[1])
else:
return False
def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
"""Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.
Args:
data (Union[np.ndarray, List]): Data.
type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.
Returns:
pa.ListArray
"""
if isinstance(data, np.ndarray):
return numpy_to_pyarrow_listarray(data, type=type)
elif isinstance(data, list):
return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
"""Convert to PyArrow ListArray.
Args:
data (Any): Sequence, iterable, np.ndarray or pd.Series.
pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.
Returns:
pyarrow.Array
"""
if contains_any_np_array(data):
return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
else:
return pa.array(data, pa_type.storage_dtype)
def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
"""Visit a (possibly nested) feature.
Args:
feature (FeatureType): the feature type to be checked
Returns:
visited feature (FeatureType)
"""
if isinstance(feature, dict):
out = func({k: _visit(f, func) for k, f in feature.items()})
elif isinstance(feature, (list, tuple)):
out = func([_visit(feature[0], func)])
elif isinstance(feature, Sequence):
out = func(Sequence(_visit(feature.feature, func), length=feature.length))
else:
out = func(feature)
return feature if out is None else out
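# Example (illustrative doctest-style sketch, not part of the original module): _visit calls `func`
# on every nested feature (children first) and keeps the original feature wherever `func` returns None.
# >>> from datasets import ClassLabel, Sequence
# >>> seen = []
# >>> _ = _visit({"labels": Sequence(ClassLabel(names=["neg", "pos"]))}, lambda f: seen.append(type(f).__name__))
# >>> seen
# ['ClassLabel', 'Sequence', 'dict']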
def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
"""Check if a (possibly nested) feature requires decoding.
Args:
feature (FeatureType): the feature type to be checked
ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
of the `decode` attribute of the decodable feature types.
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_decoding(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_decoding(feature[0])
elif isinstance(feature, Sequence):
return require_decoding(feature.feature)
else:
return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)
def require_storage_cast(feature: FeatureType) -> bool:
"""Check if a (possibly nested) feature requires storage casting.
Args:
feature (FeatureType): the feature type to be checked
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_storage_cast(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_storage_cast(feature[0])
elif isinstance(feature, Sequence):
return require_storage_cast(feature.feature)
else:
return hasattr(feature, "cast_storage")
def require_storage_embed(feature: FeatureType) -> bool:
"""Check if a (possibly nested) feature requires embedding data into storage.
Args:
feature (FeatureType): the feature type to be checked
Returns:
:obj:`bool`
"""
if isinstance(feature, dict):
return any(require_storage_cast(f) for f in feature.values())
elif isinstance(feature, (list, tuple)):
return require_storage_cast(feature[0])
elif isinstance(feature, Sequence):
return require_storage_cast(feature.feature)
else:
return hasattr(feature, "embed_storage")
def keep_features_dicts_synced(func):
"""
Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object
in sync with the main dictionary.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if args:
self: "Features" = args[0]
args = args[1:]
else:
self: "Features" = kwargs.pop("self")
out = func(self, *args, **kwargs)
assert hasattr(self, "_column_requires_decoding")
self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
return out
wrapper._decorator_name_ = "_keep_dicts_synced"
return wrapper
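# Example (illustrative doctest-style sketch, not part of the original module): because the dict mutators
# below are wrapped with this decorator, adding a column keeps the decoding bookkeeping in sync.
# >>> from datasets import Features, Image, Value
# >>> features = Features({"text": Value("string")})
# >>> features["image"] = Image()
# >>> features._column_requires_decoding
# {'text': False, 'image': True}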
class Features(dict):
"""A special dictionary that defines the internal structure of a dataset.
Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
and values are the type of that column.
`FieldType` can be one of the following:
- a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
- a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
associated to them and will be stored as integers in the dataset.
        - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-field
features. It's possible to have nested fields of nested fields in an arbitrary manner.
- a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
`list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
type hosted in this list.
<Tip>
        A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
        lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be
        unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
[`~datasets.Sequence`].
</Tip>
- a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
- an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
- an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
- [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
"""
def __init__(*args, **kwargs):
# self not in the signature to allow passing self as a kwarg
if not args:
raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
self, *args = args
super(Features, self).__init__(*args, **kwargs)
self._column_requires_decoding: Dict[str, bool] = {
col: require_decoding(feature) for col, feature in self.items()
}
__setitem__ = keep_features_dicts_synced(dict.__setitem__)
__delitem__ = keep_features_dicts_synced(dict.__delitem__)
update = keep_features_dicts_synced(dict.update)
setdefault = keep_features_dicts_synced(dict.setdefault)
pop = keep_features_dicts_synced(dict.pop)
popitem = keep_features_dicts_synced(dict.popitem)
clear = keep_features_dicts_synced(dict.clear)
def __reduce__(self):
return Features, (dict(self),)
@property
def type(self):
"""
Features field types.
Returns:
:obj:`pyarrow.DataType`
"""
return get_nested_type(self)
@property
def arrow_schema(self):
"""
Features schema.
Returns:
:obj:`pyarrow.Schema`
"""
hf_metadata = {"info": {"features": self.to_dict()}}
return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
@classmethod
def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
"""
Construct [`Features`] from Arrow Schema.
It also checks the schema metadata for Hugging Face Datasets features.
        Non-nullable fields are not supported and are set to nullable.
Args:
pa_schema (`pyarrow.Schema`):
Arrow Schema.
Returns:
[`Features`]
"""
# try to load features from the arrow schema metadata
if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
return Features.from_dict(metadata["info"]["features"])
obj = {field.name: generate_from_arrow_type(field.type) for field in pa_schema}
return cls(**obj)
@classmethod
def from_dict(cls, dic) -> "Features":
"""
Construct [`Features`] from dict.
Regenerate the nested feature object from a deserialized dict.
We use the `_type` key to infer the dataclass name of the feature `FieldType`.
It allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
[`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
dtypes that [`Value`] automatically performs.
Args:
dic (`dict[str, Any]`):
Python dictionary.
Returns:
`Features`
Example::
>>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
{'_type': Value(dtype='string', id=None)}
"""
obj = generate_from_dict(dic)
return cls(**obj)
def to_dict(self):
return asdict(self)
def _to_yaml_list(self) -> list:
# we compute the YAML list from the dict representation that is used for JSON dump
yaml_data = self.to_dict()
def simplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
#
# sequence: -> sequence: int32
# dtype: int32 ->
#
if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
feature["sequence"] = feature["sequence"]["dtype"]
#
# sequence: -> sequence:
# struct: -> - name: foo
# - name: foo -> dtype: int32
# dtype: int32 ->
#
if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
feature["sequence"] = feature["sequence"]["struct"]
#
# list: -> list: int32
# dtype: int32 ->
#
if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
feature["list"] = feature["list"]["dtype"]
#
# list: -> list:
# struct: -> - name: foo
# - name: foo -> dtype: int32
# dtype: int32 ->
#
if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
feature["list"] = feature["list"]["struct"]
#
# class_label: -> class_label:
# names: -> names:
# - negative -> '0': negative
# - positive -> '1': positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
# server-side requirement: keys must be strings
feature["class_label"]["names"] = {
str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
}
return feature
def to_yaml_inner(obj: Union[dict, list]) -> dict:
if isinstance(obj, dict):
_type = obj.pop("_type", None)
if _type == "Sequence":
_feature = obj.pop("feature")
return simplify({"sequence": to_yaml_inner(_feature), **obj})
elif _type == "Value":
return obj
elif _type and not obj:
return {"dtype": camelcase_to_snakecase(_type)}
elif _type:
return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
else:
return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
elif isinstance(obj, list):
return simplify({"list": simplify(to_yaml_inner(obj[0]))})
elif isinstance(obj, tuple):
return to_yaml_inner(list(obj))
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
def to_yaml_types(obj: dict) -> dict:
if isinstance(obj, dict):
return {k: to_yaml_types(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [to_yaml_types(v) for v in obj]
elif isinstance(obj, tuple):
return to_yaml_types(list(obj))
else:
return obj
return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
@classmethod
def _from_yaml_list(cls, yaml_data: list) -> "Features":
yaml_data = copy.deepcopy(yaml_data)
# we convert the list obtained from YAML data into the dict representation that is used for JSON dump
def unsimplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
#
# sequence: int32 -> sequence:
# -> dtype: int32
#
if isinstance(feature.get("sequence"), str):
feature["sequence"] = {"dtype": feature["sequence"]}
#
# list: int32 -> list:
# -> dtype: int32
#
if isinstance(feature.get("list"), str):
feature["list"] = {"dtype": feature["list"]}
#
# class_label: -> class_label:
# names: -> names:
# '0': negative -> - negative
# '1': positive -> - positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
label_ids = sorted(feature["class_label"]["names"], key=int)
if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
raise ValueError(
f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
)
feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
return feature
def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
if isinstance(obj, dict):
if not obj:
return {}
_type = next(iter(obj))
if _type == "sequence":
_feature = unsimplify(obj).pop(_type)
return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
if _type == "list":
return [from_yaml_inner(unsimplify(obj)[_type])]
if _type == "struct":
return from_yaml_inner(obj["struct"])
elif _type == "dtype":
if isinstance(obj["dtype"], str):
# e.g. int32, float64, string, audio, image
try:
Value(obj["dtype"])
return {**obj, "_type": "Value"}
except ValueError:
# e.g. Audio, Image, ArrayXD
return {"_type": snakecase_to_camelcase(obj["dtype"])}
else:
return from_yaml_inner(obj["dtype"])
else:
return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
elif isinstance(obj, list):
names = [_feature.pop("name") for _feature in obj]
return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
return cls.from_dict(from_yaml_inner(yaml_data))
def encode_example(self, example):
"""
Encode example into a format for Arrow.
Args:
example (`dict[str, Any]`):
Data in a Dataset row.
Returns:
`dict[str, Any]`
"""
example = cast_to_python_objects(example)
return encode_nested_example(self, example)
def encode_column(self, column, column_name: str):
"""
Encode column into a format for Arrow.
Args:
column (`list[Any]`):
Data in a Dataset column.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
column = cast_to_python_objects(column)
return [encode_nested_example(self[column_name], obj) for obj in column]
def encode_batch(self, batch):
"""
Encode batch into a format for Arrow.
Args:
batch (`dict[str, list[Any]]`):
Data in a Dataset batch.
Returns:
`dict[str, list[Any]]`
"""
encoded_batch = {}
if set(batch) != set(self):
raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
for key, column in batch.items():
column = cast_to_python_objects(column)
encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column]
return encoded_batch
def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode example with custom feature decoding.
Args:
example (`dict[str, Any]`):
Dataset row data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary `repo_id (str) -> token (bool or str)`.
Returns:
`dict[str, Any]`
"""
return {
column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
if self._column_requires_decoding[column_name]
else value
for column_name, (feature, value) in zip_dict(
{key: value for key, value in self.items() if key in example}, example
)
}
def decode_column(self, column: list, column_name: str):
"""Decode column with custom feature decoding.
Args:
column (`list[Any]`):
Dataset column data.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
return (
[decode_nested_example(self[column_name], value) if value is not None else None for value in column]
if self._column_requires_decoding[column_name]
else column
)
def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode batch with custom feature decoding.
Args:
batch (`dict[str, list[Any]]`):
Dataset batch data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary repo_id (str) -> token (bool or str)
Returns:
`dict[str, list[Any]]`
"""
decoded_batch = {}
for column_name, column in batch.items():
decoded_batch[column_name] = (
[
decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
if value is not None
else None
for value in column
]
if self._column_requires_decoding[column_name]
else column
)
return decoded_batch
def copy(self) -> "Features":
"""
Make a deep copy of [`Features`].
Returns:
[`Features`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> copy_of_features = ds.features.copy()
>>> copy_of_features
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return copy.deepcopy(self)
def reorder_fields_as(self, other: "Features") -> "Features":
"""
Reorder Features fields to match the field order of other [`Features`].
The order of the fields is important since it matters for the underlying arrow data.
Re-ordering the fields allows to make the underlying arrow data type match.
Args:
other ([`Features`]):
The other [`Features`] to align with.
Returns:
[`Features`]
Example::
>>> from datasets import Features, Sequence, Value
            >>> # let's say we have two features with a different order of nested fields (for a and b for example)
>>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
>>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
>>> assert f1.type != f2.type
            >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the field order match
>>> f1.reorder_fields_as(f2)
{'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
>>> assert f1.reorder_fields_as(f2).type == f2.type
"""
def recursive_reorder(source, target, stack=""):
stack_position = " at " + stack[1:] if stack else ""
if isinstance(target, Sequence):
target = target.feature
if isinstance(target, dict):
target = {k: [v] for k, v in target.items()}
else:
target = [target]
if isinstance(source, Sequence):
source, id_, length = source.feature, source.id, source.length
if isinstance(source, dict):
source = {k: [v] for k, v in source.items()}
reordered = recursive_reorder(source, target, stack)
return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
else:
source = [source]
reordered = recursive_reorder(source, target, stack)
return Sequence(reordered[0], id=id_, length=length)
elif isinstance(source, dict):
if not isinstance(target, dict):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if sorted(source) != sorted(target):
message = (
f"Keys mismatch: between {source} (source) and {target} (target).\n"
f"{source.keys()-target.keys()} are missing from target "
f"and {target.keys()-source.keys()} are missing from source" + stack_position
)
raise ValueError(message)
return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
elif isinstance(source, list):
if not isinstance(target, list):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if len(source) != len(target):
raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
else:
return source
return Features(recursive_reorder(self, other))
def flatten(self, max_depth=16) -> "Features":
"""Flatten the features. Every dictionary column is removed and is replaced by
all the subfields it contains. The new fields are named by concatenating the
name of the original column and the subfield name like this: `<original>.<subfield>`.
If a column contains nested dictionaries, then all the lower-level subfields names are
also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
Returns:
[`Features`]:
The flattened features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad", split="train")
>>> ds.features.flatten()
{'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
```
"""
for depth in range(1, max_depth):
no_change = True
flattened = self.copy()
for column_name, subfeature in self.items():
if isinstance(subfeature, dict):
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
del flattened[column_name]
elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
no_change = False
flattened.update(
{
f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
for k, v in subfeature.feature.items()
}
)
del flattened[column_name]
elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
del flattened[column_name]
self = flattened
if no_change:
break
return self
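# Example (illustrative doctest-style sketch, not part of the original module): a Features object encodes
# raw python examples into Arrow-ready values, e.g. mapping ClassLabel strings to their integer ids.
# >>> from datasets import ClassLabel, Features, Value
# >>> features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
# >>> features.encode_example({"text": "great movie", "label": "pos"})
# {'text': 'great movie', 'label': 1}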
def _align_features(features_list: List[Features]) -> List[Features]:
"""Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
name2feature = {}
for features in features_list:
for k, v in features.items():
if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
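# Example (illustrative doctest-style sketch, not part of the original module): a `Value("null")` column is
# aligned to the concrete type found in another dict of features.
# >>> from datasets import Features, Value
# >>> f1 = Features({"a": Value("null"), "b": Value("string")})
# >>> f2 = Features({"a": Value("int64"), "b": Value("string")})
# >>> _align_features([f1, f2])[0]
# {'a': Value(dtype='int64', id=None), 'b': Value(dtype='string', id=None)}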
def _check_if_features_can_be_aligned(features_list: List[Features]):
"""Check if the dictionaries of features can be aligned.
    Two dictionaries of features can be aligned if the keys they share have the same type, or if one of them has type `Value("null")`.
"""
name2feature = {}
for features in features_list:
for k, v in features.items():
if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
name2feature[k] = v
for features in features_list:
for k, v in features.items():
if not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
raise ValueError(
f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").'
)
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/features/translation.py | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
        languages (`list` of `str`):
            The list of language codes. At data generation time, each example is a dictionary mapping these string
            language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
        languages (`list` of `str`, *optional*):
            The full list of allowed language codes. At data generation time, each example is a dictionary mapping
            string language codes to one or more string translations; the languages present may vary from example
            to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ... 'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
    ... 'language': ['de', 'en', 'fr', 'fr'],
    ... 'translation': ['die katze', 'the cat', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if set(translation_dict) == {"language", "translation"}:
return translation_dict
elif self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
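# Example (illustrative doctest-style sketch, not part of the original module): encode_example splits
# multi-valued translations and sorts the (language, translation) pairs by language code.
# >>> feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
# >>> feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}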
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/features/image.py | import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.download_config import DownloadConfig
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
"""Image [`Feature`] to read image data from an image file.
Input: The Image feature accepts as input:
- A `str`: Absolute path to the image file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the image file to the archive file.
- `bytes`: Bytes of the image file.
This is useful for archived files with sequential access.
- An `np.ndarray`: NumPy array representing an image.
- A `PIL.Image.Image`: PIL image object.
Args:
decode (`bool`, defaults to `True`):
Whether to decode the image data. If `False`,
returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
Examples:
```py
>>> from datasets import load_dataset, Image
>>> ds = load_dataset("beans", split="train")
>>> ds.features["image"]
Image(decode=True, id=None)
>>> ds[0]["image"]
<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
>>> ds = ds.cast_column('image', Image(decode=False))
    >>> ds[0]["image"]
    {'bytes': None,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
```
"""
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "PIL.Image.Image"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Image", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
Data passed as input to Image feature.
Returns:
`dict` with "path" and "bytes" fields
"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if isinstance(value, list):
value = np.array(value)
if isinstance(value, str):
return {"path": value, "bytes": None}
elif isinstance(value, bytes):
return {"path": None, "bytes": value}
elif isinstance(value, np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(value)
elif isinstance(value, PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(value)
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
"""Decode example image file into image data.
Args:
value (`str` or `dict`):
A string with the absolute image file path, a dictionary with
keys:
- `path`: String with absolute or relative image file path.
- `bytes`: The bytes of the image file.
token_per_repo_id (`dict`, *optional*):
To access and decode
image files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`).
Returns:
`PIL.Image.Image`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'.")
if token_per_repo_id is None:
token_per_repo_id = {}
path, bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
else:
if is_local_path(path):
image = PIL.Image.open(path)
else:
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL
if source_url.startswith(config.HF_ENDPOINT)
else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id.get(repo_id)
except ValueError:
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
bytes_ = BytesIO(f.read())
image = PIL.Image.open(bytes_)
else:
image = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
"""Cast an Arrow array to the Image arrow storage type.
The Arrow types that can be converted to the Image pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the image bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
- `pa.list(*)` - it must contain the image array data
Args:
storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_list(storage.type):
bytes_array = pa.array(
[encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
type=pa.binary(),
)
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
)
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed image files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type)
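# Example (illustrative doctest-style sketch, not part of the original module; assumes pyarrow is installed):
# cast_storage wraps a plain array of file paths into the {bytes, path} struct storage used by the Image feature.
# >>> import pyarrow as pa
# >>> Image().cast_storage(pa.array(["img1.png", None], type=pa.string())).to_pylist()
# [{'bytes': None, 'path': 'img1.png'}, None]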
def list_image_compression_formats() -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
"""Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
buffer = BytesIO()
if image.format in list_image_compression_formats():
format = image.format
else:
format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(buffer, format=format)
return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
if hasattr(image, "filename") and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
dtype = array.dtype
dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
dtype_kind = dtype.kind
dtype_itemsize = dtype.itemsize
dest_dtype = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
)
dest_dtype = np.dtype("|u1")
if dtype != dest_dtype:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = np.dtype(dtype_str)
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
)
image = PIL.Image.fromarray(array.astype(dest_dtype))
return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
"""Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if objs:
_, obj = first_non_null_value(objs)
if isinstance(obj, str):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(obj, np.ndarray):
obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
return [obj_to_image_dict_func(obj) for obj in objs]
elif isinstance(obj, PIL.Image.Image):
obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
return [obj_to_image_dict_func(obj) for obj in objs]
else:
return objs
else:
return objs
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/features/audio.py | import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.download_config import DownloadConfig
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
"""Audio [`Feature`] to extract audio data from an audio file.
Input: The Audio feature accepts as input:
- A `str`: Absolute path to the audio file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `bytes`: Bytes content of the audio file.
This is useful for archived files with sequential access.
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `array`: Array containing the audio sample
- `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
This is useful for archived files with sequential access.
Args:
sampling_rate (`int`, *optional*):
Target sampling rate. If `None`, the native sampling rate is used.
mono (`bool`, defaults to `True`):
Whether to convert the audio signal to mono by averaging samples across
channels.
decode (`bool`, defaults to `True`):
Whether to decode the audio data. If `False`,
returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
Example:
```py
>>> from datasets import load_dataset, Audio
>>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
>>> ds[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
"""
sampling_rate: Optional[int] = None
mono: bool = True
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Audio", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str` or `dict`):
Data passed as input to Audio feature.
Returns:
`dict`
"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
if isinstance(value, str):
return {"bytes": None, "path": value}
elif isinstance(value, bytes):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
buffer = BytesIO()
sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm"):
# "PCM" only has raw audio bytes
if value.get("sampling_rate") is None:
                    # Converting "PCM" bytes to "WAV" bytes requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
if value.get("bytes"):
                    # If the PCM bytes are already provided, use them directly instead of re-reading the file
bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
else:
bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
buffer = BytesIO(bytes())
sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(
self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
) -> dict:
"""Decode example audio file into audio data.
Args:
value (`dict`):
A dictionary with keys:
- `path`: String with relative audio file path.
- `bytes`: Bytes of the audio file.
token_per_repo_id (`dict`, *optional*):
To access and decode
audio files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`)
Returns:
`dict`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
array, sampling_rate = sf.read(f)
else:
array, sampling_rate = sf.read(file)
array = array.T
if self.mono:
array = librosa.to_mono(array)
if self.sampling_rate and self.sampling_rate != sampling_rate:
array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature.")
return {
"bytes": Value("binary"),
"path": Value("string"),
}
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
"""Cast an Arrow array to the Audio arrow storage type.
The Arrow types that can be converted to the Audio pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the audio bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
Args:
storage (`Union[pa.StringArray, pa.StructArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed audio files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type)
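# Example (illustrative doctest-style sketch, not part of the original module; requires soundfile):
# encoding a raw array produces WAV bytes (starting with the "RIFF" header) and no path.
# >>> import numpy as np
# >>> encoded = Audio().encode_example({"array": np.zeros(16000, dtype=np.float32), "sampling_rate": 16000})
# >>> encoded["path"] is None, encoded["bytes"][:4]
# (True, b'RIFF')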
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/features/__init__.py | # flake8: noqa
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/readme.py | # loading package files: https://stackoverflow.com/a/20885799
import importlib.resources as pkg_resources
import logging
from pathlib import Path
from typing import Any, List, Tuple
import yaml
from . import resources
from .deprecation_utils import deprecated
BASE_REF_URL = "https://github.com/huggingface/datasets/tree/main/src/datasets/utils"
this_url = f"{BASE_REF_URL}/{__file__}"
logger = logging.getLogger(__name__)
def load_yaml_resource(resource: str) -> Tuple[Any, str]:
content = pkg_resources.read_text(resources, resource)
return yaml.safe_load(content), f"{BASE_REF_URL}/resources/{resource}"
readme_structure, known_readme_structure_url = load_yaml_resource("readme_structure.yaml")
FILLER_TEXT = [
"[Needs More Information]",
"[More Information Needed]",
"(https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)",
]
# Dictionary representation of section/readme, error_list, warning_list
ReadmeValidatorOutput = Tuple[dict, List[str], List[str]]
class Section:
def __init__(self, name: str, level: str, lines: List[str] = None, suppress_parsing_errors: bool = False):
self.name = name
self.level = level
self.lines = lines
self.text = ""
self.is_empty_text = True
self.content = {}
self.parsing_error_list = []
self.parsing_warning_list = []
if self.lines is not None:
self.parse(suppress_parsing_errors=suppress_parsing_errors)
def parse(self, suppress_parsing_errors: bool = False):
current_sub_level = ""
current_lines = []
code_start = False
for line in self.lines:
if line.strip(" \n") == "":
continue
elif line.strip(" \n")[:3] == "```":
code_start = not code_start
elif line.split()[0] == self.level + "#" and not code_start:
if current_sub_level != "":
self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
current_lines = []
else:
if current_lines != []:
self.text += "".join(current_lines).strip()
if self.text != "" and self.text not in FILLER_TEXT:
self.is_empty_text = False
current_lines = []
current_sub_level = " ".join(line.split()[1:]).strip(" \n")
else:
current_lines.append(line)
else:
if current_sub_level != "":
if current_sub_level in self.content:
self.parsing_error_list.append(
f"Multiple sections with the same heading `{current_sub_level}` have been found. Please keep only one of these sections."
)
self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
else:
if current_lines != []:
self.text += "".join(current_lines).strip()
if self.text != "" and self.text not in FILLER_TEXT:
self.is_empty_text = False
if self.level == "" and not suppress_parsing_errors:
if self.parsing_error_list != [] or self.parsing_warning_list != []:
                errors = "\n".join("-\t" + x for x in self.parsing_error_list + self.parsing_warning_list)
error_string = f"The following issues were found while parsing the README at `{self.name}`:\n" + errors
raise ValueError(error_string)
def validate(self, structure: dict) -> ReadmeValidatorOutput:
"""Validates a Section class object recursively using the structure provided as a dictionary.
Args:
            structure (:obj: `dict`): The dictionary representing expected structure.
Returns:
:obj: `ReadmeValidatorOutput`: The dictionary representation of the section, and the errors.
"""
# Header text validation
error_list = []
warning_list = []
if structure["allow_empty"] is False:
# If content is expected
if self.is_empty_text and self.content == {}:
# If no content is found, mention it in the error_list
error_list.append(f"Expected some content in section `{self.name}` but it is empty.")
if structure["allow_empty_text"] is False:
# If some text is expected
if self.is_empty_text:
# If no text is found, mention it in the error_list
                error_list.append(
                    f"Expected some text in section `{self.name}` but it is empty (text in subsections is ignored)."
                )
# Subsections Validation
if structure["subsections"] is not None:
# If subsections are expected
if self.content == {}:
# If no subsections are present
values = [subsection["name"] for subsection in structure["subsections"]]
# Mention the expected values in the error_list
error_list.append(
f"Section `{self.name}` expected the following subsections: {', '.join(['`'+x+'`' for x in values])}. Found 'None'."
)
else:
# If some subsections are present
structure_names = [subsection["name"] for subsection in structure["subsections"]]
has_missing_subsections = False
for idx, name in enumerate(structure_names):
if name not in self.content:
# If the expected subsection is not present
error_list.append(f"Section `{self.name}` is missing subsection: `{name}`.")
has_missing_subsections = True
else:
# If the subsection is present, validate subsection, return the result
# and concat the errors from subsection to section error_list
# Skip sublevel validation if current level is `###`
if self.level == "###":
continue
else:
_, subsec_error_list, subsec_warning_list = self.content[name].validate(
structure["subsections"][idx]
)
error_list += subsec_error_list
warning_list += subsec_warning_list
if has_missing_subsections: # we only allow to have extra subsections if all the other ones are here
for name in self.content:
if name not in structure_names:
# If an extra subsection is present
warning_list.append(
f"`{self.name}` has an extra subsection: `{name}`. Skipping further validation checks for this subsection as expected structure is unknown."
)
if error_list:
# If there are errors, do not return the dictionary as it is invalid
return {}, error_list, warning_list
else:
return self.to_dict(), error_list, warning_list
def to_dict(self) -> dict:
"""Returns the dictionary representation of a section."""
return {
"name": self.name,
"text": self.text,
"is_empty_text": self.is_empty_text,
"subsections": [value.to_dict() for value in self.content.values()],
}
@deprecated("Use `huggingface_hub.DatasetCard` instead.")
class ReadMe(Section): # Level 0
def __init__(self, name: str, lines: List[str], structure: dict = None, suppress_parsing_errors: bool = False):
super().__init__(name=name, level="") # Not using lines here as we need to use a child class parse
self.structure = structure
self.yaml_tags_line_count = -2
self.tag_count = 0
self.lines = lines
if self.lines is not None:
self.parse(suppress_parsing_errors=suppress_parsing_errors)
def validate(self):
if self.structure is None:
content, error_list, warning_list = self._validate(readme_structure)
else:
content, error_list, warning_list = self._validate(self.structure)
if error_list != [] or warning_list != []:
errors = "\n".join(["-\t" + x for x in error_list + warning_list])
error_string = f"The following issues were found for the README at `{self.name}`:\n" + errors
raise ValueError(error_string)
@classmethod
def from_readme(cls, path: Path, structure: dict = None, suppress_parsing_errors: bool = False):
with open(path, encoding="utf-8") as f:
lines = f.readlines()
return cls(path, lines, structure, suppress_parsing_errors=suppress_parsing_errors)
@classmethod
def from_string(
cls, string: str, structure: dict = None, root_name: str = "root", suppress_parsing_errors: bool = False
):
lines = string.split("\n")
return cls(root_name, lines, structure, suppress_parsing_errors=suppress_parsing_errors)
def parse(self, suppress_parsing_errors: bool = False):
# Skip Tags
line_count = 0
for line in self.lines:
self.yaml_tags_line_count += 1
if line.strip(" \n") == "---":
self.tag_count += 1
if self.tag_count == 2:
break
line_count += 1
if self.tag_count == 2:
            self.lines = self.lines[line_count + 1 :]  # Skip the YAML block, including its closing "---" marker.
else:
self.lines = self.lines[self.tag_count :]
super().parse(suppress_parsing_errors=suppress_parsing_errors)
    def __str__(self):
        """Returns the string of the dictionary representation of the ReadMe."""
return str(self.to_dict())
def _validate(self, readme_structure):
error_list = []
warning_list = []
if self.yaml_tags_line_count == 0:
warning_list.append("Empty YAML markers are present in the README.")
elif self.tag_count == 0:
warning_list.append("No YAML markers are present in the README.")
elif self.tag_count == 1:
            warning_list.append("Only the start of the YAML tags is present in the README.")
# Check how many first level sections are present.
num_first_level_keys = len(self.content.keys())
if num_first_level_keys > 1:
# If more than one, add to the error list, continue
error_list.append(
f"The README has several first-level headings: {', '.join(['`'+x+'`' for x in list(self.content.keys())])}. Only one heading is expected. Skipping further validation for this README."
)
elif num_first_level_keys < 1:
# If less than one, append error.
error_list.append(
"The README has no first-level headings. One heading is expected. Skipping further validation for this README."
)
else:
# If one exactly
start_key = list(self.content.keys())[0] # Get the key
if start_key.startswith("Dataset Card for"): # Check correct start
# If the starting is correct, validate all the sections
_, sec_error_list, sec_warning_list = self.content[start_key].validate(
readme_structure["subsections"][0]
)
error_list += sec_error_list
warning_list += sec_warning_list
else:
# If not found, append error
error_list.append(
"No first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
)
if error_list:
# If there are errors, do not return the dictionary as it is invalid
return {}, error_list, warning_list
else:
return self.to_dict(), error_list, warning_list
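# Illustrative usage sketch (hypothetical, not part of the upstream module): parsing an
# in-memory dataset card with the (deprecated) ReadMe helper. The markdown snippet is a
# made-up minimal card; structure validation is not triggered here.
def _example_readme_from_string():  # hypothetical helper, for illustration only
    card = "---\nlanguage: en\n---\n# Dataset Card for Dummy\n## Dataset Description\nSome text.\n"
    readme = ReadMe.from_string(card, suppress_parsing_errors=True)
    return readme.to_dict()  # nested dict of sections and their text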
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser(usage="Validate the content (excluding YAML tags) of a README.md file.")
ap.add_argument("readme_filepath")
args = ap.parse_args()
readme_filepath = Path(args.readme_filepath)
readme = ReadMe.from_readme(readme_filepath)
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/tf_utils.py | # Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-specific utils import."""
import os
import warnings
from functools import partial
from math import ceil
from uuid import uuid4
import numpy as np
import pyarrow as pa
from multiprocess import get_context
try:
from multiprocess.shared_memory import SharedMemory
except ImportError:
SharedMemory = None # Version checks should prevent this being called on older Python versions
from .. import config
def minimal_tf_collate_fn(features):
if isinstance(features, dict): # case batch_size=None: nothing to collate
return features
elif config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
first = features[0]
batch = {}
for k, v in first.items():
if isinstance(v, np.ndarray):
batch[k] = np.stack([f[k] for f in features])
elif isinstance(v, tf.Tensor):
batch[k] = tf.stack([f[k] for f in features])
else:
batch[k] = np.array([f[k] for f in features])
return batch
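# Illustrative usage sketch (hypothetical, not part of the upstream module): what the
# minimal collate function does to a list of examples. The feature names are made up
# and TensorFlow is assumed to be installed.
def _example_minimal_tf_collate():  # hypothetical helper, for illustration only
    features = [
        {"input_ids": np.array([1, 2, 3]), "label": 0},
        {"input_ids": np.array([4, 5, 6]), "label": 1},
    ]
    batch = minimal_tf_collate_fn(features)
    # batch["input_ids"] has shape (2, 3); batch["label"] is np.array([0, 1])
    return batch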
def minimal_tf_collate_fn_with_renaming(features):
batch = minimal_tf_collate_fn(features)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
return batch
def is_numeric_pa_type(pa_type):
if pa.types.is_list(pa_type):
return is_numeric_pa_type(pa_type.value_type)
return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type)
def is_numeric_feature(feature):
from .. import ClassLabel, Sequence, Value
from ..features.features import _ArrayXD
if isinstance(feature, Sequence):
return is_numeric_feature(feature.feature)
elif isinstance(feature, list):
return is_numeric_feature(feature[0])
elif isinstance(feature, _ArrayXD):
return is_numeric_pa_type(feature().storage_dtype)
elif isinstance(feature, Value):
return is_numeric_pa_type(feature())
elif isinstance(feature, ClassLabel):
return True
else:
return False
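# Illustrative usage sketch (hypothetical, not part of the upstream module): which
# feature types `is_numeric_feature` treats as numeric.
def _example_is_numeric_feature():  # hypothetical helper, for illustration only
    from .. import ClassLabel, Sequence, Value
    assert is_numeric_feature(Value("int32"))
    assert is_numeric_feature(Sequence(Value("float64")))
    assert is_numeric_feature(ClassLabel(names=["neg", "pos"]))
    assert not is_numeric_feature(Value("string"))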
def np_get_batch(
indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False
):
if not isinstance(indices, np.ndarray):
indices = indices.numpy()
is_batched = True
# Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices
if isinstance(indices, np.integer):
batch = dataset[indices.item()]
is_batched = False
elif np.all(np.diff(indices) == 1):
batch = dataset[indices[0] : indices[-1] + 1]
elif isinstance(indices, np.ndarray):
batch = dataset[indices]
else:
raise RuntimeError("Unexpected type for indices: {}".format(type(indices)))
if cols_to_retain is not None:
batch = {
key: value
for key, value in batch.items()
if key in cols_to_retain or key in ("label", "label_ids", "labels")
}
if is_batched:
actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same
# Our collators expect a list of dicts, not a dict of lists/arrays, so we invert
batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)]
batch = collate_fn(batch, **collate_fn_args)
if return_dict:
out_batch = {}
for col, cast_dtype in columns_to_np_types.items():
# In case the collate_fn returns something strange
array = np.array(batch[col])
array = array.astype(cast_dtype)
out_batch[col] = array
else:
out_batch = []
for col, cast_dtype in columns_to_np_types.items():
# In case the collate_fn returns something strange
array = np.array(batch[col])
array = array.astype(cast_dtype)
out_batch.append(array)
return out_batch
def dataset_to_tf(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
):
"""Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess
equivalent is multiprocess_dataset_to_tf.
Args:
dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
cols_to_retain (`List[str]`): Dataset column(s) to load in the
tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
that do not exist in the original dataset.
collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
`collate_fn`. Can be empty.
columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
`tf.TensorSpec` objects.
shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
validation/evaluation.
batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
defaults to the same setting as shuffle.
Returns:
`tf.data.Dataset`
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
# TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything
# to the NumPy multiprocessing path.
if hasattr(tf, "random_index_shuffle"):
random_index_shuffle = tf.random_index_shuffle
elif hasattr(tf.random.experimental, "index_shuffle"):
random_index_shuffle = tf.random.experimental.index_shuffle
else:
if len(dataset) > 10_000_000:
warnings.warn(
"to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. "
"If you are iterating over a dataset with a very large number of samples, consider "
"upgrading to TF >= 2.9."
)
random_index_shuffle = None
getter_fn = partial(
np_get_batch,
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
return_dict=False,
)
# This works because dictionaries always output in the same order
tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()]
@tf.function(input_signature=[tf.TensorSpec(None, tf.int64)])
def fetch_function(indices):
output = tf.py_function(
getter_fn,
inp=[indices],
Tout=tout,
)
return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())}
tf_dataset = tf.data.Dataset.range(len(dataset))
if shuffle and random_index_shuffle is not None:
base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64))
def scan_random_index(state, index):
if tf.reduce_all(state == -1):
# This generates a new random seed once per epoch only,
# to ensure that we iterate over each sample exactly once per epoch
state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64)
shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1)
return state, shuffled_index
tf_dataset = tf_dataset.scan(base_seed, scan_random_index)
elif shuffle:
tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality())
if batch_size is not None:
tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder)
tf_dataset = tf_dataset.map(fetch_function)
if batch_size is not None:
def ensure_shapes(input_dict):
return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()}
else:
# Ensure shape but remove batch dimension of output_signature[key].shape
def ensure_shapes(input_dict):
return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()}
return tf_dataset.map(ensure_shapes)
class SharedMemoryContext:
# This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted
# The process that creates shared memory is always the one responsible for unlinking it in the end
def __init__(self):
self.created_shms = []
self.opened_shms = []
def get_shm(self, name, size, create):
shm = SharedMemory(size=int(size), name=name, create=create)
if create:
# We only unlink the ones we created in this context
self.created_shms.append(shm)
else:
# If we didn't create it, we only close it when done, we don't unlink it
self.opened_shms.append(shm)
return shm
def get_array(self, name, shape, dtype, create):
shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create)
return np.ndarray(shape, dtype=dtype, buffer=shm.buf)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
for shm in self.created_shms:
shm.close()
shm.unlink()
for shm in self.opened_shms:
shm.close()
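# Illustrative usage sketch (hypothetical, not part of the upstream module): sharing a
# small numpy buffer through SharedMemoryContext, mirroring how the parent process in
# __iter__ attaches to blocks created by workers. The block name is made up and
# Python >= 3.8 (shared_memory support) is assumed.
def _example_shared_memory_roundtrip():  # hypothetical helper, for illustration only
    with SharedMemoryContext() as owner_ctx:
        # The creating side owns the block and unlinks it on exit
        arr = owner_ctx.get_array("demo_block", shape=(4,), dtype=np.int64, create=True)
        arr[:] = np.arange(4)
        with SharedMemoryContext() as reader_ctx:
            # A reader only opens and closes the existing block
            view = reader_ctx.get_array("demo_block", shape=(4,), dtype=np.int64, create=False)
            copied = np.copy(view)  # copy out before the shared buffer goes away
            del view  # drop the buffer reference so the block can be closed
        del arr
    return copied  # array([0, 1, 2, 3])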
class NumpyMultiprocessingGenerator:
def __init__(
self,
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
num_workers,
):
self.dataset = dataset
self.cols_to_retain = cols_to_retain
self.collate_fn = collate_fn
self.collate_fn_args = collate_fn_args
self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype in (np.unicode_, np.str_)]
# Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize
self.columns_to_np_types = {
col: dtype if col not in self.string_columns else np.dtype("U1")
for col, dtype in columns_to_np_types.items()
}
self.output_signature = output_signature
self.shuffle = shuffle
self.batch_size = batch_size
self.drop_remainder = drop_remainder
self.num_workers = num_workers
# Because strings are converted to characters, we need to add one extra dimension to the shape
self.columns_to_ranks = {
col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1
for col, spec in output_signature.items()
}
def __iter__(self):
# Make sure we only spawn workers if they have work to do
num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size)))
# Do the shuffling in iter so that it's done at the start of each epoch
per_worker_batches, final_batch, final_batch_worker = self.distribute_batches(
self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle
)
ctx = get_context("spawn")
names = []
shape_arrays = []
workers = []
array_ready_events = [ctx.Event() for _ in range(num_workers)]
array_loaded_events = [ctx.Event() for _ in range(num_workers)]
base_args = {
"dataset": self.dataset,
"cols_to_retain": self.cols_to_retain,
"collate_fn": self.collate_fn,
"collate_fn_args": self.collate_fn_args,
"columns_to_np_types": self.columns_to_np_types,
"columns_to_ranks": self.columns_to_ranks,
"string_columns": self.string_columns,
}
with SharedMemoryContext() as shm_ctx:
for i in range(num_workers):
worker_random_id = str(uuid4())
worker_name = f"dw_{i}_{worker_random_id}"[:10]
names.append(worker_name)
worker_shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True)
for col, rank in self.columns_to_ranks.items()
}
shape_arrays.append(worker_shape_arrays)
worker_indices = per_worker_batches[i]
if i == final_batch_worker and final_batch is not None:
final_batch_arg = final_batch
else:
final_batch_arg = None
worker_kwargs = {
"worker_name": worker_name,
"indices": worker_indices,
"extra_batch": final_batch_arg,
"array_ready_event": array_ready_events[i],
"array_loaded_event": array_loaded_events[i],
**base_args,
}
worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True)
worker.start()
workers.append(worker)
end_signal_received = False
while not end_signal_received:
for i in range(num_workers):
if not array_ready_events[i].wait(timeout=60):
raise TimeoutError("Data loading worker timed out!")
array_ready_events[i].clear()
array_shapes = shape_arrays[i]
if any(np.any(shape < 0) for shape in array_shapes.values()):
# Child processes send negative array shapes to indicate
# that no more data is going to be sent
end_signal_received = True
break
# Matt: Because array shapes are variable we recreate the shared memory each iteration.
# I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process.
# A future optimization, at the cost of some code complexity, could be to reuse shared memory
# between iterations, but this would require knowing in advance the maximum size, or having
# a system to only create a new memory block when a new maximum size is seen.
# Another potential optimization would be to figure out which memory copies are necessary,
# or whether we can yield objects straight out of shared memory.
with SharedMemoryContext() as batch_shm_ctx:
# This memory context only lasts long enough to copy everything out of the batch
arrays = {
col: batch_shm_ctx.get_array(
f"{names[i]}_{col}",
shape=shape,
dtype=self.columns_to_np_types[col],
create=False,
)
for col, shape in array_shapes.items()
}
# Copy everything out of shm because the memory
# will be unlinked by the child process at some point
arrays = {col: np.copy(arr) for col, arr in arrays.items()}
# Now we convert any unicode char arrays to strings
for string_col in self.string_columns:
arrays[string_col] = (
arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1)
)
yield arrays
array_loaded_events[i].set()
# Now we just do some cleanup
# Shared memory is cleaned up by the context manager, so we just make sure workers finish
for worker in workers:
worker.join()
def __call__(self):
return self
@staticmethod
def worker_loop(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
columns_to_ranks,
string_columns,
indices,
extra_batch,
worker_name,
array_ready_event,
array_loaded_event,
):
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory
def send_batch_to_parent(indices):
batch = np_get_batch(
indices=indices,
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
return_dict=True,
)
# Now begins the fun part where we start shovelling shared memory at the parent process
out_arrays = {}
with SharedMemoryContext() as batch_shm_ctx:
# The batch shared memory context exists only as long as it takes for the parent process
# to read everything, after which it cleans everything up again
for col, cast_dtype in columns_to_np_types.items():
# Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor
array = batch[col]
if col in string_columns:
# We can't send unicode arrays over shared memory, so we convert to single chars ("U1")
# which have a fixed width of 4 bytes. The parent process will convert these back to strings.
array = array.view("U1").reshape(array.shape + (-1,))
shape_arrays[col][:] = array.shape
out_arrays[col] = batch_shm_ctx.get_array(
f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True
)
out_arrays[col][:] = array
array_ready_event.set()
array_loaded_event.wait()
array_loaded_event.clear()
with SharedMemoryContext() as shm_ctx:
shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False)
for col, rank in columns_to_ranks.items()
}
for batch in indices:
send_batch_to_parent(batch)
if extra_batch is not None:
send_batch_to_parent(extra_batch)
# Now we send a batsignal to the parent process that we're done
for col, array in shape_arrays.items():
array[:] = -1
array_ready_event.set()
@staticmethod
def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle):
indices = np.arange(len(dataset))
if shuffle:
np.random.shuffle(indices)
num_samples = len(indices)
# We distribute the batches so that reading from the workers in round-robin order yields the exact
# order specified in indices. This is only important when shuffle is False, but we do it regardless.
incomplete_batch_cutoff = num_samples - (num_samples % batch_size)
indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff])
if drop_remainder or len(last_incomplete_batch) == 0:
last_incomplete_batch = None
indices = indices.reshape(-1, batch_size)
num_batches = len(indices)
final_batches_cutoff = num_batches - (num_batches % num_workers)
indices, final_batches = np.split(indices, [final_batches_cutoff])
indices = indices.reshape(-1, num_workers, batch_size)
per_worker_indices = np.split(indices, indices.shape[1], axis=1)
per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices]
# Distribute the final batches to the first workers
for i in range(len(final_batches)):
# len(final_batches) can be zero, and is always less than num_workers
per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0)
# Add the last incomplete batch to the next worker, which might be the first worker
if last_incomplete_batch is not None:
incomplete_batch_worker_idx = len(final_batches)
else:
incomplete_batch_worker_idx = None
return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx
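# Illustrative usage sketch (hypothetical, not part of the upstream module): how
# `distribute_batches` splits indices across workers. The sizes are arbitrary and
# `range(10)` stands in for a real Dataset (only its length is used).
def _example_distribute_batches():  # hypothetical helper, for illustration only
    per_worker, last_incomplete_batch, incomplete_batch_worker_idx = (
        NumpyMultiprocessingGenerator.distribute_batches(
            dataset=range(10), batch_size=3, drop_remainder=False, num_workers=2, shuffle=False
        )
    )
    # Three full batches of 3 are spread over 2 workers; the leftover sample [9] is
    # handed to the worker identified by `incomplete_batch_worker_idx`.
    return per_worker, last_incomplete_batch, incomplete_batch_worker_idx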
def multiprocess_dataset_to_tf(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
num_workers,
):
"""Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process
equivalent is dataset_to_tf.
Args:
dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
cols_to_retain (`List[str]`): Dataset column(s) to load in the
tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
that do not exist in the original dataset.
collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
`collate_fn`. Can be empty.
columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
`tf.TensorSpec` objects.
shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
validation/evaluation.
batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
defaults to the same setting as shuffle.
num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1.
Returns:
`tf.data.Dataset`
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
data_generator = NumpyMultiprocessingGenerator(
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
output_signature=output_signature,
shuffle=shuffle,
batch_size=batch_size,
drop_remainder=drop_remainder,
num_workers=num_workers,
)
tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature)
if drop_remainder:
dataset_length = int(len(dataset) // batch_size)
else:
dataset_length = int(ceil(len(dataset) / batch_size))
return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/_filelock.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Utilities to handle file locking in `datasets`."""
import os
from filelock import FileLock as FileLock_
from filelock import UnixFileLock
class FileLock(FileLock_):
"""
A `filelock.FileLock` initializer that handles long paths.
"""
MAX_FILENAME_LENGTH = 255
def __init__(self, lock_file, *args, **kwargs):
lock_file = self.hash_filename_if_too_long(lock_file)
super().__init__(lock_file, *args, **kwargs)
@classmethod
def hash_filename_if_too_long(cls, path: str) -> str:
filename = os.path.basename(path)
max_filename_length = cls.MAX_FILENAME_LENGTH
if issubclass(cls, UnixFileLock):
max_filename_length = min(max_filename_length, os.statvfs(os.path.dirname(path)).f_namemax)
if len(filename) > max_filename_length:
dirname = os.path.dirname(path)
hashed_filename = str(hash(filename))
new_filename = (
filename[: max_filename_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
)
return os.path.join(dirname, new_filename)
else:
return path
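# Illustrative usage sketch (hypothetical, not part of the upstream module): how an
# over-long lock filename is shortened. The Unix-style directory and the 300-character
# name below are made up.
def _example_long_lock_filename():  # hypothetical helper, for illustration only
    long_path = os.path.join("/tmp", "a" * 300 + ".lock")
    shortened = FileLock.hash_filename_if_too_long(long_path)
    # The basename is truncated and suffixed with a hash so that it fits the limit.
    return len(os.path.basename(shortened)) <= FileLock.MAX_FILENAME_LENGTH  # True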
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/doc_utils.py | from typing import Callable
def is_documented_by(function_with_docstring: Callable):
"""Decorator to share docstrings across common functions.
Args:
        function_with_docstring (`Callable`): The function whose docstring will be copied.
"""
def wrapper(target_function):
target_function.__doc__ = function_with_docstring.__doc__
return target_function
return wrapper
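# Illustrative usage sketch (hypothetical functions, not part of the upstream module):
# `is_documented_by` copies the reference docstring onto the decorated function.
def _documented_reference(x):  # hypothetical function carrying the docstring
    """Return the input unchanged."""
    return x
@is_documented_by(_documented_reference)
def _documented_copy(x):  # hypothetical function reusing the docstring above
    return x
# After decoration, _documented_copy.__doc__ == "Return the input unchanged."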
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/stratify.py | import numpy as np
def approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Args
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
(inds,) = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int64)
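# Illustrative usage sketch (hypothetical, not part of the upstream module): drawing 7
# samples from classes with populations 4/3/2 keeps the per-class counts proportional.
# The numbers are made up.
def _example_approximate_mode():  # hypothetical helper, for illustration only
    rng = np.random.RandomState(42)
    counts = approximate_mode(np.array([4, 3, 2]), n_draws=7, rng=rng)
    # `counts` sums to exactly 7, here array([3, 2, 2])
    return counts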
def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):
"""
Provides train/test indices to split data in train/test sets.
    Its implementation is based on the StratifiedShuffleSplit implementation
    of the scikit-learn library.
Args
----------
n_train : int,
represents the absolute number of train samples.
n_test : int,
represents the absolute number of test samples.
    rng : np.random.RandomState instance
        Controls the randomness of the training and testing indices produced.
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
"""
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
        raise ValueError("Minimum class count error: the least populated class in y needs at least 2 members.")
if n_train < n_classes:
        raise ValueError(
            "The train_size = %d should be greater than or equal to the number of classes = %d" % (n_train, n_classes)
        )
if n_test < n_classes:
        raise ValueError(
            "The test_size = %d should be greater than or equal to the number of classes = %d" % (n_test, n_classes)
        )
class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1])
for _ in range(n_splits):
n_i = approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i in range(n_classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
train.extend(perm_indices_class_i[: n_i[i]])
test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
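# Illustrative usage sketch (hypothetical, not part of the upstream module): one
# stratified train/test split over a toy label array; labels and sizes are made up.
def _example_stratified_split():  # hypothetical helper, for illustration only
    y = np.array(["a", "a", "a", "b", "b", "b", "b", "b"])
    rng = np.random.RandomState(0)
    splits = stratified_shuffle_split_generate_indices(y, n_train=4, n_test=4, rng=rng, n_splits=1)
    train_indices, test_indices = next(splits)
    # Both index arrays contain samples from class "a" and from class "b".
    return train_indices, test_indices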
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/download_manager.py | # deprecated, please use datasets.download.download_manager
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/beam_utils.py | import os
from apache_beam.io.filesystems import FileSystems
from apache_beam.pipeline import Pipeline
from .logging import get_logger
CHUNK_SIZE = 2 << 20 # 2mb
logger = get_logger(__name__)
class BeamPipeline(Pipeline):
"""Wrapper over `apache_beam.pipeline.Pipeline` for convenience"""
def is_local(self):
runner = self._options.get_all_options().get("runner")
return runner in [None, "DirectRunner", "PortableRunner"]
def upload_local_to_remote(local_file_path, remote_file_path, force_upload=False):
"""Use the Beam Filesystems to upload to a remote directory on gcs/s3/hdfs..."""
fs = FileSystems
if fs.exists(remote_file_path):
if force_upload:
            logger.info(f"Remote path already exists: {remote_file_path}. Overwriting it as force_upload=True.")
else:
            logger.info(f"Remote path already exists: {remote_file_path}. Skipping it as force_upload=False.")
return
with fs.create(remote_file_path) as remote_file:
with open(local_file_path, "rb") as local_file:
chunk = local_file.read(CHUNK_SIZE)
while chunk:
remote_file.write(chunk)
chunk = local_file.read(CHUNK_SIZE)
def download_remote_to_local(remote_file_path, local_file_path, force_download=False):
"""Use the Beam Filesystems to download from a remote directory on gcs/s3/hdfs..."""
fs = FileSystems
if os.path.exists(local_file_path):
if force_download:
            logger.info(f"Local path already exists: {local_file_path}. Overwriting it as force_download=True.")
else:
            logger.info(f"Local path already exists: {local_file_path}. Skipping it as force_download=False.")
return
with fs.open(remote_file_path) as remote_file:
with open(local_file_path, "wb") as local_file:
chunk = remote_file.read(CHUNK_SIZE)
while chunk:
local_file.write(chunk)
chunk = remote_file.read(CHUNK_SIZE)
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/deprecation_utils.py | import enum
import inspect
import warnings
from functools import wraps
from typing import Callable, Optional
from .logging import get_logger
_emitted_deprecation_warnings = set()
logger = get_logger(__name__)
def deprecated(help_message: Optional[str] = None):
"""Decorator to mark a class or a function as deprecated.
Args:
help_message (:obj:`str`, optional): An optional message to guide the user on how to
switch to non-deprecated usage of the library.
"""
def decorator(deprecated_class_or_function: Callable):
global _emitted_deprecation_warnings
if inspect.isclass(deprecated_class_or_function):
deprecated_function = deprecated_class_or_function.__init__
name = deprecated_class_or_function.__name__
else:
deprecated_function = deprecated_class_or_function
name = deprecated_function.__name__
# Support deprecating __init__ class method: class name instead
name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2]
        warning_msg = (
            f"{name} is deprecated and will be removed in the next major version of datasets."
            + (f" {help_message}" if help_message else "")
        )
@wraps(deprecated_function)
def wrapper(*args, **kwargs):
func_hash = hash(deprecated_function)
if func_hash not in _emitted_deprecation_warnings:
warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
_emitted_deprecation_warnings.add(func_hash)
return deprecated_function(*args, **kwargs)
wrapper._decorator_name_ = "deprecated"
if inspect.isclass(deprecated_class_or_function):
deprecated_class_or_function.__init__ = wrapper
return deprecated_class_or_function
else:
return wrapper
return decorator
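# Illustrative usage sketch (hypothetical, not part of the upstream module): marking a
# made-up helper as deprecated. The first call emits a FutureWarning; later calls stay
# silent thanks to `_emitted_deprecation_warnings`. `_new_tokenize` is hypothetical.
@deprecated(help_message="Use `_new_tokenize` instead.")
def _old_tokenize(text):  # hypothetical function, for illustration only
    return text.split()
# _old_tokenize("a b")  # -> ['a', 'b'], with a single FutureWarning on first use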
class OnAccess(enum.EnumMeta):
"""
Enum metaclass that calls a user-specified function whenever a member is accessed.
"""
def __getattribute__(cls, name):
obj = super().__getattribute__(name)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
def __getitem__(cls, name):
member = super().__getitem__(name)
if member._on_access:
member._on_access()
return member
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
"""
Enum class that calls `deprecate` method whenever a member is accessed.
"""
def __new__(cls, value):
member = object.__new__(cls)
member._value_ = value
member._on_access = member.deprecate
return member
@property
def help_message(self):
return ""
def deprecate(self):
help_message = f" {self.help_message}" if self.help_message else ""
warnings.warn(
f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+ help_message,
FutureWarning,
stacklevel=3,
)
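# Illustrative usage sketch (hypothetical, not part of the upstream module): a made-up
# enum whose members emit a FutureWarning whenever they are accessed.
class _OldResourceKind(DeprecatedEnum):  # hypothetical enum, for illustration only
    LOCAL = "local"
    REMOTE = "remote"
    @property
    def help_message(self):
        return "Use plain strings instead."
# Accessing `_OldResourceKind.LOCAL` triggers `deprecate()` via the OnAccess metaclass.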
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/typing.py | import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/extract.py | import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from ._filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
def __init__(self, cache_dir: Optional[str] = None):
self.extract_dir = (
os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
self.extractor = Extractor
def _get_output_path(self, path: str) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
abs_path = os.path.abspath(path)
return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
def _do_extract(self, output_path: str, force_extract: bool) -> bool:
return force_extract or (
not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
)
def extract(self, input_path: str, force_extract: bool = False) -> str:
extractor_format = self.extractor.infer_extractor_format(input_path)
if not extractor_format:
return input_path
output_path = self._get_output_path(input_path)
if self._do_extract(output_path, force_extract):
self.extractor.extract(input_path, output_path, extractor_format)
return output_path
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
...
@staticmethod
@abstractmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
magic_numbers: List[bytes] = []
@staticmethod
def read_magic_number(path: Union[Path, str], magic_number_length: int):
with open(path, "rb") as f:
return f.read(magic_number_length)
@classmethod
def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
if not magic_number:
magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
try:
magic_number = cls.read_magic_number(path, magic_number_length)
except OSError:
return False
return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
@classmethod
def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
return tarfile.is_tarfile(path)
@staticmethod
def safemembers(members, output_path):
"""
Fix for CVE-2007-4559
Desc:
Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile
module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot)
sequence in filenames in a TAR archive, a related issue to CVE-2001-1267.
See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559
From: https://stackoverflow.com/a/10077309
"""
def resolved(path: str) -> str:
return os.path.realpath(os.path.abspath(path))
def badpath(path: str, base: str) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(base, path)).startswith(base)
def badlink(info, base: str) -> bool:
# Links are interpreted relative to the directory containing the link
tip = resolved(os.path.join(base, os.path.dirname(info.name)))
return badpath(info.linkname, base=tip)
base = resolved(output_path)
for finfo in members:
if badpath(finfo.name, base):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
elif finfo.issym() and badlink(finfo, base):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
elif finfo.islnk() and badlink(finfo, base):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
else:
yield finfo
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
os.makedirs(output_path, exist_ok=True)
tar_file = tarfile.open(input_path)
tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x1F\x8B"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with gzip.open(input_path, "rb") as gzip_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
if super().is_extractable(path, magic_number=magic_number):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(path, "rb") as fp:
endrec = _EndRecData(fp)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
data = fp.read(sizeCentralDir) # CD is where we expect it to be
if len(data) == sizeCentralDir:
centdir = struct.unpack(structCentralDir, data) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
os.makedirs(output_path, exist_ok=True)
with zipfile.ZipFile(input_path, "r") as zip_file:
zip_file.extractall(output_path)
zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with lzma.open(input_path) as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile")
import rarfile
os.makedirs(output_path, exist_ok=True)
rf = rarfile.RarFile(input_path)
rf.extractall(output_path)
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard")
import zstandard as zstd
dctx = zstd.ZstdDecompressor()
with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x42\x5A\x68"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with bz2.open(input_path, "rb") as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr")
import py7zr
os.makedirs(output_path, exist_ok=True)
with py7zr.SevenZipFile(input_path, "r") as archive:
archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x04\x22\x4D\x18"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4")
import lz4.frame
with lz4.frame.open(input_path, "rb") as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Check zip after tar and gzip, since a file can be wrongly detected as zip (e.g. when it is actually tar or gzip)
extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": Bzip2Extractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": Lz4Extractor, # <Added version="2.4.0"/>
}
@classmethod
def _get_magic_number_max_length(cls):
return max(
len(extractor_magic_number)
for extractor in cls.extractors.values()
if issubclass(extractor, MagicNumberBaseExtractor)
for extractor_magic_number in extractor.magic_numbers
)
@staticmethod
def _read_magic_number(path: Union[Path, str], magic_number_length: int):
try:
return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
except OSError:
return b""
@classmethod
def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead.",
category=FutureWarning,
)
extractor_format = cls.infer_extractor_format(path)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def infer_extractor_format(cls, path: Union[Path, str]) -> str: # <Added version="2.4.0"/>
magic_number_max_length = cls._get_magic_number_max_length()
magic_number = cls._read_magic_number(path, magic_number_max_length)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(path, magic_number=magic_number):
return extractor_format
@classmethod
def extract(
cls,
input_path: Union[Path, str],
output_path: Union[Path, str],
extractor_format: Optional[str] = None, # <Added version="2.4.0"/>
extractor: Optional[BaseExtractor] = "deprecated",
) -> None:
os.makedirs(os.path.dirname(output_path), exist_ok=True)
# Prevent parallel extractions
lock_path = str(Path(output_path).with_suffix(".lock"))
with FileLock(lock_path):
shutil.rmtree(output_path, ignore_errors=True)
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(extractor_format, str): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead.",
category=FutureWarning,
)
extractor = extractor if extractor != "deprecated" else extractor_format
else:
extractor = cls.extractors[extractor_format]
return extractor.extract(input_path, output_path)
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0.",
category=FutureWarning,
)
for extractor in cls.extractors.values():
if extractor.is_extractable(input_path):
return extractor.extract(input_path, output_path)
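# Illustrative usage sketch (hypothetical, not part of the upstream module): creating a
# tiny gzip file and letting `Extractor` detect and extract it. The temporary file
# names are made up.
def _example_infer_and_extract():  # hypothetical helper, for illustration only
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        archive_path = os.path.join(tmp_dir, "data.txt.gz")
        with gzip.open(archive_path, "wb") as f:
            f.write(b"hello")
        extractor_format = Extractor.infer_extractor_format(archive_path)  # "gzip"
        output_path = os.path.join(tmp_dir, "data.txt")
        Extractor.extract(archive_path, output_path, extractor_format)
        with open(output_path, "rb") as f:
            return extractor_format, f.read()  # ("gzip", b"hello")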
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/py_utils.py | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Some python utils function and classes.
"""
import copy
import functools
import itertools
import multiprocessing.pool
import os
import queue
import re
import types
import warnings
from contextlib import contextmanager
from dataclasses import fields, is_dataclass
from multiprocessing import Manager
from queue import Empty
from shutil import disk_usage
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union
from urllib.parse import urlparse
import multiprocess
import multiprocess.pool
import numpy as np
from tqdm.auto import tqdm
from .. import config
from ..parallel import parallel_map
from . import logging
from . import tqdm as hf_tqdm
from ._dill import ( # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
Pickler,
dump,
dumps,
pklregister,
)
try: # pragma: no branch
import typing_extensions as _typing_extensions
from typing_extensions import Final, Literal
except ImportError:
_typing_extensions = Literal = Final = None
logger = logging.get_logger(__name__)
# NOTE: When used on an instance method, the cache is shared across all
# instances and IS NOT per-instance.
# See
# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance
# For @property methods, use @memoized_property below.
memoize = functools.lru_cache
def size_str(size_in_bytes):
"""Returns a human readable size string.
If size_in_bytes is None, then returns "Unknown size".
For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`.
Args:
size_in_bytes: `int` or `None`, the size, in bytes, that we want to
format as a human-readable size string.
"""
if not size_in_bytes:
return "Unknown size"
_NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)]
size_in_bytes = float(size_in_bytes)
for name, size_bytes in _NAME_LIST:
value = size_in_bytes / size_bytes
if value >= 1.0:
return f"{value:.2f} {name}"
return f"{int(size_in_bytes)} bytes"
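# Illustrative usage sketch (hypothetical, not part of the upstream module): sample
# outputs of `size_str` for a couple of made-up byte counts.
def _example_size_str():  # hypothetical helper, for illustration only
    return size_str(1_572_864), size_str(None)  # ("1.50 MiB", "Unknown size")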
def convert_file_size_to_int(size: Union[int, str]) -> int:
"""
    Converts a size expressed as a string with digits and a unit (like `"50MB"`) to an integer (in bytes).
Args:
size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
Example:
```py
>>> convert_file_size_to_int("1MiB")
1048576
```
"""
if isinstance(size, int):
return size
if size.upper().endswith("PIB"):
return int(size[:-3]) * (2**50)
if size.upper().endswith("TIB"):
return int(size[:-3]) * (2**40)
if size.upper().endswith("GIB"):
return int(size[:-3]) * (2**30)
if size.upper().endswith("MIB"):
return int(size[:-3]) * (2**20)
if size.upper().endswith("KIB"):
return int(size[:-3]) * (2**10)
if size.upper().endswith("PB"):
int_size = int(size[:-2]) * (10**15)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("TB"):
int_size = int(size[:-2]) * (10**12)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("GB"):
int_size = int(size[:-2]) * (10**9)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("MB"):
int_size = int(size[:-2]) * (10**6)
return int_size // 8 if size.endswith("b") else int_size
if size.upper().endswith("KB"):
int_size = int(size[:-2]) * (10**3)
return int_size // 8 if size.endswith("b") else int_size
raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
def glob_pattern_to_regex(pattern):
# partially taken from fsspec:
# https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735
return (
pattern.replace("\\", r"\\")
.replace(".", r"\.")
.replace("*", ".*")
.replace("+", r"\+")
.replace("//", "/")
.replace("(", r"\(")
.replace(")", r"\)")
.replace("|", r"\|")
.replace("^", r"\^")
.replace("$", r"\$")
.rstrip("/")
.replace("?", ".")
)
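# Illustrative usage sketch (hypothetical, not part of the upstream module): the file
# pattern below is made up.
def _example_glob_pattern_to_regex():  # hypothetical helper, for illustration only
    regex = glob_pattern_to_regex("data/train-*.json")  # -> r"data/train-.*\.json"
    return re.fullmatch(regex, "data/train-001.json") is not None  # True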
def string_to_dict(string: str, pattern: str) -> Dict[str, str]:
"""Un-format a string using a python f-string pattern.
From https://stackoverflow.com/a/36838374
Example::
>>> p = 'hello, my name is {name} and I am a {age} year old {what}'
>>> s = p.format(name='cody', age=18, what='quarterback')
>>> s
'hello, my name is cody and I am a 18 year old quarterback'
>>> string_to_dict(s, p)
{'age': '18', 'name': 'cody', 'what': 'quarterback'}
Args:
string (str): input string
pattern (str): pattern formatted like a python f-string
Returns:
Dict[str, str]: dictionary of variable -> value, retrieved from the input using the pattern
Raises:
ValueError: if the string doesn't match the pattern
"""
regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern)
result = re.search(regex, string)
if result is None:
raise ValueError(f"String {string} doesn't match the pattern {pattern}")
values = list(result.groups())
keys = re.findall(r"{(.+?)}", pattern)
_dict = dict(zip(keys, values))
return _dict
def asdict(obj):
"""Convert an object to its dictionary representation recursively.
<Added version="2.4.0"/>
"""
# Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict
def _is_dataclass_instance(obj):
# https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass
return is_dataclass(obj) and not isinstance(obj, type)
def _asdict_inner(obj):
if _is_dataclass_instance(obj):
result = {}
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name))
if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False):
result[f.name] = value
return result
elif isinstance(obj, tuple) and hasattr(obj, "_fields"):
# obj is a namedtuple
return type(obj)(*[_asdict_inner(v) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_asdict_inner(v) for v in obj)
elif isinstance(obj, dict):
return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()}
else:
return copy.deepcopy(obj)
if not isinstance(obj, dict) and not _is_dataclass_instance(obj):
raise TypeError(f"{obj} is not a dict or a dataclass")
return _asdict_inner(obj)
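# A minimal sketch with a hypothetical dataclass; note that fields left at their default
# value are dropped unless flagged via the `include_in_asdict_even_if_is_default` metadata:
# >>> from dataclasses import dataclass
# >>> @dataclass
# ... class Point:
# ...     x: int = 0
# ...     y: int = 0
# >>> asdict({"p": Point(x=1, y=2)})
# {'p': {'x': 1, 'y': 2}}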
@contextmanager
def temporary_assignment(obj, attr, value):
"""Temporarily assign obj.attr to value."""
original = getattr(obj, attr, None)
setattr(obj, attr, value)
try:
yield
finally:
setattr(obj, attr, original)
@contextmanager
def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):
"""Temporarily set the random seed. This works for python numpy, pytorch and tensorflow."""
np_state = np.random.get_state()
np.random.seed(seed)
if set_pytorch and config.TORCH_AVAILABLE:
import torch
torch_state = torch.random.get_rng_state()
torch.random.manual_seed(seed)
if torch.cuda.is_available():
torch_cuda_states = torch.cuda.get_rng_state_all()
torch.cuda.manual_seed_all(seed)
if set_tensorflow and config.TF_AVAILABLE:
import tensorflow as tf
from tensorflow.python.eager import context as tfpycontext
tf_state = tf.random.get_global_generator()
temp_gen = tf.random.Generator.from_seed(seed)
tf.random.set_global_generator(temp_gen)
if not tf.executing_eagerly():
raise ValueError("Setting random seed for TensorFlow is only available in eager mode")
tf_context = tfpycontext.context() # eager mode context
tf_seed = tf_context._seed
tf_rng_initialized = hasattr(tf_context, "_rng")
if tf_rng_initialized:
tf_rng = tf_context._rng
tf_context._set_global_seed(seed)
try:
yield
finally:
np.random.set_state(np_state)
if set_pytorch and config.TORCH_AVAILABLE:
torch.random.set_rng_state(torch_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state_all(torch_cuda_states)
if set_tensorflow and config.TF_AVAILABLE:
tf.random.set_global_generator(tf_state)
tf_context._seed = tf_seed
if tf_rng_initialized:
tf_context._rng = tf_rng
else:
delattr(tf_context, "_rng")
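# A minimal sketch: draws under the same seed are identical, and the previous numpy RNG
# state is restored once the context exits:
# >>> with temp_seed(42):
# ...     a = np.random.rand(3)
# >>> with temp_seed(42):
# ...     b = np.random.rand(3)
# >>> np.array_equal(a, b)
# True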
def unique_values(values):
"""Iterate over iterable and return only unique values in order."""
seen = set()
for value in values:
if value not in seen:
seen.add(value)
yield value
def no_op_if_value_is_null(func):
"""If the value is None, return None, else call `func`."""
def wrapper(value):
return func(value) if value is not None else None
return wrapper
def first_non_null_value(iterable):
"""Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index."""
for i, value in enumerate(iterable):
if value is not None:
return i, value
return -1, None
def zip_dict(*dicts):
"""Iterate over items of dictionaries grouped by their keys."""
    for key in unique_values(itertools.chain(*dicts)):  # merge the keys of all dicts
        # Will raise a KeyError if the dicts don't have the same keys
yield key, tuple(d[key] for d in dicts)
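# A minimal sketch (a KeyError is raised if the dicts don't share the same keys):
# >>> list(zip_dict({"a": 1, "b": 2}, {"a": 10, "b": 20}))
# [('a', (1, 10)), ('b', (2, 20))]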
class NonMutableDict(dict):
"""Dict where keys can only be added but not modified.
    Will raise an error if the user tries to overwrite an existing key. The error message
can be customized during construction. It will be formatted using {key} for
the overwritten key.
"""
def __init__(self, *args, **kwargs):
self._error_msg = kwargs.pop(
"error_msg",
"Try to overwrite existing key: {key}",
)
if kwargs:
raise ValueError("NonMutableDict cannot be initialized with kwargs.")
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key in self:
raise ValueError(self._error_msg.format(key=key))
return super().__setitem__(key, value)
def update(self, other):
if any(k in self for k in other):
raise ValueError(self._error_msg.format(key=set(self) & set(other)))
return super().update(other)
class classproperty(property): # pylint: disable=invalid-name
"""Descriptor to be used as decorator for @classmethods."""
def __get__(self, obj, objtype=None):
return self.fget.__get__(None, objtype)()
def _single_map_nested(args):
"""Apply a function recursively to each element of a nested data struct."""
function, data_struct, types, rank, disable_tqdm, desc = args
# Singleton first to spare some computation
if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
return function(data_struct)
# Reduce logging to keep things readable in multiprocessing with tqdm
if rank is not None and logging.get_verbosity() < logging.WARNING:
logging.set_verbosity_warning()
# Print at least one thing to fix tqdm in notebooks in multiprocessing
# see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308
if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__):
print(" ", end="", flush=True)
# Loop over single examples or batches and write to buffer/file if examples are to be updated
pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct
pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc
with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar:
if isinstance(data_struct, dict):
return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar}
else:
mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar]
if isinstance(data_struct, list):
return mapped
elif isinstance(data_struct, tuple):
return tuple(mapped)
else:
return np.array(mapped)
def map_nested(
function: Callable[[Any], Any],
data_struct: Any,
dict_only: bool = False,
map_list: bool = True,
map_tuple: bool = False,
map_numpy: bool = False,
num_proc: Optional[int] = None,
parallel_min_length: int = 2,
types: Optional[tuple] = None,
disable_tqdm: bool = True,
desc: Optional[str] = None,
) -> Any:
"""Apply a function recursively to each element of a nested data struct.
Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to
`parallel_min_length`.
<Changed version="2.5.0">
Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``.
Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and
multiprocessing is used.
</Changed>
Args:
function (`Callable`): Function to be applied to `data_struct`.
data_struct (`Any`): Data structure to apply `function` to.
        dict_only (`bool`, default `False`): Whether to only apply `function` recursively to `dict` values in
            `data_struct`.
        map_list (`bool`, default `True`): Whether to also apply `function` recursively to `list` elements (besides
            `dict` values).
        map_tuple (`bool`, default `False`): Whether to also apply `function` recursively to `tuple` elements (besides
            `dict` values).
        map_numpy (`bool`, default `False`): Whether to also apply `function` recursively to `numpy.array` elements
            (besides `dict` values).
num_proc (`int`, *optional*): Number of processes.
parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel
processing.
<Added version="2.5.0"/>
types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their
elements.
disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar.
desc (`str`, *optional*): Prefix for the tqdm progressbar.
Returns:
`Any`
"""
if types is None:
types = []
if not dict_only:
if map_list:
types.append(list)
if map_tuple:
types.append(tuple)
if map_numpy:
types.append(np.ndarray)
types = tuple(types)
# Singleton
if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
return function(data_struct)
iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct
if num_proc is None:
num_proc = 1
if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable):
mapped = [
map_nested(
function=function,
data_struct=obj,
num_proc=num_proc,
parallel_min_length=parallel_min_length,
types=types,
)
for obj in iterable
]
elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:
mapped = [
_single_map_nested((function, obj, types, None, True, None))
for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
]
else:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".* is experimental and might be subject to breaking changes in the future\\.$",
category=UserWarning,
)
mapped = parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, _single_map_nested)
if isinstance(data_struct, dict):
return dict(zip(data_struct.keys(), mapped))
else:
if isinstance(data_struct, list):
return mapped
elif isinstance(data_struct, tuple):
return tuple(mapped)
else:
return np.array(mapped)
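# A minimal sketch of the sequential path (no multiprocessing), applying a function
# to every leaf of a nested dict/list structure:
# >>> map_nested(lambda x: x * 2, {"a": [1, 2], "b": 3})
# {'a': [2, 4], 'b': 6}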
class NestedDataStructure:
def __init__(self, data=None):
self.data = data if data is not None else []
def flatten(self, data=None):
data = data if data is not None else self.data
if isinstance(data, dict):
return self.flatten(list(data.values()))
elif isinstance(data, (list, tuple)):
return [flattened for item in data for flattened in self.flatten(item)]
else:
return [data]
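# A minimal sketch: nested dicts, lists and tuples are flattened into a single list of leaves:
# >>> NestedDataStructure({"a": [1, 2], "b": {"c": 3}}).flatten()
# [1, 2, 3]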
def has_sufficient_disk_space(needed_bytes, directory="."):
try:
free_bytes = disk_usage(os.path.abspath(directory)).free
except OSError:
return True
return needed_bytes < free_bytes
def _convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
"""Convert a link to a file on a github repo in a link to the raw github object."""
parsed = urlparse(url_path)
sub_directory = None
if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
if "blob" in url_path:
if not url_path.endswith(".py"):
raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")
url_path = url_path.replace("blob", "raw") # Point to the raw file
else:
# Parse github url to point to zip
github_path = parsed.path[1:]
repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
repo_owner, repo_name = repo_info.split("/")
url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
sub_directory = f"{repo_name}-{branch}"
return url_path, sub_directory
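# A minimal sketch with hypothetical repository URLs:
# >>> _convert_github_url("https://github.com/user/repo/blob/main/script.py")
# ('https://github.com/user/repo/raw/main/script.py', None)
# >>> _convert_github_url("https://github.com/user/repo/tree/main")
# ('https://github.com/user/repo/archive/main.zip', 'repo-main')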
def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]:
    """Find whether we should import or clone additional files for a given processing script, and list these imports.
    We allow:
    - library dependencies,
    - local dependencies and
    - external dependencies whose URL is specified with a comment starting with "# From:" followed by the raw URL to a file, an archive or a GitHub repository.
      In that case the external dependency is downloaded (and extracted if needed) in the dataset folder.
    We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.
    Note that only direct imports in the dataset processing script are handled;
    we don't recursively explore the additional imports to download further files.
Example::
import tensorflow
import .c4_utils
import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
"""
lines = []
with open(file_path, encoding="utf-8") as f:
lines.extend(f.readlines())
logger.debug(f"Checking {file_path} for additional imports.")
imports: List[Tuple[str, str, str, Optional[str]]] = []
is_in_docstring = False
for line in lines:
docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)
if len(docstr_start_match) == 1:
            # flip True <=> False only if the docstring
            # starts at this line without finishing
is_in_docstring = not is_in_docstring
if is_in_docstring:
            # import statements in docstrings should
            # not be added as required dependencies
continue
match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
if match is None:
match = re.match(
r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
line,
flags=re.MULTILINE,
)
if match is None:
continue
if match.group(1):
# The import starts with a '.', we will download the relevant file
if any(imp[1] == match.group(2) for imp in imports):
# We already have this import
continue
if match.group(3):
# The import has a comment with 'From:', we'll retrieve it from the given url
url_path = match.group(3)
url_path, sub_directory = _convert_github_url(url_path)
imports.append(("external", match.group(2), url_path, sub_directory))
elif match.group(2):
# The import should be at the same place as the file
imports.append(("internal", match.group(2), match.group(2), None))
else:
if match.group(3):
                # The import has a comment with `From: git+https:...`, which asks the user to pip install from git.
url_path = match.group(3)
imports.append(("library", match.group(2), url_path, None))
else:
imports.append(("library", match.group(2), match.group(2), None))
return imports
def copyfunc(func):
result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__)
result.__kwdefaults__ = func.__kwdefaults__
return result
Y = TypeVar("Y")
def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int:
for i, result in enumerate(func(**kwargs)):
queue.put(result)
return i
def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> Set[int]:
return {f.pid for f in pool._pool}
def iflatmap_unordered(
pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool],
func: Callable[..., Iterable[Y]],
*,
kwargs_iterable: Iterable[dict],
) -> Iterable[Y]:
initial_pool_pid = _get_pool_pid(pool)
pool_changed = False
manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager
with manager_cls() as manager:
queue = manager.Queue()
async_results = [
pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable
]
try:
while True:
try:
yield queue.get(timeout=0.05)
except Empty:
if all(async_result.ready() for async_result in async_results) and queue.empty():
break
if _get_pool_pid(pool) != initial_pool_pid:
pool_changed = True
# One of the subprocesses has died. We should not wait forever.
raise RuntimeError(
"One of the subprocesses has abruptly died during map operation."
"To debug the error, disable multiprocessing."
)
finally:
if not pool_changed:
# we get the result in case there's an error to raise
[async_result.get(timeout=0.05) for async_result in async_results]
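# A minimal sketch, assuming a picklable top-level generator function `gen(n)` that yields
# `range(n)`; results from the two jobs arrive in arbitrary order, hence the sort:
# >>> from multiprocess import Pool
# >>> with Pool(2) as pool:
# ...     out = sorted(iflatmap_unordered(pool, gen, kwargs_iterable=[{"n": 3}, {"n": 2}]))
# >>> out
# [0, 0, 1, 1, 2]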
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/filelock.py | # deprecated, please use the `filelock` package instead
from filelock import ( # noqa: F401 # imported for backward compatibility TODO: remove in 3.0.0
BaseFileLock,
SoftFileLock,
Timeout,
UnixFileLock,
WindowsFileLock,
)
from ._filelock import FileLock # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/track.py | from collections.abc import Iterator
from typing import Iterable
class tracked_str(str):
origins = {}
def set_origin(self, origin: str):
if super().__repr__() not in self.origins:
self.origins[super().__repr__()] = origin
def get_origin(self):
return self.origins.get(super().__repr__(), str(self))
def __repr__(self) -> str:
if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self:
return super().__repr__()
else:
return f"{str(self)} (origin={self.origins[super().__repr__()]})"
class tracked_list(list):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.last_item = None
def __iter__(self) -> Iterator:
for x in super().__iter__():
self.last_item = x
yield x
self.last_item = None
def __repr__(self) -> str:
if self.last_item is None:
return super().__repr__()
else:
return f"{self.__class__.__name__}(current={self.last_item})"
class TrackedIterable(Iterable):
def __init__(self) -> None:
super().__init__()
self.last_item = None
def __repr__(self) -> str:
if self.last_item is None:
            return super().__repr__()
else:
return f"{self.__class__.__name__}(current={self.last_item})"
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/experimental.py | """Contains utilities to flag a feature as "experimental" in datasets."""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
"""Decorator to flag a feature as experimental.
    An experimental feature triggers a warning when used, as it might be subject to breaking changes in the future.
Args:
fn (`Callable`):
The function to flag as experimental.
Returns:
`Callable`: The decorated function.
Example:
```python
>>> from datasets.utils import experimental
>>> @experimental
... def my_function():
... print("Hello world!")
>>> my_function()
UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future.
Hello world!
```
"""
@wraps(fn)
def _inner_fn(*args, **kwargs):
warnings.warn(
(f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
UserWarning,
)
return fn(*args, **kwargs)
return _inner_fn
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/tqdm.py | """Utility helpers to handle progress bars in `datasets`.
Example:
1. Use `datasets.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.
2. To disable progress bars, either use `disable_progress_bars()` helper or set the
environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` to 1.
3. To re-enable progress bars, use `enable_progress_bars()`.
4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.
NOTE: The environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` takes priority over the programmatic helpers.
Example:
```py
from datasets.utils import (
are_progress_bars_disabled,
disable_progress_bars,
enable_progress_bars,
tqdm,
)
# Disable progress bars globally
disable_progress_bars()
# Use as normal `tqdm`
for _ in tqdm(range(5)):
do_something()
# Still not showing progress bars, as `disable=False` is overwritten to `True`.
for _ in tqdm(range(5), disable=False):
do_something()
are_progress_bars_disabled() # True
# Re-enable progress bars globally
enable_progress_bars()
# Progress bar will be shown !
for _ in tqdm(range(5)):
do_something()
```
"""
import warnings
from tqdm.auto import tqdm as old_tqdm
from ..config import HF_DATASETS_DISABLE_PROGRESS_BARS
# `HF_DATASETS_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_datasets_progress_bars_disabled`
# is a `bool`. If `HF_DATASETS_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.
# If `HF_DATASETS_DISABLE_PROGRESS_BARS` is None, it means the user has not set the
# environment variable and is free to enable/disable progress bars programmatically.
# TL;DR: env variable has priority over code.
#
# By default, progress bars are enabled.
_hf_datasets_progress_bars_disabled: bool = HF_DATASETS_DISABLE_PROGRESS_BARS or False
def disable_progress_bars() -> None:
"""
    Globally disable progress bars used in `datasets`, except if the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment
    variable has been set.
Use [`~utils.enable_progress_bars`] to re-enable them.
"""
if HF_DATASETS_DISABLE_PROGRESS_BARS is False:
warnings.warn(
"Cannot disable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=0` is set and has"
" priority."
)
return
global _hf_datasets_progress_bars_disabled
_hf_datasets_progress_bars_disabled = True
def enable_progress_bars() -> None:
"""
    Globally enable progress bars used in `datasets`, except if the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment
    variable has been set.
Use [`~utils.disable_progress_bars`] to disable them.
"""
if HF_DATASETS_DISABLE_PROGRESS_BARS is True:
warnings.warn(
"Cannot enable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=1` is set and has"
" priority."
)
return
global _hf_datasets_progress_bars_disabled
_hf_datasets_progress_bars_disabled = False
def are_progress_bars_disabled() -> bool:
"""Return whether progress bars are globally disabled or not.
    Progress bars used in `datasets` can be enabled or disabled globally using [`~utils.enable_progress_bars`]
    and [`~utils.disable_progress_bars`], or by setting the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment variable.
"""
global _hf_datasets_progress_bars_disabled
return _hf_datasets_progress_bars_disabled
class tqdm(old_tqdm):
"""
Class to override `disable` argument in case progress bars are globally disabled.
Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
"""
def __init__(self, *args, **kwargs):
if are_progress_bars_disabled():
kwargs["disable"] = True
super().__init__(*args, **kwargs)
def __delattr__(self, attr: str) -> None:
"""Fix for https://github.com/huggingface/datasets/issues/6066"""
try:
super().__delattr__(attr)
except AttributeError:
if attr != "_lock":
raise
# backward compatibility
enable_progress_bar = enable_progress_bars
disable_progress_bar = disable_progress_bars
def is_progress_bar_enabled():
return not are_progress_bars_disabled()
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/metadata.py | import textwrap
from collections import Counter
from itertools import groupby
from operator import itemgetter
from pathlib import Path
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union
import yaml
from huggingface_hub import DatasetCardData
from ..config import METADATA_CONFIGS_FIELD
from ..info import DatasetInfo, DatasetInfosDict
from ..utils.logging import get_logger
from .deprecation_utils import deprecated
logger = get_logger(__name__)
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def _check_no_duplicates_on_constructed_node(self, node):
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(key) if isinstance(key, list) else key for key in keys]
counter = Counter(keys)
duplicate_keys = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
def construct_mapping(self, node, deep=False):
mapping = super().construct_mapping(node, deep=deep)
self._check_no_duplicates_on_constructed_node(node)
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
full_content = list(readme_content.splitlines())
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
sep_idx = full_content[1:].index("---") + 1
yamlblock = "\n".join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(full_content)
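# A minimal sketch on a toy dataset card:
# >>> _split_yaml_from_readme("---\nlanguage: en\n---\n# My dataset")
# ('language: en', '# My dataset')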
@deprecated("Use `huggingface_hub.DatasetCardData` instead.")
class DatasetMetadata(dict):
# class attributes
_FIELDS_WITH_DASHES = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def from_readme(cls, path: Union[Path, str]) -> "DatasetMetadata":
"""Loads and validates the dataset metadata from its dataset card (README.md)
Args:
path (:obj:`Path`): Path to the dataset card (its README.md file)
Returns:
:class:`DatasetMetadata`: The dataset's metadata
Raises:
:obj:`TypeError`: If the dataset's metadata is invalid
"""
with open(path, encoding="utf-8") as readme_file:
yaml_string, _ = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(yaml_string)
else:
return cls()
def to_readme(self, path: Path):
if path.exists():
with open(path, encoding="utf-8") as readme_file:
readme_content = readme_file.read()
else:
readme_content = None
updated_readme_content = self._to_readme(readme_content)
with open(path, "w", encoding="utf-8") as readme_file:
readme_file.write(updated_readme_content)
def _to_readme(self, readme_content: Optional[str] = None) -> str:
if readme_content is not None:
_, content = _split_yaml_from_readme(readme_content)
full_content = "---\n" + self.to_yaml_string() + "---\n" + content
else:
full_content = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def from_yaml_string(cls, string: str) -> "DatasetMetadata":
"""Loads and validates the dataset metadata from a YAML string
Args:
string (:obj:`str`): The YAML string
Returns:
:class:`DatasetMetadata`: The dataset's metadata
Raises:
:obj:`TypeError`: If the dataset's metadata is invalid
"""
metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
# Convert the YAML keys to DatasetMetadata fields
metadata_dict = {
(key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**metadata_dict)
def to_yaml_string(self) -> str:
return yaml.safe_dump(
{
(key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},
sort_keys=False,
allow_unicode=True,
encoding="utf-8",
).decode("utf-8")
class MetadataConfigs(Dict[str, Dict[str, Any]]):
"""Should be in format {config_name: {**config_params}}."""
FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD
@staticmethod
def _raise_if_data_files_field_not_valid(metadata_config: dict):
yaml_data_files = metadata_config.get("data_files")
if yaml_data_files is not None:
yaml_error_message = textwrap.dedent(
f"""
Expected data_files in YAML to be either a string or a list of strings
or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files}
Examples of data_files in YAML:
data_files: data.csv
data_files: data/*.png
data_files:
- part0/*
- part1/*
data_files:
- split: train
path: train/*
- split: test
path: test/*
data_files:
- split: train
path:
- train/part1/*
- train/part2/*
- split: test
path: test/*
"""
)
if not isinstance(yaml_data_files, (list, str)):
raise ValueError(yaml_error_message)
if isinstance(yaml_data_files, list):
for yaml_data_files_item in yaml_data_files:
if (
not isinstance(yaml_data_files_item, (str, dict))
or isinstance(yaml_data_files_item, dict)
and not (
len(yaml_data_files_item) == 2
and "split" in yaml_data_files_item
and isinstance(yaml_data_files_item.get("path"), (str, list))
)
):
raise ValueError(yaml_error_message)
@classmethod
def _from_exported_parquet_files_and_dataset_infos(
cls,
revision: str,
exported_parquet_files: List[Dict[str, Any]],
dataset_infos: DatasetInfosDict,
) -> "MetadataConfigs":
metadata_configs = {
config_name: {
"data_files": [
{
"split": split_name,
"path": [
parquet_file["url"].replace("refs%2Fconvert%2Fparquet", revision)
for parquet_file in parquet_files_for_split
],
}
for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split"))
],
"version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"),
}
for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config"))
}
if dataset_infos:
# Preserve order of configs and splits
metadata_configs = {
config_name: {
"data_files": [
data_file
for split_name in dataset_info.splits
for data_file in metadata_configs[config_name]["data_files"]
if data_file["split"] == split_name
],
"version": metadata_configs[config_name]["version"],
}
for config_name, dataset_info in dataset_infos.items()
}
return cls(metadata_configs)
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":
if dataset_card_data.get(cls.FIELD_NAME):
metadata_configs = dataset_card_data[cls.FIELD_NAME]
if not isinstance(metadata_configs, list):
raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'")
for metadata_config in metadata_configs:
if "config_name" not in metadata_config:
raise ValueError(
f"Each config must include `config_name` field with a string name of a config, "
f"but got {metadata_config}. "
)
cls._raise_if_data_files_field_not_valid(metadata_config)
return cls(
{
config["config_name"]: {param: value for param, value in config.items() if param != "config_name"}
for config in metadata_configs
}
)
return cls()
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
for metadata_config in self.values():
self._raise_if_data_files_field_not_valid(metadata_config)
current_metadata_configs = self.from_dataset_card_data(dataset_card_data)
total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items()))
for config_name, config_metadata in total_metadata_configs.items():
config_metadata.pop("config_name", None)
dataset_card_data[self.FIELD_NAME] = [
{"config_name": config_name, **config_metadata}
for config_name, config_metadata in total_metadata_configs.items()
]
def get_default_config_name(self) -> Optional[str]:
default_config_name = None
for config_name, metadata_config in self.items():
if config_name == "default" or metadata_config.get("default"):
if default_config_name is None:
default_config_name = config_name
else:
raise ValueError(
f"Dataset has several default configs: '{default_config_name}' and '{config_name}'."
)
return default_config_name
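# A minimal sketch with hypothetical config names:
# >>> MetadataConfigs({"custom": {"default": True}, "other": {}}).get_default_config_name()
# 'custom'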
# DEPRECATED - just here to support old versions of evaluate like 0.2.2
# To support new tasks on the Hugging Face Hub, please open a PR for this file:
# https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
args = ap.parse_args()
readme_filepath = Path(args.readme_filepath)
dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/__init__.py | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
from . import tqdm as _tqdm # _tqdm is the module
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from .tqdm import (
disable_progress_bars,
enable_progress_bars,
are_progress_bars_disabled,
tqdm,
)
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/logging.py | # Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Logging utilities. """
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from .tqdm import ( # noqa: F401 # imported for backward compatibility
disable_progress_bar,
enable_progress_bar,
is_progress_bar_enabled,
tqdm,
)
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
"""
If DATASETS_VERBOSITY env var is set to one of the valid choices return that as the new default level.
If it is not - fall back to ``_default_log_level``
"""
env_level_str = os.getenv("DATASETS_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys()) }"
)
return _default_log_level
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(logging.StreamHandler())
library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
"""Return a logger with the specified name.
This function can be used in dataset scripts.
"""
if name is None:
name = _get_library_name()
return logging.getLogger(name)
def get_verbosity() -> int:
"""Return the current level for the HuggingFace datasets library's root logger.
Returns:
Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
<Tip>
HuggingFace datasets library has following logging levels:
- `datasets.logging.CRITICAL`, `datasets.logging.FATAL`
- `datasets.logging.ERROR`
- `datasets.logging.WARNING`, `datasets.logging.WARN`
- `datasets.logging.INFO`
- `datasets.logging.DEBUG`
</Tip>
"""
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
"""Set the level for the Hugging Face Datasets library's root logger.
Args:
verbosity:
Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
"""
_get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
"""Set the level for the Hugging Face datasets library's root logger to `INFO`.
This will display most of the logging information and tqdm bars.
Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`.
"""
return set_verbosity(INFO)
def set_verbosity_warning():
"""Set the level for the Hugging Face datasets library's root logger to `WARNING`.
This will display only the warning and errors logging information and tqdm bars.
Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`.
"""
return set_verbosity(WARNING)
def set_verbosity_debug():
"""Set the level for the Hugging Face datasets library's root logger to `DEBUG`.
This will display all the logging information and tqdm bars.
Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`.
"""
return set_verbosity(DEBUG)
def set_verbosity_error():
"""Set the level for the Hugging Face datasets library's root logger to `ERROR`.
This will display only the errors logging information and tqdm bars.
Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`.
"""
return set_verbosity(ERROR)
def disable_propagation() -> None:
"""Disable propagation of the library log outputs.
Note that log propagation is disabled by default.
"""
_get_library_root_logger().propagate = False
def enable_propagation() -> None:
"""Enable propagation of the library log outputs.
Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has
been configured.
"""
_get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/hub.py | import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from huggingface_hub.hf_api import RepoFile
from packaging import version
from requests import ConnectionError, HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Server Error)" and "503 (Service Unavailable)" HTTP errors
if config.HF_HUB_VERSION.release < version.parse("0.20.0").release:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
max_retries = 5
base_wait_time = 1
max_wait_time = 8
retry = 0
while True:
try:
hf_api.preupload_lfs_files(**kwargs)
except (RuntimeError, HTTPError, ConnectionError) as err:
if isinstance(err, RuntimeError):
if isinstance(err.__cause__, (HTTPError, ConnectionError)):
err = err.__cause__
else:
raise err
if retry >= max_retries or err.response and err.response.status_code not in [500, 503]:
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry) # Exponential backoff
logger.info(
f"{hf_api.preupload_lfs_files} timed out, retrying in {sleep_time}s... [{retry/max_retries}]"
)
time.sleep(sleep_time)
retry += 1
else:
break
else:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
hf_api.preupload_lfs_files(**kwargs)
# `list_files_info` is deprecated in favor of `list_repo_tree` in `huggingface_hub>=0.20.0`
if config.HF_HUB_VERSION.release < version.parse("0.20.0").release:
def list_files_info(hf_api: HfApi, **kwargs):
yield from hf_api.list_files_info(**kwargs)
else:
def list_files_info(hf_api: HfApi, **kwargs):
kwargs = {**kwargs, "recursive": True}
for repo_path in hf_api.list_repo_tree(**kwargs):
if isinstance(repo_path, RepoFile):
yield repo_path
# backward compatibility
hf_hub_url = partial(hf_hub_url, repo_type="dataset")
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/sharding.py | from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sharding without ambiguity for users
lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
if len(set(lists_lengths.values())) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
)
)
max_length = max(lists_lengths.values(), default=0)
return max(1, max_length)
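# A minimal sketch: the number of shards is the length of the (single) list of data sources:
# >>> _number_of_shards_in_gen_kwargs({"files": ["0.txt", "1.txt", "2.txt"], "metadata_path": "meta.json"})
# 3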
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
"""
Get the range of shard indices per job.
If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.
    The order of the shard indices is preserved: e.g. the first shards are given to the first job.
    Moreover, all the jobs are given approximately the same number of shards.
Example:
```python
>>> _distribute_shards(2, max_num_jobs=4)
[range(0, 1), range(1, 2)]
>>> _distribute_shards(10, max_num_jobs=3)
[range(0, 4), range(4, 7), range(7, 10)]
```
"""
shards_indices_per_group = []
for group_idx in range(max_num_jobs):
num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
shard_indices = range(start, start + num_shards_to_add)
shards_indices_per_group.append(shard_indices)
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
"""Split the gen_kwargs into `max_num_job` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
if num_shards == 1:
return [dict(gen_kwargs)]
else:
shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(value, list)
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(shard_indices_per_group))
]
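# A minimal sketch: four shards distributed over two jobs:
# >>> _split_gen_kwargs({"files": ["a", "b", "c", "d"]}, max_num_jobs=2)
# [{'files': ['a', 'b']}, {'files': ['c', 'd']}]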
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], list)
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
"""Return a shuffled copy of the input gen_kwargs"""
# We must shuffle all the lists, and lists of the same size must have the same shuffling.
# This way entangled lists of (shard, shard_metadata) are still in the right order.
# First, let's generate the shuffled indices per list size
list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
indices_per_size = {}
for size in list_sizes:
indices_per_size[size] = list(range(size))
rng.shuffle(indices_per_size[size])
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
shuffled_kwargs = dict(gen_kwargs)
for key, value in shuffled_kwargs.items():
if isinstance(value, list):
shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
return shuffled_kwargs
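# A minimal sketch: lists of the same length get the same permutation, so entangled
# (shard, metadata) pairs stay aligned after shuffling:
# >>> rng = np.random.default_rng(seed=0)
# >>> shuffled = _shuffle_gen_kwargs(rng, {"files": ["a", "b", "c"], "meta": ["ma", "mb", "mc"]})
# >>> [m == "m" + f for f, m in zip(shuffled["files"], shuffled["meta"])]
# [True, True, True]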
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/_dill.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extends `dill` to support pickling more types and produce more consistent dumps."""
import os
import sys
from io import BytesIO
from types import CodeType, FunctionType
import dill
from packaging import version
from .. import config
class Pickler(dill.Pickler):
dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
_legacy_no_dict_keys_sorting = False
def save(self, obj, save_persistent_id=True):
obj_type = type(obj)
if obj_type not in self.dispatch:
if "regex" in sys.modules:
import regex # type: ignore
if obj_type is regex.Pattern:
pklregister(obj_type)(_save_regexPattern)
if "spacy" in sys.modules:
import spacy # type: ignore
if issubclass(obj_type, spacy.Language):
pklregister(obj_type)(_save_spacyLanguage)
if "tiktoken" in sys.modules:
import tiktoken # type: ignore
if obj_type is tiktoken.Encoding:
pklregister(obj_type)(_save_tiktokenEncoding)
if "torch" in sys.modules:
import torch # type: ignore
if issubclass(obj_type, torch.Tensor):
pklregister(obj_type)(_save_torchTensor)
if obj_type is torch.Generator:
pklregister(obj_type)(_save_torchGenerator)
# Unwrap `torch.compile`-ed modules
if issubclass(obj_type, torch.nn.Module):
obj = getattr(obj, "_orig_mod", obj)
if "transformers" in sys.modules:
import transformers # type: ignore
if issubclass(obj_type, transformers.PreTrainedTokenizerBase):
pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase)
# Unwrap `torch.compile`-ed functions
if obj_type is FunctionType:
obj = getattr(obj, "_torchdynamo_orig_callable", obj)
dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)
def _batch_setitems(self, items):
if self._legacy_no_dict_keys_sorting:
return super()._batch_setitems(items)
# Ignore the order of keys in a dict
try:
# Faster, but fails for unorderable elements
items = sorted(items)
except Exception: # TypeError, decimal.InvalidOperation, etc.
from datasets.fingerprint import Hasher
items = sorted(items, key=lambda x: Hasher.hash(x[0]))
dill.Pickler._batch_setitems(self, items)
def memoize(self, obj):
# Don't memoize strings since two identical strings can have different Python ids
if type(obj) is not str: # noqa: E721
dill.Pickler.memoize(self, obj)
def pklregister(t):
"""Register a custom reducer for the type."""
def proxy(func):
Pickler.dispatch[t] = func
return func
return proxy
def dump(obj, file):
"""Pickle an object to a file."""
Pickler(file, recurse=True).dump(obj)
def dumps(obj):
"""Pickle an object to a string."""
file = BytesIO()
dump(obj, file)
return file.getvalue()
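# A minimal sketch: dict key order is ignored by this Pickler, so logically equal dicts
# produce the same dump (and therefore the same fingerprint hash):
# >>> dumps({"b": 2, "a": 1}) == dumps({"a": 1, "b": 2})
# True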
if config.DILL_VERSION < version.parse("0.3.6"):
def log(pickler, msg):
dill._dill.log.info(msg)
elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]:
def log(pickler, msg):
dill._dill.logger.trace(pickler, msg)
@pklregister(set)
def _save_set(pickler, obj):
log(pickler, f"Se: {obj}")
try:
# Faster, but fails for unorderable elements
args = (sorted(obj),)
except Exception: # TypeError, decimal.InvalidOperation, etc.
from datasets.fingerprint import Hasher
args = (sorted(obj, key=Hasher.hash),)
pickler.save_reduce(set, args, obj=obj)
log(pickler, "# Se")
def _save_regexPattern(pickler, obj):
import regex # type: ignore
log(pickler, f"Re: {obj}")
args = (obj.pattern, obj.flags)
pickler.save_reduce(regex.compile, args, obj=obj)
log(pickler, "# Re")
def _save_tiktokenEncoding(pickler, obj):
import tiktoken # type: ignore
log(pickler, f"Enc: {obj}")
args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens)
pickler.save_reduce(tiktoken.Encoding, args, obj=obj)
log(pickler, "# Enc")
def _save_torchTensor(pickler, obj):
import torch # type: ignore
# `torch.from_numpy` is not picklable in `torch>=1.11.0`
def create_torchTensor(np_array):
return torch.from_numpy(np_array)
log(pickler, f"To: {obj}")
args = (obj.detach().cpu().numpy(),)
pickler.save_reduce(create_torchTensor, args, obj=obj)
log(pickler, "# To")
def _save_torchGenerator(pickler, obj):
import torch # type: ignore
def create_torchGenerator(state):
generator = torch.Generator()
generator.set_state(state)
return generator
log(pickler, f"Ge: {obj}")
args = (obj.get_state(),)
pickler.save_reduce(create_torchGenerator, args, obj=obj)
log(pickler, "# Ge")
def _save_spacyLanguage(pickler, obj):
import spacy # type: ignore
def create_spacyLanguage(config, bytes):
lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"])
lang_inst = lang_cls.from_config(config)
return lang_inst.from_bytes(bytes)
log(pickler, f"Sp: {obj}")
args = (obj.config, obj.to_bytes())
pickler.save_reduce(create_spacyLanguage, args, obj=obj)
log(pickler, "# Sp")
def _save_transformersPreTrainedTokenizerBase(pickler, obj):
log(pickler, f"Tok: {obj}")
# Ignore the `cache` attribute
state = obj.__dict__
if "cache" in state and isinstance(state["cache"], dict):
state["cache"] = {}
pickler.save_reduce(type(obj), (), state=state, obj=obj)
log(pickler, "# Tok")
if config.DILL_VERSION < version.parse("0.3.6"):
@pklregister(CodeType)
def _save_code(pickler, obj):
"""
From dill._dill.save_code
This is a modified version that removes the origin (filename + line no.)
of functions created in notebooks or shells for example.
"""
dill._dill.log.info(f"Co: {obj}")
# The filename of a function is the .py file where it is defined.
# Filenames of functions created in notebooks or shells start with '<'
# ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
# Filenames of functions created in ipykernel the filename
# look like f"{tempdir}/ipykernel_{id1}/{id2}.py"
# Moreover lambda functions have a special name: '<lambda>'
# ex: (lambda x: x).__code__.co_name == "<lambda>" # True
#
# For the hashing mechanism we ignore where the function has been defined
# More specifically:
# - we ignore the filename of special functions (filename starts with '<')
# - we always ignore the line number
# - we only use the base name of the file instead of the whole path,
# to be robust in case a script is moved for example.
#
# Only those two lines are different from the original implementation:
co_filename = (
""
if obj.co_filename.startswith("<")
or (
len(obj.co_filename.split(os.path.sep)) > 1
and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
)
or obj.co_name == "<lambda>"
else os.path.basename(obj.co_filename)
)
co_firstlineno = 1
# The rest is the same as in the original dill implementation
if dill._dill.PY3:
if hasattr(obj, "co_posonlyargcount"):
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else:
args = (
obj.co_argcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else:
args = (
obj.co_argcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
pickler.save_reduce(CodeType, args, obj=obj)
dill._dill.log.info("# Co")
return
elif config.DILL_VERSION.release[:3] in [version.parse("0.3.6").release, version.parse("0.3.7").release]:
# From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104
@pklregister(CodeType)
def save_code(pickler, obj):
dill._dill.logger.trace(pickler, "Co: %s", obj)
############################################################################################################
# Modification here for huggingface/datasets
# The filename of a function is the .py file where it is defined.
# Filenames of functions created in notebooks or shells start with '<'
# ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
# Filenames of functions created in ipykernel the filename
# look like f"{tempdir}/ipykernel_{id1}/{id2}.py"
# Moreover lambda functions have a special name: '<lambda>'
# ex: (lambda x: x).__code__.co_name == "<lambda>" # True
#
# For the hashing mechanism we ignore where the function has been defined
# More specifically:
# - we ignore the filename of special functions (filename starts with '<')
# - we always ignore the line number
# - we only use the base name of the file instead of the whole path,
# to be robust in case a script is moved for example.
#
# Only those two lines are different from the original implementation:
co_filename = (
""
if obj.co_filename.startswith("<")
or (
len(obj.co_filename.split(os.path.sep)) > 1
and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
)
or obj.co_name == "<lambda>"
else os.path.basename(obj.co_filename)
)
co_firstlineno = 1
# The rest is the same as in the original dill implementation, except for the replacements:
# - obj.co_filename => co_filename
# - obj.co_firstlineno => co_firstlineno
############################################################################################################
if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
obj.co_qualname,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_endlinetable,
obj.co_columntable,
obj.co_exceptiontable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
obj.co_qualname,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_exceptiontable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_linetable"): # python 3.10 (16 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args)
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else: # python 3.7 (15 args)
args = (
obj.co_argcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
pickler.save_reduce(dill._dill._create_code, args, obj=obj)
dill._dill.logger.trace(pickler, "# Co")
return
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/version.py | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Version utils."""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
"""Dataset version `MAJOR.MINOR.PATCH`.
Args:
version_str (`str`):
The dataset version.
description (`str`):
A description of what is new in this version.
major (`str`):
minor (`str`):
patch (`str`):
Example:
```py
>>> VERSION = datasets.Version("1.0.0")
```
"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__(self):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
def __eq__(self, other):
try:
other = self._validate_operand(other)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __hash__(self):
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def from_dict(cls, dic):
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _to_yaml_string(self) -> str:
return self.version_str
def _str_to_version_tuple(version_str):
"""Return the tuple (major, minor, patch) version extracted from the str."""
res = _VERSION_REG.match(version_str)
if not res:
raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
def _version_tuple_to_str(version_tuple):
"""Return the str version from the version tuple (major, minor, patch)."""
return ".".join(str(v) for v in version_tuple)
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/_datasets_server.py | from typing import Any, Dict, List, Optional, Union
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
http_get,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetsServerError(DatasetsError):
"""Dataset-server error.
Raised when trying to use the Datasets-server HTTP API and when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
- unavailable /parquet or /info responses
"""
def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
datasets_server_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = http_get(
url=datasets_server_parquet_url + dataset,
temp_file=None,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
max_retries=3,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
)
    except Exception as e:  # noqa catch any exception from the datasets-server and consider that the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetsServerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, revision: str, token: Optional[Union[str, bool]]
) -> Dict[str, Dict[str, Any]]:
"""
    Get the dataset information; this can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
datasets_server_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = http_get(
url=datasets_server_info_url + dataset,
temp_file=None,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
max_retries=3,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == revision or revision is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
)
    except Exception as e:  # noqa catch any exception from the datasets-server and consider that the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetsServerError("No exported dataset infos available.")
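# Illustrative usage sketch (assumes network access; "squad" is only an example dataset name):
# both helpers query the datasets-server endpoint derived from config.HF_ENDPOINT and raise
# DatasetsServerError when no complete export is available for the requested dataset/revision.
if __name__ == "__main__":  # pragma: no cover - example only
    try:
        # revision=None accepts whatever revision the server exported
        infos = get_exported_dataset_infos("squad", revision=None, token=None)
        print(list(infos))  # config names, e.g. ["plain_text"]
    except DatasetsServerError as err:
        print(f"No exported dataset infos: {err}")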
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/patching.py | from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
"""Set all the modules components as attributes of the _PatchedModuleObj object."""
def __init__(self, module, attrs=None):
attrs = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__"):
setattr(self, key, getattr(module, key))
self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
"""
    Patch a submodule attribute of an object, keeping all other submodules intact at all levels.
Example::
>>> import importlib
>>> from datasets.load import dataset_module_factory
>>> from datasets.streaming import patch_submodule, xjoin
>>>
>>> dataset_module = dataset_module_factory("snli")
>>> snli_module = importlib.import_module(dataset_module.module_path)
>>> patcher = patch_submodule(snli_module, "os.path.join", xjoin)
>>> patcher.start()
>>> assert snli_module.os.path.join is xjoin
"""
_active_patches = []
def __init__(self, obj, target: str, new, attrs=None):
self.obj = obj
self.target = target
self.new = new
self.key = target.split(".")[0]
self.original = {}
self.attrs = attrs or []
def __enter__(self):
*submodules, target_attr = self.target.split(".")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(submodules)):
try:
submodule = import_module(".".join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
obj_attr = getattr(self.obj, attr)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows patching renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
):
self.original[attr] = obj_attr
# patch at top level
setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
patched = getattr(self.obj, attr)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
patched = getattr(patched, key)
# finally set the target attribute
setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
attr_value = getattr(import_module(".".join(submodules)), target_attr)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows patching renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj, attr) is attr_value:
self.original[attr] = getattr(self.obj, attr)
setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
self.original[target_attr] = globals()["__builtins__"][target_attr]
setattr(self.obj, target_attr, self.new)
else:
raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
def __exit__(self, *exc_info):
for attr in list(self.original):
setattr(self.obj, attr, self.original.pop(attr))
def start(self):
"""Activate a patch."""
self.__enter__()
self._active_patches.append(self)
def stop(self):
"""Stop an active patch."""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
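# Illustrative usage sketch (uses the standard library `shutil` module, which keeps `os` as a
# module-level global): patch_submodule also works as a context manager and restores the
# original attribute on exit.
if __name__ == "__main__":  # pragma: no cover - example only
    import os
    import shutil

    def _fake_join(*parts):
        return "/".join(parts)

    with patch_submodule(shutil, "os.path.join", _fake_join):
        assert shutil.os.path.join("a", "b") == "a/b"  # patched inside the block
    assert shutil.os.path.join("a", "b") == os.path.join("a", "b")  # restored afterwards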
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/info_utils.py | import enum
import os
from typing import Optional
from huggingface_hub.utils import insecure_hashlib
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
"""`Enum` that specifies which verification checks to run.
The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns
when generating/downloading a dataset for the first time.
The verification modes:
| | Verification checks |
|---------------------------|------------------------------------------------------------------------------ |
| `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |
| | and the validity (number of files, checksums, etc.) of downloaded files |
| `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files |
| `NO_CHECKS` | None |
"""
ALL_CHECKS = "all_checks"
BASIC_CHECKS = "basic_checks"
NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
"""Exceptions during checksums verifications of downloaded files."""
class UnexpectedDownloadedFile(ChecksumVerificationException):
"""Some downloaded files were not expected."""
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
"""Some files were supposed to be downloaded but were not."""
class NonMatchingChecksumError(ChecksumVerificationException):
"""The downloaded file checksum don't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
if expected_checksums is None:
logger.info("Unable to verify checksums.")
return
if len(set(expected_checksums) - set(recorded_checksums)) > 0:
raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
if len(set(recorded_checksums) - set(expected_checksums)) > 0:
raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
for_verification_name = " for " + verification_name if verification_name is not None else ""
if len(bad_urls) > 0:
raise NonMatchingChecksumError(
f"Checksums didn't match{for_verification_name}:\n"
f"{bad_urls}\n"
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
)
logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
"""Exceptions during splis verifications"""
class UnexpectedSplits(SplitsVerificationException):
"""The expected splits of the downloaded file is missing."""
class ExpectedMoreSplits(SplitsVerificationException):
"""Some recorded splits are missing."""
class NonMatchingSplitsSizesError(SplitsVerificationException):
"""The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
if expected_splits is None:
logger.info("Unable to verify splits sizes.")
return
if len(set(expected_splits) - set(recorded_splits)) > 0:
raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
if len(set(recorded_splits) - set(expected_splits)) > 0:
raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
bad_splits = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(bad_splits) > 0:
raise NonMatchingSplitsSizesError(str(bad_splits))
logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
"""Compute the file size and the sha256 checksum of a file"""
if record_checksum:
m = insecure_hashlib.sha256()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(1 << 20), b""):
m.update(chunk)
checksum = m.hexdigest()
else:
checksum = None
return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
"""Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
Args:
dataset_size (int): Dataset size in bytes.
Returns:
bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
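# Illustrative usage sketch (uses a throwaway temporary file): get_size_checksum_dict builds the
# per-file records that verify_checksums later compares against the expected checksums stored in
# the dataset info.
if __name__ == "__main__":  # pragma: no cover - example only
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False, suffix=".txt") as tmp:
        tmp.write(b"hello")
        tmp_path = tmp.name
    record = get_size_checksum_dict(tmp_path)
    print(record)  # {"num_bytes": 5, "checksum": "<sha256 hex digest>"}
    verify_checksums({tmp_path: record}, {tmp_path: record})  # identical mappings raise no error
    os.remove(tmp_path)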
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/utils/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import copy
import io
import json
import multiprocessing
import os
import posixpath
import re
import shutil
import sys
import time
import urllib
import warnings
from contextlib import closing, contextmanager
from functools import partial
from pathlib import Path
from typing import Optional, TypeVar, Union
from unittest.mock import patch
from urllib.parse import urljoin, urlparse
import fsspec
import huggingface_hub
import requests
from fsspec.core import strip_protocol
from fsspec.utils import can_be_local
from huggingface_hub.utils import insecure_hashlib
from packaging import version
from .. import __version__, config
from ..download.download_config import DownloadConfig
from . import _tqdm, logging
from . import tqdm as hf_tqdm
from ._filelock import FileLock
from .extract import ExtractManager
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
INCOMPLETE_SUFFIX = ".incomplete"
T = TypeVar("T", str, Path)
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
"""
Add hf_modules_cache to the python path.
By default hf_modules_cache='~/.cache/huggingface/modules'.
It can also be set with the environment variable HF_MODULES_CACHE.
This is used to add modules such as `datasets_modules`
"""
hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE
hf_modules_cache = str(hf_modules_cache)
if hf_modules_cache not in sys.path:
sys.path.append(hf_modules_cache)
os.makedirs(hf_modules_cache, exist_ok=True)
if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")):
with open(os.path.join(hf_modules_cache, "__init__.py"), "w"):
pass
return hf_modules_cache
def is_remote_url(url_or_filename: str) -> bool:
return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/")
def is_local_path(url_or_filename: str) -> bool:
# On unix the scheme of a local path is empty (for both absolute and relative),
# while on windows the scheme is the drive name (ex: "c") for absolute paths.
# for details on the windows behavior, see https://bugs.python.org/issue42215
return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/")
def is_relative_path(url_or_filename: str) -> bool:
return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename)
def relative_to_absolute_path(path: T) -> T:
"""Convert relative path to absolute path."""
abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path))))
return Path(abs_path_str) if isinstance(path, Path) else abs_path_str
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
if dataset:
endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
else:
endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
return "/".join((endpoint, identifier, filename))
def head_hf_s3(
identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
return http_head(
hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),
max_retries=max_retries,
)
def hf_github_url(path: str, name: str, dataset=True, revision: Optional[str] = None) -> str:
default_revision = "main" if version.parse(__version__).is_devrelease else __version__
revision = revision or default_revision
if dataset:
return config.REPO_DATASETS_URL.format(revision=revision, path=path, name=name)
else:
return config.REPO_METRICS_URL.format(revision=revision, path=path, name=name)
def url_or_path_join(base_name: str, *pathnames: str) -> str:
if is_remote_url(base_name):
return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames))
else:
return Path(base_name, *pathnames).as_posix()
def url_or_path_parent(url_or_path: str) -> str:
if is_remote_url(url_or_path):
return url_or_path[: url_or_path.rindex("/")]
else:
return os.path.dirname(url_or_path)
def hash_url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
    If the url ends with .py, '.py' is appended to the name so that cached
    dataset/metric loading scripts keep their Python extension.
"""
url_bytes = url.encode("utf-8")
url_hash = insecure_hashlib.sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = insecure_hashlib.sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".py"):
filename += ".py"
return filename
def cached_path(
url_or_filename,
download_config=None,
**download_kwargs,
) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
ValueError: if it couldn't parse the url or filename correctly
requests.exceptions.ConnectionError: in case of internet connection issue
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
# Convert fsspec URL in the format "file://local/path" to "local/path"
if can_be_local(url_or_filename):
url_or_filename = strip_protocol(url_or_filename)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=download_config.force_download,
proxies=download_config.proxies,
resume_download=download_config.resume_download,
user_agent=download_config.user_agent,
local_files_only=download_config.local_files_only,
use_etag=download_config.use_etag,
max_retries=download_config.max_retries,
token=download_config.token,
ignore_url_params=download_config.ignore_url_params,
storage_options=download_config.storage_options,
download_desc=download_config.download_desc,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif is_local_path(url_or_filename):
# File, but it doesn't exist.
raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
else:
# Something unknown
raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
if output_path is None:
return output_path
if download_config.extract_compressed_file:
output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
output_path, force_extract=download_config.force_extract
)
return relative_to_absolute_path(output_path)
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
ua = f"datasets/{__version__}"
ua += f"; python/{config.PY_VERSION}"
ua += f"; huggingface_hub/{huggingface_hub.__version__}"
ua += f"; pyarrow/{config.PYARROW_VERSION}"
if config.TORCH_AVAILABLE:
ua += f"; torch/{config.TORCH_VERSION}"
if config.TF_AVAILABLE:
ua += f"; tensorflow/{config.TF_VERSION}"
if config.JAX_AVAILABLE:
ua += f"; jax/{config.JAX_VERSION}"
if config.BEAM_AVAILABLE:
ua += f"; apache_beam/{config.BEAM_VERSION}"
if isinstance(user_agent, dict):
ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}"
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
def get_authentication_headers_for_url(
url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
) -> dict:
"""Handle the HF authentication"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
if url.startswith(config.HF_ENDPOINT):
return huggingface_hub.utils.build_hf_headers(
token=token, library_name="datasets", library_version=__version__
)
else:
return {}
class OfflineModeIsEnabled(ConnectionError):
pass
def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
"""Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True."""
if config.HF_DATASETS_OFFLINE:
raise OfflineModeIsEnabled(
"Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg)
)
def _request_with_retry(
method: str,
url: str,
max_retries: int = 0,
base_wait_time: float = 0.5,
max_wait_time: float = 2,
timeout: float = 10.0,
**params,
) -> requests.Response:
"""Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.
    Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then an OfflineModeIsEnabled error is raised.
Args:
method (str): HTTP method, such as 'GET' or 'HEAD'.
url (str): The URL of the resource to fetch.
max_retries (int): Maximum number of retries, defaults to 0 (no retries).
base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
retries then grows exponentially, capped by max_wait_time.
max_wait_time (float): Maximum amount of time between two retries, in seconds.
**params (additional keyword arguments): Params to pass to :obj:`requests.request`.
"""
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
tries, success = 0, False
while not success:
tries += 1
try:
response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
success = True
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
if tries > max_retries:
raise err
else:
                logger.info(f"{method} request to {url} timed out, retrying... [{tries}/{max_retries}]")
sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff
time.sleep(sleep_time)
return response
def fsspec_head(url, storage_options=None):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options)
if len(paths) > 1:
raise ValueError(f"HEAD can be called with at most one path but was called with {paths}")
return fs.info(paths[0])
def stack_multiprocessing_download_progress_bars():
# Stack downloads progress bars automatically using HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS=1
# We use environment variables since the download may happen in a subprocess
return patch.dict(os.environ, {"HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS": "1"})
class TqdmCallback(fsspec.callbacks.TqdmCallback):
def __init__(self, tqdm_kwargs=None, *args, **kwargs):
super().__init__(tqdm_kwargs, *args, **kwargs)
self._tqdm = _tqdm # replace tqdm.tqdm by datasets.tqdm.tqdm
def fsspec_get(url, temp_file, storage_options=None, desc=None):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options)
if len(paths) > 1:
raise ValueError(f"GET can be called with at most one path but was called with {paths}")
callback = TqdmCallback(
tqdm_kwargs={
"desc": desc or "Downloading",
"unit": "B",
"unit_scale": True,
"position": multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
and multiprocessing.current_process()._identity
else None,
}
)
fs.get_file(paths[0], temp_file.name, callback=callback)
def ftp_head(url, timeout=10.0):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
try:
with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
r.read(1)
except Exception:
return False
return True
def ftp_get(url, temp_file, timeout=10.0):
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
try:
logger.info(f"Getting through FTP {url} into {temp_file.name}")
with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
shutil.copyfileobj(r, temp_file)
except urllib.error.URLError as e:
raise ConnectionError(e) from None
def http_get(
url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None
) -> Optional[requests.Response]:
headers = dict(headers) if headers is not None else {}
headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
if resume_size > 0:
headers["Range"] = f"bytes={resume_size:d}-"
response = _request_with_retry(
method="GET",
url=url,
stream=True,
proxies=proxies,
headers=headers,
cookies=cookies,
max_retries=max_retries,
timeout=timeout,
)
if temp_file is None:
return response
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
with hf_tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc=desc or "Downloading",
position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
and multiprocessing.current_process()._identity
else None,
) as progress:
for chunk in response.iter_content(chunk_size=1024):
progress.update(len(chunk))
temp_file.write(chunk)
def http_head(
url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
) -> requests.Response:
headers = copy.deepcopy(headers) or {}
headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
response = _request_with_retry(
method="HEAD",
url=url,
proxies=proxies,
headers=headers,
cookies=cookies,
allow_redirects=allow_redirects,
timeout=timeout,
max_retries=max_retries,
)
return response
def request_etag(
url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
) -> Optional[str]:
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
if urlparse(url).scheme not in ("http", "https"):
return None
headers = get_authentication_headers_for_url(url, token=token)
response = http_head(url, headers=headers, max_retries=3)
response.raise_for_status()
etag = response.headers.get("ETag") if response.ok else None
return etag
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=100,
resume_download=False,
user_agent=None,
local_files_only=False,
use_etag=True,
max_retries=0,
token=None,
use_auth_token="deprecated",
ignore_url_params=False,
storage_options=None,
download_desc=None,
) -> str:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
"""
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
token = use_auth_token
if cache_dir is None:
cache_dir = config.HF_DATASETS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
if ignore_url_params:
# strip all query parameters and #fragments from the URL
cached_url = urljoin(url, urlparse(url).path)
else:
cached_url = url # additional parameters may be added to the given URL
connected = False
response = None
cookies = None
etag = None
head_error = None
scheme = None
    # Try a first time to find the file on the local file system without eTag (None)
# if we don't ask for 'force_download' then we spare a request
filename = hash_url_to_filename(cached_url, etag=None)
cache_path = os.path.join(cache_dir, filename)
if os.path.exists(cache_path) and not force_download and not use_etag:
return cache_path
# Prepare headers for authentication
headers = get_authentication_headers_for_url(url, token=token)
if user_agent is not None:
headers["user-agent"] = user_agent
# We don't have the file locally or we need an eTag
if not local_files_only:
scheme = urlparse(url).scheme
if scheme == "ftp":
connected = ftp_head(url)
elif scheme not in ("http", "https"):
response = fsspec_head(url, storage_options=storage_options)
# s3fs uses "ETag", gcsfs uses "etag"
etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None
connected = True
try:
response = http_head(
url,
allow_redirects=True,
proxies=proxies,
timeout=etag_timeout,
max_retries=max_retries,
headers=headers,
)
if response.status_code == 200: # ok
etag = response.headers.get("ETag") if use_etag else None
for k, v in response.cookies.items():
# In some edge cases, we need to get a confirmation token
if k.startswith("download_warning") and "drive.google.com" in url:
url += "&confirm=" + v
cookies = response.cookies
connected = True
# Fix Google Drive URL to avoid Virus scan warning
if "drive.google.com" in url and "confirm=" not in url:
url += "&confirm=t"
# In some edge cases, head request returns 400 but the connection is actually ok
elif (
(response.status_code == 400 and "firebasestorage.googleapis.com" in url)
or (response.status_code == 405 and "drive.google.com" in url)
or (
response.status_code == 403
and (
re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
)
)
or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
):
connected = True
logger.info(f"Couldn't get ETag version for url {url}")
elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None:
raise ConnectionError(
f"Unauthorized for URL {url}. Please use the parameter `token=True` after logging in with `huggingface-cli login`"
)
except (OSError, requests.exceptions.Timeout) as e:
# not connected
head_error = e
pass
    # connected is False: we don't have a connection, the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if not connected:
if os.path.exists(cache_path) and not force_download:
return cache_path
if local_files_only:
raise FileNotFoundError(
f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
" disabled. To enable file online look-ups, set 'local_files_only' to False."
)
elif response is not None and response.status_code == 404:
raise FileNotFoundError(f"Couldn't find file at {url}")
_raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
if head_error is not None:
raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
elif response is not None:
raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
else:
raise ConnectionError(f"Couldn't reach {url}")
# Try a second time
filename = hash_url_to_filename(cached_url, etag)
cache_path = os.path.join(cache_dir, filename)
if os.path.exists(cache_path) and not force_download:
return cache_path
# From now on, connected is True.
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
        # Retry in case previously locked processes just enter after the preceding process releases the lock
if os.path.exists(cache_path) and not force_download:
return cache_path
incomplete_path = cache_path + ".incomplete"
@contextmanager
def temp_file_manager(mode="w+b"):
with open(incomplete_path, mode) as f:
yield f
resume_size = 0
if resume_download:
temp_file_manager = partial(temp_file_manager, mode="a+b")
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
# Download to temporary file, then copy to cache path once finished.
# Otherwise, you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
# GET file object
if scheme == "ftp":
ftp_get(url, temp_file)
elif scheme not in ("http", "https"):
fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc)
else:
http_get(
url,
temp_file=temp_file,
proxies=proxies,
resume_size=resume_size,
headers=headers,
cookies=cookies,
max_retries=max_retries,
desc=download_desc,
)
logger.info(f"storing {url} in cache at {cache_path}")
shutil.move(temp_file.name, cache_path)
umask = os.umask(0o666)
os.umask(umask)
os.chmod(cache_path, 0o666 & ~umask)
logger.info(f"creating metadata file for {cache_path}")
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w", encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
return cache_path
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr)
return fn
return docstring_decorator
def estimate_dataset_size(paths):
return sum(path.stat().st_size for path in paths)
def readline(f: io.RawIOBase):
# From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
res = bytearray()
while True:
b = f.read(1)
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
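# Illustrative usage sketch (assumes network access; the URL and cache directory below are
# examples only): cached_path downloads a remote file into the cache (or returns a local path
# unchanged) and can be tuned through DownloadConfig.
if __name__ == "__main__":  # pragma: no cover - example only
    demo_config = DownloadConfig(cache_dir="/tmp/hf_datasets_demo_cache")  # hypothetical cache dir
    local_path = cached_path(
        "https://huggingface.co/datasets/squad/resolve/main/README.md",
        download_config=demo_config,
    )
    print(local_path)  # a file path inside /tmp/hf_datasets_demo_cache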
| 0 |
hf_public_repos/datasets/src/datasets/utils | hf_public_repos/datasets/src/datasets/utils/resources/creators.json | {
"language": [
"found",
"crowdsourced",
"expert-generated",
"machine-generated",
"other"
],
"annotations": [
"found",
"crowdsourced",
"expert-generated",
"machine-generated",
"no-annotation",
"other"
]
}
| 0 |
hf_public_repos/datasets/src/datasets/utils | hf_public_repos/datasets/src/datasets/utils/resources/size_categories.json | [
"unknown",
"n<1K",
"1K<n<10K",
"10K<n<100K",
"100K<n<1M",
"1M<n<10M",
"10M<n<100M",
"100M<n<1B",
"1B<n<10B",
"10B<n<100B",
"100B<n<1T",
"n>1T"
]
| 0 |
hf_public_repos/datasets/src/datasets/utils | hf_public_repos/datasets/src/datasets/utils/resources/languages.json | {
"code": "Programming language (C++, Java, Javascript, Python, etc.)",
"aa": "Afar",
"aaa": "Ghotuo",
"aab": "Alumu-Tesu",
"aac": "Ari",
"aad": "Amal",
"aae": "Arbëreshë Albanian",
"aaf": "Aranadan",
"aag": "Ambrak",
"aah": "Abu' Arapesh",
"aai": "Arifama-Miniafia",
"aak": "Ankave",
"aal": "Afade",
"aan": "Anambé",
"aao": "Algerian Saharan Arabic",
"aap": "Pará Arára",
"aaq": "Eastern Abnaki",
"aas": "Aasáx",
"aat": "Arvanitika Albanian",
"aau": "Abau",
"aav": "Austro-Asiatic languages",
"aaw": "Solong",
"aax": "Mandobo Atas",
"aaz": "Amarasi",
"ab": "Abkhazian",
"aba": "Abé",
"abb": "Bankon",
"abc": "Ambala Ayta",
"abd": "Manide",
"abe": "Western Abnaki",
"abf": "Abai Sungai",
"abg": "Abaga",
"abh": "Tajiki Arabic",
"abi": "Abidji",
"abj": "Aka-Bea",
"abl": "Lampung Nyo",
"abm": "Abanyom",
"abn": "Abua",
"abo": "Abon",
"abp": "Abellen Ayta",
"abq": "Abaza",
"abr": "Abron",
"abs": "Ambonese Malay",
"abt": "Ambulas",
"abu": "Abure",
"abv": "Baharna Arabic",
"abw": "Pal",
"abx": "Inabaknon",
"aby": "Aneme Wake",
"abz": "Abui",
"aca": "Achagua",
"acb": "Áncá",
"acd": "Gikyode",
"ace": "Achinese",
"acf": "Saint Lucian Creole French",
"ach": "Acoli",
"aci": "Aka-Cari",
"ack": "Aka-Kora",
"acl": "Akar-Bale",
"acm": "Mesopotamian Arabic",
"acn": "Achang",
"acp": "Eastern Acipa",
"acq": "Ta'izzi-Adeni Arabic",
"acr": "Achi",
"acs": "Acroá",
"act": "Achterhoeks",
"acu": "Achuar-Shiwiar",
"acv": "Achumawi",
"acw": "Hijazi Arabic",
"acx": "Omani Arabic",
"acy": "Cypriot Arabic",
"acz": "Acheron",
"ada": "Adangme",
"adb": "Atauran",
"add": "Lidzonka; Dzodinka",
"ade": "Adele",
"adf": "Dhofari Arabic",
"adg": "Andegerebinha",
"adh": "Adhola",
"adi": "Adi",
"adj": "Adioukrou",
"adl": "Galo",
"adn": "Adang",
"ado": "Abu",
"adq": "Adangbe",
"adr": "Adonara",
"ads": "Adamorobe Sign Language",
"adt": "Adnyamathanha",
"adu": "Aduge",
"adw": "Amundava",
"adx": "Amdo Tibetan",
"ady": "Adyghe; Adygei",
"adz": "Adzera",
"ae": "Avestan",
"aea": "Areba",
"aeb": "Tunisian Arabic",
"aec": "Saidi Arabic",
"aed": "Argentine Sign Language",
"aee": "Northeast Pashai; Northeast Pashayi",
"aek": "Haeke",
"ael": "Ambele",
"aem": "Arem",
"aen": "Armenian Sign Language",
"aeq": "Aer",
"aer": "Eastern Arrernte",
"aes": "Alsea",
"aeu": "Akeu",
"aew": "Ambakich",
"aey": "Amele",
"aez": "Aeka",
"af": "Afrikaans",
"afa": "Afro-Asiatic languages",
"afb": "Gulf Arabic",
"afd": "Andai",
"afe": "Putukwam",
"afg": "Afghan Sign Language",
"afh": "Afrihili",
"afi": "Akrukay; Chini",
"afk": "Nanubae",
"afn": "Defaka",
"afo": "Eloyi",
"afp": "Tapei",
"afs": "Afro-Seminole Creole",
"aft": "Afitti",
"afu": "Awutu",
"afz": "Obokuitai",
"aga": "Aguano",
"agb": "Legbo",
"agc": "Agatu",
"agd": "Agarabi",
"age": "Angal",
"agf": "Arguni",
"agg": "Angor",
"agh": "Ngelima",
"agi": "Agariya",
"agj": "Argobba",
"agk": "Isarog Agta",
"agl": "Fembe",
"agm": "Angaataha",
"agn": "Agutaynen",
"ago": "Tainae",
"agq": "Aghem",
"agr": "Aguaruna",
"ags": "Esimbi",
"agt": "Central Cagayan Agta",
"agu": "Aguacateco",
"agv": "Remontado Dumagat",
"agw": "Kahua",
"agx": "Aghul",
"agy": "Southern Alta",
"agz": "Mt. Iriga Agta",
"aha": "Ahanta",
"ahb": "Axamb",
"ahg": "Qimant",
"ahh": "Aghu",
"ahi": "Tiagbamrin Aizi",
"ahk": "Akha",
"ahl": "Igo",
"ahm": "Mobumrin Aizi",
"ahn": "Àhàn",
"aho": "Ahom",
"ahp": "Aproumu Aizi",
"ahr": "Ahirani",
"ahs": "Ashe",
"aht": "Ahtena",
"aia": "Arosi",
"aib": "Ainu (China)",
"aic": "Ainbai",
"aid": "Alngith",
"aie": "Amara",
"aif": "Agi",
"aig": "Antigua and Barbuda Creole English",
"aih": "Ai-Cham",
"aii": "Assyrian Neo-Aramaic",
"aij": "Lishanid Noshan",
"aik": "Ake",
"ail": "Aimele",
"aim": "Aimol",
"ain": "Ainu (Japan)",
"aio": "Aiton",
"aip": "Burumakok",
"aiq": "Aimaq",
"air": "Airoran",
"ait": "Arikem",
"aiw": "Aari",
"aix": "Aighon",
"aiy": "Ali",
"aja": "Aja (South Sudan)",
"ajg": "Aja (Benin)",
"aji": "Ajië",
"ajn": "Andajin",
"ajp": "South Levantine Arabic",
"ajs": "Algerian Jewish Sign Language",
"aju": "Judeo-Moroccan Arabic",
"ajw": "Ajawa",
"ajz": "Amri Karbi",
"ak": "Akan",
"akb": "Batak Angkola",
"akc": "Mpur",
"akd": "Ukpet-Ehom",
"ake": "Akawaio",
"akf": "Akpa",
"akg": "Anakalangu",
"akh": "Angal Heneng",
"aki": "Aiome",
"akj": "Aka-Jeru",
"akk": "Akkadian",
"akl": "Aklanon",
"akm": "Aka-Bo",
"ako": "Akurio",
"akp": "Siwu",
"akq": "Ak",
"akr": "Araki",
"aks": "Akaselem",
"akt": "Akolet",
"aku": "Akum",
"akv": "Akhvakh",
"akw": "Akwa",
"akx": "Aka-Kede",
"aky": "Aka-Kol",
"akz": "Alabama",
"ala": "Alago",
"alc": "Qawasqar",
"ald": "Alladian",
"ale": "Aleut",
"alf": "Alege",
"alg": "Algonquian languages",
"alh": "Alawa",
"ali": "Amaimon",
"alj": "Alangan",
"alk": "Alak",
"all": "Allar",
"alm": "Amblong",
"aln": "Gheg Albanian",
"alo": "Larike-Wakasihu",
"alp": "Alune",
"alq": "Algonquin",
"alr": "Alutor",
"als": "Tosk Albanian",
"alt": "Southern Altai",
"alu": "'Are'are",
"alv": "Atlantic-Congo languages",
"alw": "Alaba-K’abeena; Wanbasana",
"alx": "Amol",
"aly": "Alyawarr",
"alz": "Alur",
"am": "Amharic",
"ama": "Amanayé",
"amb": "Ambo",
"amc": "Amahuaca",
"ame": "Yanesha'",
"amf": "Hamer-Banna",
"amg": "Amurdak",
"ami": "Amis",
"amj": "Amdang",
"amk": "Ambai",
"aml": "War-Jaintia",
"amm": "Ama (Papua New Guinea)",
"amn": "Amanab",
"amo": "Amo",
"amp": "Alamblak",
"amq": "Amahai",
"amr": "Amarakaeri",
"ams": "Southern Amami-Oshima",
"amt": "Amto",
"amu": "Guerrero Amuzgo",
"amv": "Ambelau",
"amw": "Western Neo-Aramaic",
"amx": "Anmatyerre",
"amy": "Ami",
"amz": "Atampaya",
"an": "Aragonese",
"ana": "Andaqui",
"anb": "Andoa",
"anc": "Ngas",
"and": "Ansus",
"ane": "Xârâcùù",
"anf": "Animere",
"ang": "Old English (ca. 450-1100)",
"anh": "Nend",
"ani": "Andi",
"anj": "Anor",
"ank": "Goemai",
"anl": "Anu-Hkongso Chin",
"anm": "Anal",
"ann": "Obolo",
"ano": "Andoque",
"anp": "Angika",
"anq": "Jarawa (India)",
"anr": "Andh",
"ans": "Anserma",
"ant": "Antakarinya; Antikarinya",
"anu": "Anuak",
"anv": "Denya",
"anw": "Anaang",
"anx": "Andra-Hus",
"any": "Anyin",
"anz": "Anem",
"aoa": "Angolar",
"aob": "Abom",
"aoc": "Pemon",
"aod": "Andarum",
"aoe": "Angal Enen",
"aof": "Bragat",
"aog": "Angoram",
"aoi": "Anindilyakwa",
"aoj": "Mufian",
"aok": "Arhö",
"aol": "Alor",
"aom": "Ömie",
"aon": "Bumbita Arapesh",
"aor": "Aore",
"aos": "Taikat",
"aot": "Atong (India); A'tong",
"aou": "A'ou",
"aox": "Atorada",
"aoz": "Uab Meto",
"apa": "Apache languages",
"apb": "Sa'a",
"apc": "North Levantine Arabic",
"apd": "Sudanese Arabic",
"ape": "Bukiyip",
"apf": "Pahanan Agta",
"apg": "Ampanang",
"aph": "Athpariya",
"api": "Apiaká",
"apj": "Jicarilla Apache",
"apk": "Kiowa Apache",
"apl": "Lipan Apache",
"apm": "Mescalero-Chiricahua Apache",
"apn": "Apinayé",
"apo": "Ambul",
"app": "Apma",
"apq": "A-Pucikwar",
"apr": "Arop-Lokep",
"aps": "Arop-Sissano",
"apt": "Apatani",
"apu": "Apurinã",
"apv": "Alapmunte",
"apw": "Western Apache",
"apx": "Aputai",
"apy": "Apalaí",
"apz": "Safeyoka",
"aqa": "Alacalufan languages",
"aqc": "Archi",
"aqd": "Ampari Dogon",
"aqg": "Arigidi",
"aqk": "Aninka",
"aql": "Algic languages",
"aqm": "Atohwaim",
"aqn": "Northern Alta",
"aqp": "Atakapa",
"aqr": "Arhâ",
"aqt": "Angaité",
"aqz": "Akuntsu",
"ar": "Arabic",
"arb": "Standard Arabic",
"arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)",
"ard": "Arabana",
"are": "Western Arrarnta",
"arh": "Arhuaco",
"ari": "Arikara",
"arj": "Arapaso",
"ark": "Arikapú",
"arl": "Arabela",
"arn": "Mapudungun; Mapuche",
"aro": "Araona",
"arp": "Arapaho",
"arq": "Algerian Arabic",
"arr": "Karo (Brazil)",
"ars": "Najdi Arabic",
"art": "Artificial languages",
"aru": "Aruá (Amazonas State); Arawá",
"arv": "Arbore",
"arw": "Arawak",
"arx": "Aruá (Rodonia State)",
"ary": "Moroccan Arabic",
"arz": "Egyptian Arabic",
"as": "Assamese",
"asa": "Asu (Tanzania)",
"asb": "Assiniboine",
"asc": "Casuarina Coast Asmat",
"ase": "American Sign Language",
"asf": "Auslan; Australian Sign Language",
"asg": "Cishingini",
"ash": "Abishira",
"asi": "Buruwai",
"asj": "Sari",
"ask": "Ashkun",
"asl": "Asilulu",
"asn": "Xingú Asuriní",
"aso": "Dano",
"asp": "Algerian Sign Language",
"asq": "Austrian Sign Language",
"asr": "Asuri",
"ass": "Ipulo",
"ast": "Asturian; Asturleonese; Bable; Leonese",
"asu": "Tocantins Asurini",
"asv": "Asoa",
"asw": "Australian Aborigines Sign Language",
"asx": "Muratayak",
"asy": "Yaosakor Asmat",
"asz": "As",
"ata": "Pele-Ata",
"atb": "Zaiwa",
"atc": "Atsahuaca",
"atd": "Ata Manobo",
"ate": "Atemble",
"atg": "Ivbie North-Okpela-Arhe",
"ath": "Athapascan languages",
"ati": "Attié",
"atj": "Atikamekw",
"atk": "Ati",
"atl": "Mt. Iraya Agta",
"atm": "Ata",
"atn": "Ashtiani",
"ato": "Atong (Cameroon)",
"atp": "Pudtol Atta",
"atq": "Aralle-Tabulahan",
"atr": "Waimiri-Atroari",
"ats": "Gros Ventre",
"att": "Pamplona Atta",
"atu": "Reel",
"atv": "Northern Altai",
"atw": "Atsugewi",
"atx": "Arutani",
"aty": "Aneityum",
"atz": "Arta",
"aua": "Asumboa",
"aub": "Alugu",
"auc": "Waorani",
"aud": "Anuta",
"auf": "Arauan languages",
"aug": "Aguna",
"auh": "Aushi",
"aui": "Anuki",
"auj": "Awjilah",
"auk": "Heyo",
"aul": "Aulua",
"aum": "Asu (Nigeria)",
"aun": "Molmo One",
"auo": "Auyokawa",
"aup": "Makayam",
"auq": "Anus; Korur",
"aur": "Aruek",
"aus": "Australian languages",
"aut": "Austral",
"auu": "Auye",
"auw": "Awyi",
"aux": "Aurá",
"auy": "Awiyaana",
"auz": "Uzbeki Arabic",
"av": "Avaric",
"avb": "Avau",
"avd": "Alviri-Vidari",
"avi": "Avikam",
"avk": "Kotava",
"avl": "Eastern Egyptian Bedawi Arabic",
"avm": "Angkamuthi",
"avn": "Avatime",
"avo": "Agavotaguerra",
"avs": "Aushiri",
"avt": "Au",
"avu": "Avokaya",
"avv": "Avá-Canoeiro",
"awa": "Awadhi",
"awb": "Awa (Papua New Guinea)",
"awc": "Cicipu",
"awd": "Arawakan languages",
"awe": "Awetí",
"awg": "Anguthimri",
"awh": "Awbono",
"awi": "Aekyom",
"awk": "Awabakal",
"awm": "Arawum",
"awn": "Awngi",
"awo": "Awak",
"awr": "Awera",
"aws": "South Awyu",
"awt": "Araweté",
"awu": "Central Awyu",
"awv": "Jair Awyu",
"aww": "Awun",
"awx": "Awara",
"awy": "Edera Awyu",
"axb": "Abipon",
"axe": "Ayerrerenge",
"axg": "Mato Grosso Arára",
"axk": "Yaka (Central African Republic)",
"axl": "Lower Southern Aranda",
"axm": "Middle Armenian",
"axx": "Xârâgurè",
"ay": "Aymara",
"aya": "Awar",
"ayb": "Ayizo Gbe",
"ayc": "Southern Aymara",
"ayd": "Ayabadhu",
"aye": "Ayere",
"ayg": "Ginyanga",
"ayh": "Hadrami Arabic",
"ayi": "Leyigha",
"ayk": "Akuku",
"ayl": "Libyan Arabic",
"ayn": "Sanaani Arabic",
"ayo": "Ayoreo",
"ayp": "North Mesopotamian Arabic",
"ayq": "Ayi (Papua New Guinea)",
"ayr": "Central Aymara",
"ays": "Sorsogon Ayta",
"ayt": "Magbukun Ayta",
"ayu": "Ayu",
"ayz": "Mai Brat",
"az": "Azerbaijani",
"aza": "Azha",
"azb": "South Azerbaijani",
"azc": "Uto-Aztecan languages",
"azd": "Eastern Durango Nahuatl",
"azg": "San Pedro Amuzgos Amuzgo",
"azj": "North Azerbaijani",
"azm": "Ipalapa Amuzgo",
"azn": "Western Durango Nahuatl",
"azo": "Awing",
"azt": "Faire Atta",
"azz": "Highland Puebla Nahuatl",
"ba": "Bashkir",
"baa": "Babatana",
"bab": "Bainouk-Gunyuño",
"bac": "Badui",
"bad": "Banda languages",
"bae": "Baré",
"baf": "Nubaca",
"bag": "Tuki",
"bah": "Bahamas Creole English",
"bai": "Bamileke languages",
"baj": "Barakai",
"bal": "Baluchi",
"ban": "Balinese",
"bao": "Waimaha",
"bap": "Bantawa",
"bar": "Bavarian",
"bas": "Basa (Cameroon)",
"bat": "Baltic languages",
"bau": "Bada (Nigeria)",
"bav": "Vengo",
"baw": "Bambili-Bambui",
"bax": "Bamun",
"bay": "Batuley",
"bba": "Baatonum",
"bbb": "Barai",
"bbc": "Batak Toba",
"bbd": "Bau",
"bbe": "Bangba",
"bbf": "Baibai",
"bbg": "Barama",
"bbh": "Bugan",
"bbi": "Barombi",
"bbj": "Ghomálá'",
"bbk": "Babanki",
"bbl": "Bats",
"bbm": "Babango",
"bbn": "Uneapa",
"bbo": "Northern Bobo Madaré; Konabéré",
"bbp": "West Central Banda",
"bbq": "Bamali",
"bbr": "Girawa",
"bbs": "Bakpinka",
"bbt": "Mburku",
"bbu": "Kulung (Nigeria)",
"bbv": "Karnai",
"bbw": "Baba",
"bbx": "Bubia",
"bby": "Befang",
"bca": "Central Bai",
"bcb": "Bainouk-Samik",
"bcc": "Southern Balochi",
"bcd": "North Babar",
"bce": "Bamenyam",
"bcf": "Bamu",
"bcg": "Baga Pokur",
"bch": "Bariai",
"bci": "Baoulé",
"bcj": "Bardi",
"bck": "Bunuba",
"bcl": "Central Bikol",
"bcm": "Bannoni",
"bcn": "Bali (Nigeria)",
"bco": "Kaluli",
"bcp": "Bali (Democratic Republic of Congo)",
"bcq": "Bench",
"bcr": "Babine",
"bcs": "Kohumono",
"bct": "Bendi",
"bcu": "Awad Bing",
"bcv": "Shoo-Minda-Nye",
"bcw": "Bana",
"bcy": "Bacama",
"bcz": "Bainouk-Gunyaamolo",
"bda": "Bayot",
"bdb": "Basap",
"bdc": "Emberá-Baudó",
"bdd": "Bunama",
"bde": "Bade",
"bdf": "Biage",
"bdg": "Bonggi",
"bdh": "Baka (South Sudan)",
"bdi": "Burun",
"bdj": "Bai (South Sudan); Bai",
"bdk": "Budukh",
"bdl": "Indonesian Bajau",
"bdm": "Buduma",
"bdn": "Baldemu",
"bdo": "Morom",
"bdp": "Bende",
"bdq": "Bahnar",
"bdr": "West Coast Bajau",
"bds": "Burunge",
"bdt": "Bokoto",
"bdu": "Oroko",
"bdv": "Bodo Parja",
"bdw": "Baham",
"bdx": "Budong-Budong",
"bdy": "Bandjalang",
"bdz": "Badeshi",
"be": "Belarusian",
"bea": "Beaver",
"beb": "Bebele",
"bec": "Iceve-Maci",
"bed": "Bedoanas",
"bee": "Byangsi",
"bef": "Benabena",
"beg": "Belait",
"beh": "Biali",
"bei": "Bekati'",
"bej": "Beja; Bedawiyet",
"bek": "Bebeli",
"bem": "Bemba (Zambia)",
"beo": "Beami",
"bep": "Besoa",
"beq": "Beembe",
"ber": "Berber languages",
"bes": "Besme",
"bet": "Guiberoua Béte",
"beu": "Blagar",
"bev": "Daloa Bété",
"bew": "Betawi",
"bex": "Jur Modo",
"bey": "Beli (Papua New Guinea)",
"bez": "Bena (Tanzania)",
"bfa": "Bari",
"bfb": "Pauri Bareli",
"bfc": "Panyi Bai; Northern Bai",
"bfd": "Bafut",
"bfe": "Betaf; Tena",
"bff": "Bofi",
"bfg": "Busang Kayan",
"bfh": "Blafe",
"bfi": "British Sign Language",
"bfj": "Bafanji",
"bfk": "Ban Khor Sign Language",
"bfl": "Banda-Ndélé",
"bfm": "Mmen",
"bfn": "Bunak",
"bfo": "Malba Birifor",
"bfp": "Beba",
"bfq": "Badaga",
"bfr": "Bazigar",
"bfs": "Southern Bai",
"bft": "Balti",
"bfu": "Gahri",
"bfw": "Bondo",
"bfx": "Bantayanon",
"bfy": "Bagheli",
"bfz": "Mahasu Pahari",
"bg": "Bulgarian",
"bga": "Gwamhi-Wuri",
"bgb": "Bobongko",
"bgc": "Haryanvi",
"bgd": "Rathwi Bareli",
"bge": "Bauria",
"bgf": "Bangandu",
"bgg": "Bugun",
"bgi": "Giangan",
"bgj": "Bangolan",
"bgk": "Bit; Buxinhua",
"bgl": "Bo (Laos)",
"bgn": "Western Balochi",
"bgo": "Baga Koga",
"bgp": "Eastern Balochi",
"bgq": "Bagri",
"bgr": "Bawm Chin",
"bgs": "Tagabawa",
"bgt": "Bughotu",
"bgu": "Mbongno",
"bgv": "Warkay-Bipim",
"bgw": "Bhatri",
"bgx": "Balkan Gagauz Turkish",
"bgy": "Benggoi",
"bgz": "Banggai",
"bh": "Bihari languages",
"bha": "Bharia",
"bhb": "Bhili",
"bhc": "Biga",
"bhd": "Bhadrawahi",
"bhe": "Bhaya",
"bhf": "Odiai",
"bhg": "Binandere",
"bhh": "Bukharic",
"bhi": "Bhilali",
"bhj": "Bahing",
"bhl": "Bimin",
"bhm": "Bathari",
"bhn": "Bohtan Neo-Aramaic",
"bho": "Bhojpuri",
"bhp": "Bima",
"bhq": "Tukang Besi South",
"bhr": "Bara Malagasy",
"bhs": "Buwal",
"bht": "Bhattiyali",
"bhu": "Bhunjia",
"bhv": "Bahau",
"bhw": "Biak",
"bhx": "Bhalay",
"bhy": "Bhele",
"bhz": "Bada (Indonesia)",
"bi": "Bislama",
"bia": "Badimaya",
"bib": "Bissa; Bisa",
"bid": "Bidiyo",
"bie": "Bepour",
"bif": "Biafada",
"big": "Biangai",
"bik": "Bikol",
"bil": "Bile",
"bim": "Bimoba",
"bin": "Bini; Edo",
"bio": "Nai",
"bip": "Bila",
"biq": "Bipi",
"bir": "Bisorio",
"bit": "Berinomo",
"biu": "Biete",
"biv": "Southern Birifor",
"biw": "Kol (Cameroon)",
"bix": "Bijori",
"biy": "Birhor",
"biz": "Baloi",
"bja": "Budza",
"bjb": "Banggarla",
"bjc": "Bariji",
"bje": "Biao-Jiao Mien",
"bjf": "Barzani Jewish Neo-Aramaic",
"bjg": "Bidyogo",
"bjh": "Bahinemo",
"bji": "Burji",
"bjj": "Kanauji",
"bjk": "Barok",
"bjl": "Bulu (Papua New Guinea)",
"bjm": "Bajelani",
"bjn": "Banjar",
"bjo": "Mid-Southern Banda",
"bjp": "Fanamaket",
"bjr": "Binumarien",
"bjs": "Bajan",
"bjt": "Balanta-Ganja",
"bju": "Busuu",
"bjv": "Bedjond",
"bjw": "Bakwé",
"bjx": "Banao Itneg",
"bjy": "Bayali",
"bjz": "Baruga",
"bka": "Kyak",
"bkc": "Baka (Cameroon)",
"bkd": "Binukid; Talaandig",
"bkf": "Beeke",
"bkg": "Buraka",
"bkh": "Bakoko",
"bki": "Baki",
"bkj": "Pande",
"bkk": "Brokskat",
"bkl": "Berik",
"bkm": "Kom (Cameroon)",
"bkn": "Bukitan",
"bko": "Kwa'",
"bkp": "Boko (Democratic Republic of Congo)",
"bkq": "Bakairí",
"bkr": "Bakumpai",
"bks": "Northern Sorsoganon",
"bkt": "Boloki",
"bku": "Buhid",
"bkv": "Bekwarra",
"bkw": "Bekwel",
"bkx": "Baikeno",
"bky": "Bokyi",
"bkz": "Bungku",
"bla": "Siksika",
"blb": "Bilua",
"blc": "Bella Coola",
"bld": "Bolango",
"ble": "Balanta-Kentohe",
"blf": "Buol",
"blh": "Kuwaa",
"bli": "Bolia",
"blj": "Bolongan",
"blk": "Pa'o Karen; Pa'O",
"bll": "Biloxi",
"blm": "Beli (South Sudan)",
"bln": "Southern Catanduanes Bikol",
"blo": "Anii",
"blp": "Blablanga",
"blq": "Baluan-Pam",
"blr": "Blang",
"bls": "Balaesang",
"blt": "Tai Dam",
"blv": "Kibala; Bolo",
"blw": "Balangao",
"blx": "Mag-Indi Ayta",
"bly": "Notre",
"blz": "Balantak",
"bm": "Bambara",
"bma": "Lame",
"bmb": "Bembe",
"bmc": "Biem",
"bmd": "Baga Manduri",
"bme": "Limassa",
"bmf": "Bom-Kim",
"bmg": "Bamwe",
"bmh": "Kein",
"bmi": "Bagirmi",
"bmj": "Bote-Majhi",
"bmk": "Ghayavi",
"bml": "Bomboli",
"bmm": "Northern Betsimisaraka Malagasy",
"bmn": "Bina (Papua New Guinea)",
"bmo": "Bambalang",
"bmp": "Bulgebi",
"bmq": "Bomu",
"bmr": "Muinane",
"bms": "Bilma Kanuri",
"bmt": "Biao Mon",
"bmu": "Somba-Siawari",
"bmv": "Bum",
"bmw": "Bomwali",
"bmx": "Baimak",
"bmz": "Baramu",
"bn": "Bengali; Bangla",
"bna": "Bonerate",
"bnb": "Bookan",
"bnc": "Bontok",
"bnd": "Banda (Indonesia)",
"bne": "Bintauna",
"bnf": "Masiwang",
"bng": "Benga",
"bni": "Bangi",
"bnj": "Eastern Tawbuid",
"bnk": "Bierebo",
"bnl": "Boon",
"bnm": "Batanga",
"bnn": "Bunun",
"bno": "Bantoanon",
"bnp": "Bola",
"bnq": "Bantik",
"bnr": "Butmas-Tur",
"bns": "Bundeli",
"bnt": "Bantu languages",
"bnu": "Bentong",
"bnv": "Bonerif; Beneraf; Edwas",
"bnw": "Bisis",
"bnx": "Bangubangu",
"bny": "Bintulu",
"bnz": "Beezen",
"bo": "Tibetan",
"boa": "Bora",
"bob": "Aweer",
"boe": "Mundabli",
"bof": "Bolon",
"bog": "Bamako Sign Language",
"boh": "Boma",
"boi": "Barbareño",
"boj": "Anjam",
"bok": "Bonjo",
"bol": "Bole",
"bom": "Berom",
"bon": "Bine",
"boo": "Tiemacèwè Bozo",
"bop": "Bonkiman",
"boq": "Bogaya",
"bor": "Borôro",
"bot": "Bongo",
"bou": "Bondei",
"bov": "Tuwuli",
"bow": "Rema",
"box": "Buamu",
"boy": "Bodo (Central African Republic)",
"boz": "Tiéyaxo Bozo",
"bpa": "Daakaka",
"bpc": "Mbuk",
"bpd": "Banda-Banda",
"bpe": "Bauni",
"bpg": "Bonggo",
"bph": "Botlikh",
"bpi": "Bagupi",
"bpj": "Binji",
"bpk": "Orowe; 'Ôrôê",
"bpl": "Broome Pearling Lugger Pidgin",
"bpm": "Biyom",
"bpn": "Dzao Min",
"bpo": "Anasi",
"bpp": "Kaure",
"bpq": "Banda Malay",
"bpr": "Koronadal Blaan",
"bps": "Sarangani Blaan",
"bpt": "Barrow Point",
"bpu": "Bongu",
"bpv": "Bian Marind",
"bpw": "Bo (Papua New Guinea)",
"bpx": "Palya Bareli",
"bpy": "Bishnupriya",
"bpz": "Bilba",
"bqa": "Tchumbuli",
"bqb": "Bagusa",
"bqc": "Boko (Benin); Boo",
"bqd": "Bung",
"bqf": "Baga Kaloum",
"bqg": "Bago-Kusuntu",
"bqh": "Baima",
"bqi": "Bakhtiari",
"bqj": "Bandial",
"bqk": "Banda-Mbrès",
"bql": "Bilakura",
"bqm": "Wumboko",
"bqn": "Bulgarian Sign Language",
"bqo": "Balo",
"bqp": "Busa",
"bqq": "Biritai",
"bqr": "Burusu",
"bqs": "Bosngun",
"bqt": "Bamukumbit",
"bqu": "Boguru",
"bqv": "Koro Wachi; Begbere-Ejar",
"bqw": "Buru (Nigeria)",
"bqx": "Baangi",
"bqy": "Bengkala Sign Language",
"bqz": "Bakaka",
"br": "Breton",
"bra": "Braj",
"brb": "Brao; Lave",
"brc": "Berbice Creole Dutch",
"brd": "Baraamu",
"brf": "Bira",
"brg": "Baure",
"brh": "Brahui",
"bri": "Mokpwe",
"brj": "Bieria",
"brk": "Birked",
"brl": "Birwa",
"brm": "Barambu",
"brn": "Boruca",
"bro": "Brokkat",
"brp": "Barapasi",
"brq": "Breri",
"brr": "Birao",
"brs": "Baras",
"brt": "Bitare",
"bru": "Eastern Bru",
"brv": "Western Bru",
"brw": "Bellari",
"brx": "Bodo (India)",
"bry": "Burui",
"brz": "Bilbil",
"bs": "Bosnian",
"bsa": "Abinomn",
"bsb": "Brunei Bisaya",
"bsc": "Bassari; Oniyan",
"bse": "Wushi",
"bsf": "Bauchi",
"bsg": "Bashkardi",
"bsh": "Kati",
"bsi": "Bassossi",
"bsj": "Bangwinji",
"bsk": "Burushaski",
"bsl": "Basa-Gumna",
"bsm": "Busami",
"bsn": "Barasana-Eduria",
"bso": "Buso",
"bsp": "Baga Sitemu",
"bsq": "Bassa",
"bsr": "Bassa-Kontagora",
"bss": "Akoose",
"bst": "Basketo",
"bsu": "Bahonsuai",
"bsv": "Baga Sobané",
"bsw": "Baiso",
"bsx": "Yangkam",
"bsy": "Sabah Bisaya",
"bta": "Bata",
"btc": "Bati (Cameroon)",
"btd": "Batak Dairi",
"bte": "Gamo-Ningi",
"btf": "Birgit",
"btg": "Gagnoa Bété",
"bth": "Biatah Bidayuh",
"bti": "Burate",
"btj": "Bacanese Malay",
"btk": "Batak languages",
"btm": "Batak Mandailing",
"btn": "Ratagnon",
"bto": "Rinconada Bikol",
"btp": "Budibud",
"btq": "Batek",
"btr": "Baetora",
"bts": "Batak Simalungun",
"btt": "Bete-Bendi",
"btu": "Batu",
"btv": "Bateri",
"btw": "Butuanon",
"btx": "Batak Karo",
"bty": "Bobot",
"btz": "Batak Alas-Kluet",
"bua": "Buriat",
"bub": "Bua",
"buc": "Bushi",
"bud": "Ntcham",
"bue": "Beothuk",
"buf": "Bushoong",
"bug": "Buginese",
"buh": "Younuo Bunu",
"bui": "Bongili",
"buj": "Basa-Gurmana",
"buk": "Bugawac",
"bum": "Bulu (Cameroon)",
"bun": "Sherbro",
"buo": "Terei",
"bup": "Busoa",
"buq": "Brem",
"bus": "Bokobaru",
"but": "Bungain",
"buu": "Budu",
"buv": "Bun",
"buw": "Bubi",
"bux": "Boghom",
"buy": "Bullom So",
"buz": "Bukwen",
"bva": "Barein",
"bvb": "Bube",
"bvc": "Baelelea",
"bvd": "Baeggu",
"bve": "Berau Malay",
"bvf": "Boor",
"bvg": "Bonkeng",
"bvh": "Bure",
"bvi": "Belanda Viri",
"bvj": "Baan",
"bvk": "Bukat",
"bvl": "Bolivian Sign Language",
"bvm": "Bamunka",
"bvn": "Buna",
"bvo": "Bolgo",
"bvp": "Bumang",
"bvq": "Birri",
"bvr": "Burarra",
"bvt": "Bati (Indonesia)",
"bvu": "Bukit Malay",
"bvv": "Baniva",
"bvw": "Boga",
"bvx": "Dibole",
"bvy": "Baybayanon",
"bvz": "Bauzi",
"bwa": "Bwatoo",
"bwb": "Namosi-Naitasiri-Serua",
"bwc": "Bwile",
"bwd": "Bwaidoka",
"bwe": "Bwe Karen",
"bwf": "Boselewa",
"bwg": "Barwe",
"bwh": "Bishuo",
"bwi": "Baniwa",
"bwj": "Láá Láá Bwamu",
"bwk": "Bauwaki",
"bwl": "Bwela",
"bwm": "Biwat",
"bwn": "Wunai Bunu",
"bwo": "Boro (Ethiopia); Borna (Ethiopia)",
"bwp": "Mandobo Bawah",
"bwq": "Southern Bobo Madaré",
"bwr": "Bura-Pabir",
"bws": "Bomboma",
"bwt": "Bafaw-Balong",
"bwu": "Buli (Ghana)",
"bww": "Bwa",
"bwx": "Bu-Nao Bunu",
"bwy": "Cwi Bwamu",
"bwz": "Bwisi",
"bxa": "Tairaha",
"bxb": "Belanda Bor",
"bxc": "Molengue",
"bxd": "Pela",
"bxe": "Birale",
"bxf": "Bilur; Minigir",
"bxg": "Bangala",
"bxh": "Buhutu",
"bxi": "Pirlatapa",
"bxj": "Bayungu",
"bxk": "Bukusu; Lubukusu",
"bxl": "Jalkunan",
"bxm": "Mongolia Buriat",
"bxn": "Burduna",
"bxo": "Barikanchi",
"bxp": "Bebil",
"bxq": "Beele",
"bxr": "Russia Buriat",
"bxs": "Busam",
"bxu": "China Buriat",
"bxv": "Berakou",
"bxw": "Bankagooma",
"bxz": "Binahari",
"bya": "Batak",
"byb": "Bikya",
"byc": "Ubaghara",
"byd": "Benyadu'",
"bye": "Pouye",
"byf": "Bete",
"byg": "Baygo",
"byh": "Bhujel",
"byi": "Buyu",
"byj": "Bina (Nigeria)",
"byk": "Biao",
"byl": "Bayono",
"bym": "Bidjara",
"byn": "Bilin; Blin",
"byo": "Biyo",
"byp": "Bumaji",
"byq": "Basay",
"byr": "Baruya; Yipma",
"bys": "Burak",
"byt": "Berti",
"byv": "Medumba",
"byw": "Belhariya",
"byx": "Qaqet",
"byz": "Banaro",
"bza": "Bandi",
"bzb": "Andio",
"bzc": "Southern Betsimisaraka Malagasy",
"bzd": "Bribri",
"bze": "Jenaama Bozo",
"bzf": "Boikin",
"bzg": "Babuza",
"bzh": "Mapos Buang",
"bzi": "Bisu",
"bzj": "Belize Kriol English",
"bzk": "Nicaragua Creole English",
"bzl": "Boano (Sulawesi)",
"bzm": "Bolondo",
"bzn": "Boano (Maluku)",
"bzo": "Bozaba",
"bzp": "Kemberano",
"bzq": "Buli (Indonesia)",
"bzr": "Biri",
"bzs": "Brazilian Sign Language",
"bzt": "Brithenig",
"bzu": "Burmeso",
"bzv": "Naami",
"bzw": "Basa (Nigeria)",
"bzx": "Kɛlɛngaxo Bozo",
"bzy": "Obanliku",
"bzz": "Evant",
"ca": "Catalan; Valencian",
"caa": "Chortí",
"cab": "Garifuna",
"cac": "Chuj",
"cad": "Caddo",
"cae": "Lehar; Laalaa",
"caf": "Southern Carrier",
"cag": "Nivaclé",
"cah": "Cahuarano",
"cai": "Central American Indian languages",
"caj": "Chané",
"cak": "Kaqchikel; Cakchiquel",
"cal": "Carolinian",
"cam": "Cemuhî",
"can": "Chambri",
"cao": "Chácobo",
"cap": "Chipaya",
"caq": "Car Nicobarese",
"car": "Galibi Carib",
"cas": "Tsimané",
"cau": "Caucasian languages",
"cav": "Cavineña",
"caw": "Callawalla",
"cax": "Chiquitano",
"cay": "Cayuga",
"caz": "Canichana",
"cba": "Chibchan languages",
"cbb": "Cabiyarí",
"cbc": "Carapana",
"cbd": "Carijona",
"cbg": "Chimila",
"cbi": "Chachi",
"cbj": "Ede Cabe",
"cbk": "Chavacano",
"cbl": "Bualkhaw Chin",
"cbn": "Nyahkur",
"cbo": "Izora",
"cbq": "Tsucuba; Cuba",
"cbr": "Cashibo-Cacataibo",
"cbs": "Cashinahua",
"cbt": "Chayahuita",
"cbu": "Candoshi-Shapra",
"cbv": "Cacua",
"cbw": "Kinabalian",
"cby": "Carabayo",
"ccc": "Chamicuro",
"ccd": "Cafundo Creole",
"cce": "Chopi",
"ccg": "Samba Daka",
"cch": "Atsam",
"ccj": "Kasanga",
"ccl": "Cutchi-Swahili",
"ccm": "Malaccan Creole Malay",
"ccn": "North Caucasian languages",
"cco": "Comaltepec Chinantec",
"ccp": "Chakma",
"ccr": "Cacaopera",
"ccs": "South Caucasian languages",
"cda": "Choni",
"cdc": "Chadic languages",
"cdd": "Caddoan languages",
"cde": "Chenchu",
"cdf": "Chiru",
"cdh": "Chambeali",
"cdi": "Chodri",
"cdj": "Churahi",
"cdm": "Chepang",
"cdn": "Chaudangsi",
"cdo": "Min Dong Chinese",
"cdr": "Cinda-Regi-Tiyal",
"cds": "Chadian Sign Language",
"cdy": "Chadong",
"cdz": "Koda",
"ce": "Chechen",
"cea": "Lower Chehalis",
"ceb": "Cebuano",
"ceg": "Chamacoco",
"cek": "Eastern Khumi Chin",
"cel": "Celtic languages",
"cen": "Cen",
"cet": "Centúúm",
"cey": "Ekai Chin",
"cfa": "Dijim-Bwilim",
"cfd": "Cara",
"cfg": "Como Karim",
"cfm": "Falam Chin",
"cga": "Changriwa",
"cgc": "Kagayanen",
"cgg": "Chiga",
"cgk": "Chocangacakha",
"ch": "Chamorro",
"chb": "Chibcha",
"chc": "Catawba",
"chd": "Highland Oaxaca Chontal",
"chf": "Tabasco Chontal",
"chg": "Chagatai",
"chh": "Chinook",
"chj": "Ojitlán Chinantec",
"chk": "Chuukese",
"chl": "Cahuilla",
"chm": "Mari (Russia)",
"chn": "Chinook jargon",
"cho": "Choctaw",
"chp": "Chipewyan; Dene Suline",
"chq": "Quiotepec Chinantec",
"chr": "Cherokee",
"cht": "Cholón",
"chw": "Chuwabu",
"chx": "Chantyal",
"chy": "Cheyenne",
"chz": "Ozumacín Chinantec",
"cia": "Cia-Cia",
"cib": "Ci Gbe",
"cic": "Chickasaw",
"cid": "Chimariko",
"cie": "Cineni",
"cih": "Chinali",
"cik": "Chitkuli Kinnauri",
"cim": "Cimbrian",
"cin": "Cinta Larga",
"cip": "Chiapanec",
"cir": "Tiri; Haméa; Méa",
"ciw": "Chippewa",
"ciy": "Chaima",
"cja": "Western Cham",
"cje": "Chru",
"cjh": "Upper Chehalis",
"cji": "Chamalal",
"cjk": "Chokwe",
"cjm": "Eastern Cham",
"cjn": "Chenapian",
"cjo": "Ashéninka Pajonal",
"cjp": "Cabécar",
"cjs": "Shor",
"cjv": "Chuave",
"cjy": "Jinyu Chinese",
"ckb": "Central Kurdish",
"ckh": "Chak",
"ckl": "Cibak",
"ckm": "Chakavian",
"ckn": "Kaang Chin",
"cko": "Anufo",
"ckq": "Kajakse",
"ckr": "Kairak",
"cks": "Tayo",
"ckt": "Chukot",
"cku": "Koasati",
"ckv": "Kavalan",
"ckx": "Caka",
"cky": "Cakfem-Mushere",
"ckz": "Cakchiquel-Quiché Mixed Language",
"cla": "Ron",
"clc": "Chilcotin",
"cld": "Chaldean Neo-Aramaic",
"cle": "Lealao Chinantec",
"clh": "Chilisso",
"cli": "Chakali",
"clj": "Laitu Chin",
"clk": "Idu-Mishmi",
"cll": "Chala",
"clm": "Clallam",
"clo": "Lowland Oaxaca Chontal",
"clt": "Lautu Chin",
"clu": "Caluyanun",
"clw": "Chulym",
"cly": "Eastern Highland Chatino",
"cma": "Maa",
"cmc": "Chamic languages",
"cme": "Cerma",
"cmg": "Classical Mongolian",
"cmi": "Emberá-Chamí",
"cml": "Campalagian",
"cmm": "Michigamea",
"cmn": "Mandarin Chinese",
"cmo": "Central Mnong",
"cmr": "Mro-Khimi Chin",
"cms": "Messapic",
"cmt": "Camtho",
"cna": "Changthang",
"cnb": "Chinbon Chin",
"cnc": "Côông",
"cng": "Northern Qiang",
"cnh": "Hakha Chin; Haka Chin",
"cni": "Asháninka",
"cnk": "Khumi Chin",
"cnl": "Lalana Chinantec",
"cno": "Con",
"cnp": "Northern Ping Chinese; Northern Pinghua",
"cnq": "Chung",
"cnr": "Montenegrin",
"cns": "Central Asmat",
"cnt": "Tepetotutla Chinantec",
"cnu": "Chenoua",
"cnw": "Ngawn Chin",
"cnx": "Middle Cornish",
"co": "Corsican",
"coa": "Cocos Islands Malay",
"cob": "Chicomuceltec",
"coc": "Cocopa",
"cod": "Cocama-Cocamilla",
"coe": "Koreguaje",
"cof": "Colorado",
"cog": "Chong",
"coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma",
"coj": "Cochimi",
"cok": "Santa Teresa Cora",
"col": "Columbia-Wenatchi",
"com": "Comanche",
"con": "Cofán",
"coo": "Comox",
"cop": "Coptic",
"coq": "Coquille",
"cot": "Caquinte",
"cou": "Wamey",
"cov": "Cao Miao",
"cow": "Cowlitz",
"cox": "Nanti",
"coz": "Chochotec",
"cpa": "Palantla Chinantec",
"cpb": "Ucayali-Yurúa Ashéninka",
"cpc": "Ajyíninka Apurucayali",
"cpe": "English-based creoles and pidgins",
"cpf": "French-based creoles and pidgins",
"cpg": "Cappadocian Greek",
"cpi": "Chinese Pidgin English",
"cpn": "Cherepon",
"cpo": "Kpeego",
"cpp": "Portuguese-based creoles and pidgins",
"cps": "Capiznon",
"cpu": "Pichis Ashéninka",
"cpx": "Pu-Xian Chinese",
"cpy": "South Ucayali Ashéninka",
"cqd": "Chuanqiandian Cluster Miao",
"cr": "Cree",
"cra": "Chara",
"crb": "Island Carib",
"crc": "Lonwolwol",
"crd": "Coeur d'Alene",
"crf": "Caramanta",
"crg": "Michif",
"crh": "Crimean Tatar; Crimean Turkish",
"cri": "Sãotomense",
"crj": "Southern East Cree",
"crk": "Plains Cree",
"crl": "Northern East Cree",
"crm": "Moose Cree",
"crn": "El Nayar Cora",
"cro": "Crow",
"crp": "Creoles and pidgins",
"crq": "Iyo'wujwa Chorote",
"crr": "Carolina Algonquian",
"crs": "Seselwa Creole French",
"crt": "Iyojwa'ja Chorote",
"crv": "Chaura",
"crw": "Chrau",
"crx": "Carrier",
"cry": "Cori",
"crz": "Cruzeño",
"cs": "Czech",
"csa": "Chiltepec Chinantec",
"csb": "Kashubian",
"csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana",
"csd": "Chiangmai Sign Language",
"cse": "Czech Sign Language",
"csf": "Cuba Sign Language",
"csg": "Chilean Sign Language",
"csh": "Asho Chin",
"csi": "Coast Miwok",
"csj": "Songlai Chin",
"csk": "Jola-Kasa",
"csl": "Chinese Sign Language",
"csm": "Central Sierra Miwok",
"csn": "Colombian Sign Language",
"cso": "Sochiapam Chinantec; Sochiapan Chinantec",
"csp": "Southern Ping Chinese; Southern Pinghua",
"csq": "Croatia Sign Language",
"csr": "Costa Rican Sign Language",
"css": "Southern Ohlone",
"cst": "Northern Ohlone",
"csu": "Central Sudanic languages",
"csv": "Sumtu Chin",
"csw": "Swampy Cree",
"csx": "Cambodian Sign Language",
"csy": "Siyin Chin",
"csz": "Coos",
"cta": "Tataltepec Chatino",
"ctc": "Chetco",
"ctd": "Tedim Chin",
"cte": "Tepinapa Chinantec",
"ctg": "Chittagonian",
"cth": "Thaiphum Chin",
"ctl": "Tlacoatzintepec Chinantec",
"ctm": "Chitimacha",
"ctn": "Chhintange",
"cto": "Emberá-Catío",
"ctp": "Western Highland Chatino",
"cts": "Northern Catanduanes Bikol",
"ctt": "Wayanad Chetti",
"ctu": "Chol",
"cty": "Moundadan Chetty",
"ctz": "Zacatepec Chatino",
"cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic",
"cua": "Cua",
"cub": "Cubeo",
"cuc": "Usila Chinantec",
"cuh": "Chuka; Gichuka",
"cui": "Cuiba",
"cuj": "Mashco Piro",
"cuk": "San Blas Kuna",
"cul": "Culina; Kulina",
"cuo": "Cumanagoto",
"cup": "Cupeño",
"cuq": "Cun",
"cur": "Chhulung",
"cus": "Cushitic languages",
"cut": "Teutila Cuicatec",
"cuu": "Tai Ya",
"cuv": "Cuvok",
"cuw": "Chukwa",
"cux": "Tepeuxila Cuicatec",
"cuy": "Cuitlatec",
"cv": "Chuvash",
"cvg": "Chug",
"cvn": "Valle Nacional Chinantec",
"cwa": "Kabwa",
"cwb": "Maindo",
"cwd": "Woods Cree",
"cwe": "Kwere",
"cwg": "Chewong; Cheq Wong",
"cwt": "Kuwaataay",
"cy": "Welsh",
"cya": "Nopala Chatino",
"cyb": "Cayubaba",
"cyo": "Cuyonon",
"czh": "Huizhou Chinese",
"czk": "Knaanic",
"czn": "Zenzontepec Chatino",
"czo": "Min Zhong Chinese",
"czt": "Zotung Chin",
"da": "Danish",
"daa": "Dangaléat",
"dac": "Dambi",
"dad": "Marik",
"dae": "Duupa",
"dag": "Dagbani",
"dah": "Gwahatike",
"dai": "Day",
"daj": "Dar Fur Daju",
"dak": "Dakota",
"dal": "Dahalo",
"dam": "Damakawa",
"dao": "Daai Chin",
"daq": "Dandami Maria",
"dar": "Dargwa",
"das": "Daho-Doo",
"dau": "Dar Sila Daju",
"dav": "Taita; Dawida",
"daw": "Davawenyo",
"dax": "Dayi",
"day": "Land Dayak languages",
"daz": "Dao",
"dba": "Bangime",
"dbb": "Deno",
"dbd": "Dadiya",
"dbe": "Dabe",
"dbf": "Edopi",
"dbg": "Dogul Dom Dogon",
"dbi": "Doka",
"dbj": "Ida'an",
"dbl": "Dyirbal",
"dbm": "Duguri",
"dbn": "Duriankere",
"dbo": "Dulbu",
"dbp": "Duwai",
"dbq": "Daba",
"dbr": "Dabarre",
"dbt": "Ben Tey Dogon",
"dbu": "Bondum Dom Dogon",
"dbv": "Dungu",
"dbw": "Bankan Tey Dogon",
"dby": "Dibiyaso",
"dcc": "Deccan",
"dcr": "Negerhollands",
"dda": "Dadi Dadi",
"ddd": "Dongotono",
"dde": "Doondo",
"ddg": "Fataluku",
"ddi": "West Goodenough",
"ddj": "Jaru",
"ddn": "Dendi (Benin)",
"ddo": "Dido",
"ddr": "Dhudhuroa",
"dds": "Donno So Dogon",
"ddw": "Dawera-Daweloor",
"de": "German",
"dec": "Dagik",
"ded": "Dedua",
"dee": "Dewoin",
"def": "Dezfuli",
"deg": "Degema",
"deh": "Dehwari",
"dei": "Demisa",
"dek": "Dek",
"del": "Delaware",
"dem": "Dem",
"den": "Slave (Athapascan)",
"dep": "Pidgin Delaware",
"deq": "Dendi (Central African Republic)",
"der": "Deori",
"des": "Desano",
"dev": "Domung",
"dez": "Dengese",
"dga": "Southern Dagaare",
"dgb": "Bunoge Dogon",
"dgc": "Casiguran Dumagat Agta",
"dgd": "Dagaari Dioula",
"dge": "Degenan",
"dgg": "Doga",
"dgh": "Dghwede",
"dgi": "Northern Dagara",
"dgk": "Dagba",
"dgl": "Andaandi; Dongolawi",
"dgn": "Dagoman",
"dgo": "Dogri (individual language)",
"dgr": "Dogrib; Tłı̨chǫ",
"dgs": "Dogoso",
"dgt": "Ndra'ngith",
"dgw": "Daungwurrung",
"dgx": "Doghoro",
"dgz": "Daga",
"dhd": "Dhundari",
"dhg": "Dhangu-Djangu; Dhangu; Djangu",
"dhi": "Dhimal",
"dhl": "Dhalandji",
"dhm": "Zemba",
"dhn": "Dhanki",
"dho": "Dhodia",
"dhr": "Dhargari",
"dhs": "Dhaiso",
"dhu": "Dhurga",
"dhv": "Dehu; Drehu",
"dhw": "Dhanwar (Nepal)",
"dhx": "Dhungaloo",
"dia": "Dia",
"dib": "South Central Dinka",
"dic": "Lakota Dida",
"did": "Didinga",
"dif": "Dieri; Diyari",
"dig": "Digo; Chidigo",
"dih": "Kumiai",
"dii": "Dimbong",
"dij": "Dai",
"dik": "Southwestern Dinka",
"dil": "Dilling",
"dim": "Dime",
"din": "Dinka",
"dio": "Dibo",
"dip": "Northeastern Dinka",
"diq": "Dimli (individual language)",
"dir": "Dirim",
"dis": "Dimasa",
"diu": "Diriku",
"diw": "Northwestern Dinka",
"dix": "Dixon Reef",
"diy": "Diuwe",
"diz": "Ding",
"dja": "Djadjawurrung",
"djb": "Djinba",
"djc": "Dar Daju Daju",
"djd": "Djamindjung; Ngaliwurru",
"dje": "Zarma",
"djf": "Djangun",
"dji": "Djinang",
"djj": "Djeebbana",
"djk": "Eastern Maroon Creole; Businenge Tongo; Nenge",
"djm": "Jamsay Dogon",
"djn": "Jawoyn; Djauan",
"djo": "Jangkang",
"djr": "Djambarrpuyngu",
"dju": "Kapriman",
"djw": "Djawi",
"dka": "Dakpakha",
"dkg": "Kadung",
"dkk": "Dakka",
"dkr": "Kuijau",
"dks": "Southeastern Dinka",
"dkx": "Mazagway",
"dlg": "Dolgan",
"dlk": "Dahalik",
"dlm": "Dalmatian",
"dln": "Darlong",
"dma": "Duma",
"dmb": "Mombo Dogon",
"dmc": "Gavak",
"dmd": "Madhi Madhi",
"dme": "Dugwor",
"dmf": "Medefaidrin",
"dmg": "Upper Kinabatangan",
"dmk": "Domaaki",
"dml": "Dameli",
"dmm": "Dama",
"dmn": "Mande languages",
"dmo": "Kemedzung",
"dmr": "East Damar",
"dms": "Dampelas",
"dmu": "Dubu; Tebi",
"dmv": "Dumpas",
"dmw": "Mudburra",
"dmx": "Dema",
"dmy": "Demta; Sowari",
"dna": "Upper Grand Valley Dani",
"dnd": "Daonda",
"dne": "Ndendeule",
"dng": "Dungan",
"dni": "Lower Grand Valley Dani",
"dnj": "Dan",
"dnk": "Dengka",
"dnn": "Dzùùngoo",
"dno": "Ndrulo; Northern Lendu",
"dnr": "Danaru",
"dnt": "Mid Grand Valley Dani",
"dnu": "Danau",
"dnv": "Danu",
"dnw": "Western Dani",
"dny": "Dení",
"doa": "Dom",
"dob": "Dobu",
"doc": "Northern Dong",
"doe": "Doe",
"dof": "Domu",
"doh": "Dong",
"doi": "Dogri (macrolanguage)",
"dok": "Dondo",
"dol": "Doso",
"don": "Toura (Papua New Guinea)",
"doo": "Dongo",
"dop": "Lukpa",
"doq": "Dominican Sign Language",
"dor": "Dori'o",
"dos": "Dogosé",
"dot": "Dass",
"dov": "Dombe",
"dow": "Doyayo",
"dox": "Bussa",
"doy": "Dompo",
"doz": "Dorze",
"dpp": "Papar",
"dra": "Dravidian languages",
"drb": "Dair",
"drc": "Minderico",
"drd": "Darmiya",
"dre": "Dolpo",
"drg": "Rungus",
"dri": "C'Lela",
"drl": "Paakantyi",
"drn": "West Damar",
"dro": "Daro-Matu Melanau",
"drq": "Dura",
"drs": "Gedeo",
"drt": "Drents",
"dru": "Rukai",
"dry": "Darai",
"dsb": "Lower Sorbian",
"dse": "Dutch Sign Language",
"dsh": "Daasanach",
"dsi": "Disa",
"dsl": "Danish Sign Language",
"dsn": "Dusner",
"dso": "Desiya",
"dsq": "Tadaksahak",
"dsz": "Mardin Sign Language",
"dta": "Daur",
"dtb": "Labuk-Kinabatangan Kadazan",
"dtd": "Ditidaht",
"dth": "Adithinngithigh",
"dti": "Ana Tinga Dogon",
"dtk": "Tene Kan Dogon",
"dtm": "Tomo Kan Dogon",
"dtn": "Daatsʼíin",
"dto": "Tommo So Dogon",
"dtp": "Kadazan Dusun; Central Dusun",
"dtr": "Lotud",
"dts": "Toro So Dogon",
"dtt": "Toro Tegu Dogon",
"dtu": "Tebul Ure Dogon",
"dty": "Dotyali",
"dua": "Duala",
"dub": "Dubli",
"duc": "Duna",
"due": "Umiray Dumaget Agta",
"duf": "Dumbea; Drubea",
"dug": "Duruma; Chiduruma",
"duh": "Dungra Bhil",
"dui": "Dumun",
"duk": "Uyajitaya",
"dul": "Alabat Island Agta",
"dum": "Middle Dutch (ca. 1050-1350)",
"dun": "Dusun Deyah",
"duo": "Dupaninan Agta",
"dup": "Duano",
"duq": "Dusun Malang",
"dur": "Dii",
"dus": "Dumi",
"duu": "Drung",
"duv": "Duvle",
"duw": "Dusun Witu",
"dux": "Duungooma",
"duy": "Dicamay Agta",
"duz": "Duli-Gey",
"dv": "Dhivehi; Divehi; Maldivian",
"dva": "Duau",
"dwa": "Diri",
"dwk": "Dawik Kui",
"dwr": "Dawro",
"dws": "Dutton World Speedwords",
"dwu": "Dhuwal",
"dww": "Dawawa",
"dwy": "Dhuwaya",
"dwz": "Dewas Rai",
"dya": "Dyan",
"dyb": "Dyaberdyaber",
"dyd": "Dyugun",
"dyg": "Villa Viciosa Agta",
"dyi": "Djimini Senoufo",
"dym": "Yanda Dom Dogon",
"dyn": "Dyangadi; Dhanggatti",
"dyo": "Jola-Fonyi",
"dyu": "Dyula",
"dyy": "Djabugay; Dyaabugay",
"dz": "Dzongkha",
"dza": "Tunzu",
"dze": "Djiwarli",
"dzg": "Dazaga",
"dzl": "Dzalakha",
"dzn": "Dzando",
"eaa": "Karenggapa",
"ebc": "Beginci",
"ebg": "Ebughu",
"ebk": "Eastern Bontok",
"ebo": "Teke-Ebo",
"ebr": "Ebrié",
"ebu": "Embu; Kiembu",
"ecr": "Eteocretan",
"ecs": "Ecuadorian Sign Language",
"ecy": "Eteocypriot",
"ee": "Ewe",
"eee": "E",
"efa": "Efai",
"efe": "Efe",
"efi": "Efik",
"ega": "Ega",
"egl": "Emilian",
"egm": "Benamanga",
"ego": "Eggon",
"egx": "Egyptian languages",
"egy": "Egyptian (Ancient)",
"ehs": "Miyakubo Sign Language",
"ehu": "Ehueun",
"eip": "Eipomek",
"eit": "Eitiep",
"eiv": "Askopan",
"eja": "Ejamat",
"eka": "Ekajuk",
"eke": "Ekit",
"ekg": "Ekari",
"eki": "Eki",
"ekk": "Standard Estonian",
"ekl": "Kol (Bangladesh); Kol",
"ekm": "Elip",
"eko": "Koti",
"ekp": "Ekpeye",
"ekr": "Yace",
"eky": "Eastern Kayah",
"el": "Modern Greek (1453-)",
"ele": "Elepi",
"elh": "El Hugeirat",
"eli": "Nding",
"elk": "Elkei",
"elm": "Eleme",
"elo": "El Molo",
"elu": "Elu",
"elx": "Elamite",
"ema": "Emai-Iuleha-Ora",
"emb": "Embaloh",
"eme": "Emerillon",
"emg": "Eastern Meohang",
"emi": "Mussau-Emira",
"emk": "Eastern Maninkakan",
"emm": "Mamulique",
"emn": "Eman",
"emp": "Northern Emberá",
"emq": "Eastern Minyag",
"ems": "Pacific Gulf Yupik",
"emu": "Eastern Muria",
"emw": "Emplawas",
"emx": "Erromintxela",
"emy": "Epigraphic Mayan",
"emz": "Mbessa",
"en": "English",
"ena": "Apali",
"enb": "Markweeta",
"enc": "En",
"end": "Ende",
"enf": "Forest Enets",
"enh": "Tundra Enets",
"enl": "Enlhet",
"enm": "Middle English (1100-1500)",
"enn": "Engenni",
"eno": "Enggano",
"enq": "Enga",
"enr": "Emumu; Emem",
"enu": "Enu",
"env": "Enwan (Edo State)",
"enw": "Enwan (Akwa Ibom State)",
"enx": "Enxet",
"eo": "Esperanto",
"eot": "Beti (Côte d'Ivoire)",
"epi": "Epie",
"era": "Eravallan",
"erg": "Sie",
"erh": "Eruwa",
"eri": "Ogea",
"erk": "South Efate",
"ero": "Horpa",
"err": "Erre",
"ers": "Ersu",
"ert": "Eritai",
"erw": "Erokwanas",
"es": "Spanish; Castilian",
"ese": "Ese Ejja",
"esg": "Aheri Gondi",
"esh": "Eshtehardi",
"esi": "North Alaskan Inupiatun",
"esk": "Northwest Alaska Inupiatun",
"esl": "Egypt Sign Language",
"esm": "Esuma",
"esn": "Salvadoran Sign Language",
"eso": "Estonian Sign Language",
"esq": "Esselen",
"ess": "Central Siberian Yupik",
"esu": "Central Yupik",
"esx": "Eskimo-Aleut languages",
"esy": "Eskayan",
"et": "Estonian",
"etb": "Etebi",
"etc": "Etchemin",
"eth": "Ethiopian Sign Language",
"etn": "Eton (Vanuatu)",
"eto": "Eton (Cameroon)",
"etr": "Edolo",
"ets": "Yekhee",
"ett": "Etruscan",
"etu": "Ejagham",
"etx": "Eten",
"etz": "Semimi",
"eu": "Basque",
"euq": "Basque (family)",
"eve": "Even",
"evh": "Uvbie",
"evn": "Evenki",
"ewo": "Ewondo",
"ext": "Extremaduran",
"eya": "Eyak",
"eyo": "Keiyo",
"eza": "Ezaa",
"eze": "Uzekwe",
"fa": "Persian",
"faa": "Fasu",
"fab": "Fa d'Ambu",
"fad": "Wagi",
"faf": "Fagani",
"fag": "Finongan",
"fah": "Baissa Fali",
"fai": "Faiwol",
"faj": "Faita",
"fak": "Fang (Cameroon)",
"fal": "South Fali",
"fam": "Fam",
"fan": "Fang (Equatorial Guinea)",
"fap": "Paloor",
"far": "Fataleka",
"fat": "Fanti",
"fau": "Fayu",
"fax": "Fala",
"fay": "Southwestern Fars",
"faz": "Northwestern Fars",
"fbl": "West Albay Bikol",
"fcs": "Quebec Sign Language",
"fer": "Feroge",
"ff": "Fulah",
"ffi": "Foia Foia",
"ffm": "Maasina Fulfulde",
"fgr": "Fongoro",
"fi": "Finnish",
"fia": "Nobiin",
"fie": "Fyer",
"fif": "Faifi",
"fil": "Filipino; Pilipino",
"fip": "Fipa",
"fir": "Firan",
"fit": "Tornedalen Finnish; Meänkieli",
"fiu": "Finno-Ugrian languages",
"fiw": "Fiwaga",
"fj": "Fijian",
"fkk": "Kirya-Konzəl",
"fkv": "Kven Finnish",
"fla": "Kalispel-Pend d'Oreille",
"flh": "Foau",
"fli": "Fali",
"fll": "North Fali",
"fln": "Flinders Island",
"flr": "Fuliiru",
"fly": "Flaaitaal; Tsotsitaal",
"fmp": "Fe'fe'",
"fmu": "Far Western Muria",
"fnb": "Fanbak",
"fng": "Fanagalo",
"fni": "Fania",
"fo": "Faroese",
"fod": "Foodo",
"foi": "Foi",
"fom": "Foma",
"fon": "Fon",
"for": "Fore",
"fos": "Siraya",
"fox": "Formosan languages",
"fpe": "Fernando Po Creole English",
"fqs": "Fas",
"fr": "French",
"frc": "Cajun French",
"frd": "Fordata",
"frk": "Frankish",
"frm": "Middle French (ca. 1400-1600)",
"fro": "Old French (842-ca. 1400)",
"frp": "Arpitan; Francoprovençal",
"frq": "Forak",
"frr": "Northern Frisian",
"frs": "Eastern Frisian",
"frt": "Fortsenal",
"fse": "Finnish Sign Language",
"fsl": "French Sign Language",
"fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli",
"fub": "Adamawa Fulfulde",
"fuc": "Pulaar",
"fud": "East Futuna",
"fue": "Borgu Fulfulde",
"fuf": "Pular",
"fuh": "Western Niger Fulfulde",
"fui": "Bagirmi Fulfulde",
"fuj": "Ko",
"fum": "Fum",
"fun": "Fulniô",
"fuq": "Central-Eastern Niger Fulfulde",
"fur": "Friulian",
"fut": "Futuna-Aniwa",
"fuu": "Furu",
"fuv": "Nigerian Fulfulde",
"fuy": "Fuyug",
"fvr": "Fur",
"fwa": "Fwâi",
"fwe": "Fwe",
"fy": "Western Frisian",
"ga": "Irish",
"gaa": "Ga",
"gab": "Gabri",
"gac": "Mixed Great Andamanese",
"gad": "Gaddang",
"gae": "Guarequena",
"gaf": "Gende",
"gag": "Gagauz",
"gah": "Alekano",
"gai": "Borei",
"gaj": "Gadsup",
"gak": "Gamkonora",
"gal": "Galolen",
"gam": "Kandawo",
"gan": "Gan Chinese",
"gao": "Gants",
"gap": "Gal",
"gaq": "Gata'",
"gar": "Galeya",
"gas": "Adiwasi Garasia",
"gat": "Kenati",
"gau": "Mudhili Gadaba",
"gaw": "Nobonob",
"gax": "Borana-Arsi-Guji Oromo",
"gay": "Gayo",
"gaz": "West Central Oromo",
"gba": "Gbaya (Central African Republic)",
"gbb": "Kaytetye",
"gbd": "Karajarri",
"gbe": "Niksek",
"gbf": "Gaikundi",
"gbg": "Gbanziri",
"gbh": "Defi Gbe",
"gbi": "Galela",
"gbj": "Bodo Gadaba",
"gbk": "Gaddi",
"gbl": "Gamit",
"gbm": "Garhwali",
"gbn": "Mo'da",
"gbo": "Northern Grebo",
"gbp": "Gbaya-Bossangoa",
"gbq": "Gbaya-Bozoum",
"gbr": "Gbagyi",
"gbs": "Gbesi Gbe",
"gbu": "Gagadu",
"gbv": "Gbanu",
"gbw": "Gabi-Gabi",
"gbx": "Eastern Xwla Gbe",
"gby": "Gbari",
"gbz": "Zoroastrian Dari",
"gcc": "Mali",
"gcd": "Ganggalida",
"gce": "Galice",
"gcf": "Guadeloupean Creole French",
"gcl": "Grenadian Creole English",
"gcn": "Gaina",
"gcr": "Guianese Creole French",
"gct": "Colonia Tovar German",
"gd": "Scottish Gaelic; Gaelic",
"gda": "Gade Lohar",
"gdb": "Pottangi Ollar Gadaba",
"gdc": "Gugu Badhun",
"gdd": "Gedaged",
"gde": "Gude",
"gdf": "Guduf-Gava",
"gdg": "Ga'dang",
"gdh": "Gadjerawang; Gajirrabeng",
"gdi": "Gundi",
"gdj": "Gurdjar",
"gdk": "Gadang",
"gdl": "Dirasha",
"gdm": "Laal",
"gdn": "Umanakaina",
"gdo": "Ghodoberi",
"gdq": "Mehri",
"gdr": "Wipi",
"gds": "Ghandruk Sign Language",
"gdt": "Kungardutyi",
"gdu": "Gudu",
"gdx": "Godwari",
"gea": "Geruma",
"geb": "Kire",
"gec": "Gboloo Grebo",
"ged": "Gade",
"gef": "Gerai",
"geg": "Gengle",
"geh": "Hutterite German; Hutterisch",
"gei": "Gebe",
"gej": "Gen",
"gek": "Ywom",
"gel": "ut-Ma'in",
"gem": "Germanic languages",
"geq": "Geme",
"ges": "Geser-Gorom",
"gev": "Eviya",
"gew": "Gera",
"gex": "Garre",
"gey": "Enya",
"gez": "Geez",
"gfk": "Patpatar",
"gft": "Gafat",
"gga": "Gao",
"ggb": "Gbii",
"ggd": "Gugadj",
"gge": "Gurr-goni",
"ggg": "Gurgula",
"ggk": "Kungarakany",
"ggl": "Ganglau",
"ggt": "Gitua",
"ggu": "Gagu; Gban",
"ggw": "Gogodala",
"gha": "Ghadamès",
"ghc": "Hiberno-Scottish Gaelic",
"ghe": "Southern Ghale",
"ghh": "Northern Ghale",
"ghk": "Geko Karen",
"ghl": "Ghulfan",
"ghn": "Ghanongga",
"gho": "Ghomara",
"ghr": "Ghera",
"ghs": "Guhu-Samane",
"ght": "Kuke; Kutang Ghale",
"gia": "Kija",
"gib": "Gibanawa",
"gic": "Gail",
"gid": "Gidar",
"gie": "Gaɓogbo; Guébie",
"gig": "Goaria",
"gih": "Githabul",
"gii": "Girirra",
"gil": "Gilbertese",
"gim": "Gimi (Eastern Highlands)",
"gin": "Hinukh",
"gip": "Gimi (West New Britain)",
"giq": "Green Gelao",
"gir": "Red Gelao",
"gis": "North Giziga",
"git": "Gitxsan",
"giu": "Mulao",
"giw": "White Gelao",
"gix": "Gilima",
"giy": "Giyug",
"giz": "South Giziga",
"gjk": "Kachi Koli",
"gjm": "Gunditjmara",
"gjn": "Gonja",
"gjr": "Gurindji Kriol",
"gju": "Gujari",
"gka": "Guya",
"gkd": "Magɨ (Madang Province)",
"gke": "Ndai",
"gkn": "Gokana",
"gko": "Kok-Nar",
"gkp": "Guinea Kpelle",
"gku": "ǂUngkue",
"gl": "Galician",
"glb": "Belning",
"glc": "Bon Gula",
"gld": "Nanai",
"glh": "Northwest Pashai; Northwest Pashayi",
"glj": "Gula Iro",
"glk": "Gilaki",
"gll": "Garlali",
"glo": "Galambu",
"glr": "Glaro-Twabo",
"glu": "Gula (Chad)",
"glw": "Glavda",
"gly": "Gule",
"gma": "Gambera",
"gmb": "Gula'alaa",
"gmd": "Mághdì",
"gme": "East Germanic languages",
"gmg": "Magɨyi",
"gmh": "Middle High German (ca. 1050-1500)",
"gml": "Middle Low German",
"gmm": "Gbaya-Mbodomo",
"gmn": "Gimnime",
"gmq": "North Germanic languages",
"gmr": "Mirning; Mirniny",
"gmu": "Gumalu",
"gmv": "Gamo",
"gmw": "West Germanic languages",
"gmx": "Magoma",
"gmy": "Mycenaean Greek",
"gmz": "Mgbolizhia",
"gn": "Guarani",
"gna": "Kaansa",
"gnb": "Gangte",
"gnc": "Guanche",
"gnd": "Zulgo-Gemzek",
"gne": "Ganang",
"gng": "Ngangam",
"gnh": "Lere",
"gni": "Gooniyandi",
"gnj": "Ngen",
"gnk": "ǁGana",
"gnl": "Gangulu",
"gnm": "Ginuman",
"gnn": "Gumatj",
"gno": "Northern Gondi",
"gnq": "Gana",
"gnr": "Gureng Gureng",
"gnt": "Guntai",
"gnu": "Gnau",
"gnw": "Western Bolivian Guaraní",
"gnz": "Ganzi",
"goa": "Guro",
"gob": "Playero",
"goc": "Gorakor",
"god": "Godié",
"goe": "Gongduk",
"gof": "Gofa",
"gog": "Gogo",
"goh": "Old High German (ca. 750-1050)",
"goi": "Gobasi",
"goj": "Gowlan",
"gok": "Gowli",
"gol": "Gola",
"gom": "Goan Konkani",
"gon": "Gondi",
"goo": "Gone Dau",
"gop": "Yeretuar",
"goq": "Gorap",
"gor": "Gorontalo",
"gos": "Gronings",
"got": "Gothic",
"gou": "Gavar",
"gov": "Goo",
"gow": "Gorowa",
"gox": "Gobu",
"goy": "Goundo",
"goz": "Gozarkhani",
"gpa": "Gupa-Abawa",
"gpe": "Ghanaian Pidgin English",
"gpn": "Taiap",
"gqa": "Ga'anda",
"gqi": "Guiqiong",
"gqn": "Guana (Brazil)",
"gqr": "Gor",
"gqu": "Qau",
"gra": "Rajput Garasia",
"grb": "Grebo",
"grc": "Ancient Greek (to 1453)",
"grd": "Guruntum-Mbaaru",
"grg": "Madi",
"grh": "Gbiri-Niragu",
"gri": "Ghari",
"grj": "Southern Grebo",
"grk": "Greek languages",
"grm": "Kota Marudu Talantang",
"gro": "Groma",
"grq": "Gorovu",
"grr": "Taznatit",
"grs": "Gresi",
"grt": "Garo",
"gru": "Kistane",
"grv": "Central Grebo",
"grw": "Gweda",
"grx": "Guriaso",
"gry": "Barclayville Grebo",
"grz": "Guramalum",
"gse": "Ghanaian Sign Language",
"gsg": "German Sign Language",
"gsl": "Gusilay",
"gsm": "Guatemalan Sign Language",
"gsn": "Nema; Gusan",
"gso": "Southwest Gbaya",
"gsp": "Wasembo",
"gss": "Greek Sign Language",
"gsw": "Swiss German; Alemannic; Alsatian",
"gta": "Guató",
"gtu": "Aghu-Tharnggala",
"gu": "Gujarati",
"gua": "Shiki",
"gub": "Guajajára",
"guc": "Wayuu",
"gud": "Yocoboué Dida",
"gue": "Gurindji",
"guf": "Gupapuyngu",
"gug": "Paraguayan Guaraní",
"guh": "Guahibo",
"gui": "Eastern Bolivian Guaraní",
"guk": "Gumuz",
"gul": "Sea Island Creole English",
"gum": "Guambiano",
"gun": "Mbyá Guaraní",
"guo": "Guayabero",
"gup": "Gunwinggu",
"guq": "Aché",
"gur": "Farefare",
"gus": "Guinean Sign Language",
"gut": "Maléku Jaíka",
"guu": "Yanomamö",
"guw": "Gun",
"gux": "Gourmanchéma",
"guz": "Gusii; Ekegusii",
"gv": "Manx",
"gva": "Guana (Paraguay)",
"gvc": "Guanano",
"gve": "Duwet",
"gvf": "Golin",
"gvj": "Guajá",
"gvl": "Gulay",
"gvm": "Gurmana",
"gvn": "Kuku-Yalanji",
"gvo": "Gavião Do Jiparaná",
"gvp": "Pará Gavião",
"gvr": "Gurung",
"gvs": "Gumawana",
"gvy": "Guyani",
"gwa": "Mbato",
"gwb": "Gwa",
"gwc": "Gawri; Kalami",
"gwd": "Gawwada",
"gwe": "Gweno",
"gwf": "Gowro",
"gwg": "Moo",
"gwi": "Gwichʼin",
"gwj": "ǀGwi",
"gwm": "Awngthim",
"gwn": "Gwandara",
"gwr": "Gwere",
"gwt": "Gawar-Bati",
"gwu": "Guwamu",
"gww": "Kwini",
"gwx": "Gua",
"gxx": "Wè Southern",
"gya": "Northwest Gbaya",
"gyb": "Garus",
"gyd": "Kayardild",
"gye": "Gyem",
"gyf": "Gungabula",
"gyg": "Gbayi",
"gyi": "Gyele",
"gyl": "Gayil",
"gym": "Ngäbere",
"gyn": "Guyanese Creole English",
"gyo": "Gyalsumdo",
"gyr": "Guarayu",
"gyy": "Gunya",
"gyz": "Geji; Gyaazi",
"gza": "Ganza",
"gzi": "Gazi",
"gzn": "Gane",
"ha": "Hausa",
"haa": "Han",
"hab": "Hanoi Sign Language",
"hac": "Gurani",
"had": "Hatam",
"hae": "Eastern Oromo",
"haf": "Haiphong Sign Language",
"hag": "Hanga",
"hah": "Hahon",
"hai": "Haida",
"haj": "Hajong",
"hak": "Hakka Chinese",
"hal": "Halang",
"ham": "Hewa",
"han": "Hangaza",
"hao": "Hakö",
"hap": "Hupla",
"haq": "Ha",
"har": "Harari",
"has": "Haisla",
"hav": "Havu",
"haw": "Hawaiian",
"hax": "Southern Haida",
"hay": "Haya",
"haz": "Hazaragi",
"hba": "Hamba",
"hbb": "Huba",
"hbn": "Heiban",
"hbo": "Ancient Hebrew",
"hbu": "Habu",
"hca": "Andaman Creole Hindi",
"hch": "Huichol",
"hdn": "Northern Haida",
"hds": "Honduras Sign Language",
"hdy": "Hadiyya",
"he": "Hebrew",
"hea": "Northern Qiandong Miao",
"hed": "Herdé",
"heg": "Helong",
"heh": "Hehe",
"hei": "Heiltsuk",
"hem": "Hemba",
"hgm": "Haiǁom",
"hgw": "Haigwai",
"hhi": "Hoia Hoia",
"hhr": "Kerak",
"hhy": "Hoyahoya",
"hi": "Hindi",
"hia": "Lamang",
"hib": "Hibito",
"hid": "Hidatsa",
"hif": "Fiji Hindi",
"hig": "Kamwe",
"hih": "Pamosu",
"hii": "Hinduri",
"hij": "Hijuk",
"hik": "Seit-Kaitetu",
"hil": "Hiligaynon",
"him": "Himachali languages; Western Pahari languages",
"hio": "Tsoa",
"hir": "Himarimã",
"hit": "Hittite",
"hiw": "Hiw",
"hix": "Hixkaryána",
"hji": "Haji",
"hka": "Kahe",
"hke": "Hunde",
"hkh": "Khah; Poguli",
"hkk": "Hunjara-Kaina Ke",
"hkn": "Mel-Khaonh",
"hks": "Hong Kong Sign Language; Heung Kong Sau Yue",
"hla": "Halia",
"hlb": "Halbi",
"hld": "Halang Doan",
"hle": "Hlersu",
"hlt": "Matu Chin",
"hlu": "Hieroglyphic Luwian",
"hma": "Southern Mashan Hmong; Southern Mashan Miao",
"hmb": "Humburi Senni Songhay",
"hmc": "Central Huishui Hmong; Central Huishui Miao",
"hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao",
"hme": "Eastern Huishui Hmong; Eastern Huishui Miao",
"hmf": "Hmong Don",
"hmg": "Southwestern Guiyang Hmong",
"hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao",
"hmi": "Northern Huishui Hmong; Northern Huishui Miao",
"hmj": "Ge; Gejia",
"hmk": "Maek",
"hml": "Luopohe Hmong; Luopohe Miao",
"hmm": "Central Mashan Hmong; Central Mashan Miao",
"hmn": "Hmong; Mong",
"hmp": "Northern Mashan Hmong; Northern Mashan Miao",
"hmq": "Eastern Qiandong Miao",
"hmr": "Hmar",
"hms": "Southern Qiandong Miao",
"hmt": "Hamtai",
"hmu": "Hamap",
"hmv": "Hmong Dô",
"hmw": "Western Mashan Hmong; Western Mashan Miao",
"hmx": "Hmong-Mien languages",
"hmy": "Southern Guiyang Hmong; Southern Guiyang Miao",
"hmz": "Hmong Shua; Sinicized Miao",
"hna": "Mina (Cameroon)",
"hnd": "Southern Hindko",
"hne": "Chhattisgarhi",
"hng": "Hungu",
"hnh": "ǁAni",
"hni": "Hani",
"hnj": "Hmong Njua; Mong Leng; Mong Njua",
"hnn": "Hanunoo",
"hno": "Northern Hindko",
"hns": "Caribbean Hindustani",
"hnu": "Hung",
"ho": "Hiri Motu",
"hoa": "Hoava",
"hob": "Mari (Madang Province)",
"hoc": "Ho",
"hod": "Holma",
"hoe": "Horom",
"hoh": "Hobyót",
"hoi": "Holikachuk",
"hoj": "Hadothi; Haroti",
"hok": "Hokan languages",
"hol": "Holu",
"hom": "Homa",
"hoo": "Holoholo",
"hop": "Hopi",
"hor": "Horo",
"hos": "Ho Chi Minh City Sign Language",
"hot": "Hote; Malê",
"hov": "Hovongan",
"how": "Honi",
"hoy": "Holiya",
"hoz": "Hozo",
"hpo": "Hpon",
"hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language",
"hr": "Croatian",
"hra": "Hrangkhol",
"hrc": "Niwer Mil",
"hre": "Hre",
"hrk": "Haruku",
"hrm": "Horned Miao",
"hro": "Haroi",
"hrp": "Nhirrpi",
"hrt": "Hértevin",
"hru": "Hruso",
"hrw": "Warwar Feni",
"hrx": "Hunsrik",
"hrz": "Harzani",
"hsb": "Upper Sorbian",
"hsh": "Hungarian Sign Language",
"hsl": "Hausa Sign Language",
"hsn": "Xiang Chinese",
"hss": "Harsusi",
"ht": "Haitian; Haitian Creole",
"hti": "Hoti",
"hto": "Minica Huitoto",
"hts": "Hadza",
"htu": "Hitu",
"htx": "Middle Hittite",
"hu": "Hungarian",
"hub": "Huambisa",
"huc": "ǂHua; ǂʼAmkhoe",
"hud": "Huaulu",
"hue": "San Francisco Del Mar Huave",
"huf": "Humene",
"hug": "Huachipaeri",
"huh": "Huilliche",
"hui": "Huli",
"huj": "Northern Guiyang Hmong; Northern Guiyang Miao",
"huk": "Hulung",
"hul": "Hula",
"hum": "Hungana",
"huo": "Hu",
"hup": "Hupa",
"huq": "Tsat",
"hur": "Halkomelem",
"hus": "Huastec",
"hut": "Humla",
"huu": "Murui Huitoto",
"huv": "San Mateo Del Mar Huave",
"huw": "Hukumina",
"hux": "Nüpode Huitoto",
"huy": "Hulaulá",
"huz": "Hunzib",
"hvc": "Haitian Vodoun Culture Language",
"hve": "San Dionisio Del Mar Huave",
"hvk": "Haveke",
"hvn": "Sabu",
"hvv": "Santa María Del Mar Huave",
"hwa": "Wané",
"hwc": "Hawai'i Creole English; Hawai'i Pidgin",
"hwo": "Hwana",
"hy": "Armenian",
"hya": "Hya",
"hyw": "Western Armenian",
"hyx": "Armenian (family)",
"hz": "Herero",
"ia": "Interlingua (International Auxiliary Language Association)",
"iai": "Iaai",
"ian": "Iatmul",
"iar": "Purari",
"iba": "Iban",
"ibb": "Ibibio",
"ibd": "Iwaidja",
"ibe": "Akpes",
"ibg": "Ibanag",
"ibh": "Bih",
"ibl": "Ibaloi",
"ibm": "Agoi",
"ibn": "Ibino",
"ibr": "Ibuoro",
"ibu": "Ibu",
"iby": "Ibani",
"ica": "Ede Ica",
"ich": "Etkywan",
"icl": "Icelandic Sign Language",
"icr": "Islander Creole English",
"id": "Indonesian",
"ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi",
"idb": "Indo-Portuguese",
"idc": "Idon; Ajiya",
"idd": "Ede Idaca",
"ide": "Idere",
"idi": "Idi",
"idr": "Indri",
"ids": "Idesa",
"idt": "Idaté",
"idu": "Idoma",
"ie": "Interlingue; Occidental",
"ifa": "Amganad Ifugao",
"ifb": "Batad Ifugao; Ayangan Ifugao",
"ife": "Ifè",
"iff": "Ifo",
"ifk": "Tuwali Ifugao",
"ifm": "Teke-Fuumu",
"ifu": "Mayoyao Ifugao",
"ify": "Keley-I Kallahan",
"ig": "Igbo",
"igb": "Ebira",
"ige": "Igede",
"igg": "Igana",
"igl": "Igala",
"igm": "Kanggape",
"ign": "Ignaciano",
"igo": "Isebe",
"igs": "Interglossa",
"igw": "Igwe",
"ihb": "Iha Based Pidgin",
"ihi": "Ihievbe",
"ihp": "Iha",
"ihw": "Bidhawal",
"ii": "Sichuan Yi; Nuosu",
"iin": "Thiin",
"iir": "Indo-Iranian languages",
"ijc": "Izon",
"ije": "Biseni",
"ijj": "Ede Ije",
"ijn": "Kalabari",
"ijo": "Ijo languages",
"ijs": "Southeast Ijo",
"ik": "Inupiaq",
"ike": "Eastern Canadian Inuktitut",
"iki": "Iko",
"ikk": "Ika",
"ikl": "Ikulu",
"iko": "Olulumo-Ikom",
"ikp": "Ikpeshi",
"ikr": "Ikaranggal",
"iks": "Inuit Sign Language",
"ikt": "Inuinnaqtun; Western Canadian Inuktitut",
"ikv": "Iku-Gora-Ankwa",
"ikw": "Ikwere",
"ikx": "Ik",
"ikz": "Ikizu",
"ila": "Ile Ape",
"ilb": "Ila",
"ilg": "Garig-Ilgar",
"ili": "Ili Turki",
"ilk": "Ilongot",
"ilm": "Iranun (Malaysia)",
"ilo": "Iloko",
"ilp": "Iranun (Philippines)",
"ils": "International Sign",
"ilu": "Ili'uun",
"ilv": "Ilue",
"ima": "Mala Malasar",
"imi": "Anamgura",
"iml": "Miluk",
"imn": "Imonda",
"imo": "Imbongu",
"imr": "Imroing",
"ims": "Marsian",
"imt": "Imotong",
"imy": "Milyan",
"inb": "Inga",
"inc": "Indic languages",
"ine": "Indo-European languages",
"ing": "Degexit'an",
"inh": "Ingush",
"inj": "Jungle Inga",
"inl": "Indonesian Sign Language",
"inm": "Minaean",
"inn": "Isinai",
"ino": "Inoke-Yate",
"inp": "Iñapari",
"ins": "Indian Sign Language",
"int": "Intha",
"inz": "Ineseño",
"io": "Ido",
"ior": "Inor",
"iou": "Tuma-Irumu",
"iow": "Iowa-Oto",
"ipi": "Ipili",
"ipo": "Ipiko",
"iqu": "Iquito",
"iqw": "Ikwo",
"ira": "Iranian languages",
"ire": "Iresim",
"irh": "Irarutu",
"iri": "Rigwe; Irigwe",
"irk": "Iraqw",
"irn": "Irántxe",
"iro": "Iroquoian languages",
"irr": "Ir",
"iru": "Irula",
"irx": "Kamberau",
"iry": "Iraya",
"is": "Icelandic",
"isa": "Isabi",
"isc": "Isconahua",
"isd": "Isnag",
"ise": "Italian Sign Language",
"isg": "Irish Sign Language",
"ish": "Esan",
"isi": "Nkem-Nkum",
"isk": "Ishkashimi",
"ism": "Masimasi",
"isn": "Isanzu",
"iso": "Isoko",
"isr": "Israeli Sign Language",
"ist": "Istriot",
"isu": "Isu (Menchum Division)",
"it": "Italian",
"itb": "Binongan Itneg",
"itc": "Italic languages",
"itd": "Southern Tidung",
"ite": "Itene",
"iti": "Inlaod Itneg",
"itk": "Judeo-Italian",
"itl": "Itelmen",
"itm": "Itu Mbon Uzo",
"ito": "Itonama",
"itr": "Iteri",
"its": "Isekiri",
"itt": "Maeng Itneg",
"itv": "Itawit",
"itw": "Ito",
"itx": "Itik",
"ity": "Moyadan Itneg",
"itz": "Itzá",
"iu": "Inuktitut",
"ium": "Iu Mien",
"ivb": "Ibatan",
"ivv": "Ivatan",
"iwk": "I-Wak",
"iwm": "Iwam",
"iwo": "Iwur",
"iws": "Sepik Iwam",
"ixc": "Ixcatec",
"ixl": "Ixil",
"iya": "Iyayu",
"iyo": "Mesaka",
"iyx": "Yaka (Congo)",
"izh": "Ingrian",
"izr": "Izere",
"izz": "Izii",
"ja": "Japanese",
"jaa": "Jamamadí",
"jab": "Hyam",
"jac": "Popti'; Jakalteko",
"jad": "Jahanka",
"jae": "Yabem",
"jaf": "Jara",
"jah": "Jah Hut",
"jaj": "Zazao",
"jak": "Jakun",
"jal": "Yalahatan",
"jam": "Jamaican Creole English",
"jan": "Jandai",
"jao": "Yanyuwa",
"jaq": "Yaqay",
"jas": "New Caledonian Javanese",
"jat": "Jakati",
"jau": "Yaur",
"jax": "Jambi Malay",
"jay": "Yan-nhangu; Nhangu",
"jaz": "Jawe",
"jbe": "Judeo-Berber",
"jbi": "Badjiri",
"jbj": "Arandai",
"jbk": "Barikewa",
"jbm": "Bijim",
"jbn": "Nafusi",
"jbo": "Lojban",
"jbr": "Jofotek-Bromnya",
"jbt": "Jabutí",
"jbu": "Jukun Takum",
"jbw": "Yawijibaya",
"jcs": "Jamaican Country Sign Language",
"jct": "Krymchak",
"jda": "Jad",
"jdg": "Jadgali",
"jdt": "Judeo-Tat",
"jeb": "Jebero",
"jee": "Jerung",
"jeh": "Jeh",
"jei": "Yei",
"jek": "Jeri Kuo",
"jel": "Yelmek",
"jen": "Dza",
"jer": "Jere",
"jet": "Manem",
"jeu": "Jonkor Bourmataguil",
"jgb": "Ngbee",
"jge": "Judeo-Georgian",
"jgk": "Gwak",
"jgo": "Ngomba",
"jhi": "Jehai",
"jhs": "Jhankot Sign Language",
"jia": "Jina",
"jib": "Jibu",
"jic": "Tol",
"jid": "Bu (Kaduna State)",
"jie": "Jilbe",
"jig": "Jingulu; Djingili",
"jih": "sTodsde; Shangzhai",
"jii": "Jiiddu",
"jil": "Jilim",
"jim": "Jimi (Cameroon)",
"jio": "Jiamao",
"jiq": "Guanyinqiao; Lavrung",
"jit": "Jita",
"jiu": "Youle Jinuo",
"jiv": "Shuar",
"jiy": "Buyuan Jinuo",
"jje": "Jejueo",
"jjr": "Bankal",
"jka": "Kaera",
"jkm": "Mobwa Karen",
"jko": "Kubo",
"jkp": "Paku Karen",
"jkr": "Koro (India)",
"jks": "Amami Koniya Sign Language",
"jku": "Labir",
"jle": "Ngile",
"jls": "Jamaican Sign Language",
"jma": "Dima",
"jmb": "Zumbun",
"jmc": "Machame",
"jmd": "Yamdena",
"jmi": "Jimi (Nigeria)",
"jml": "Jumli",
"jmn": "Makuri Naga",
"jmr": "Kamara",
"jms": "Mashi (Nigeria)",
"jmw": "Mouwase",
"jmx": "Western Juxtlahuaca Mixtec",
"jna": "Jangshung",
"jnd": "Jandavra",
"jng": "Yangman",
"jni": "Janji",
"jnj": "Yemsa",
"jnl": "Rawat",
"jns": "Jaunsari",
"job": "Joba",
"jod": "Wojenaka",
"jog": "Jogi",
"jor": "Jorá",
"jos": "Jordanian Sign Language",
"jow": "Jowulu",
"jpa": "Jewish Palestinian Aramaic",
"jpr": "Judeo-Persian",
"jpx": "Japanese (family)",
"jqr": "Jaqaru",
"jra": "Jarai",
"jrb": "Judeo-Arabic",
"jrr": "Jiru",
"jrt": "Jakattoe",
"jru": "Japrería",
"jsl": "Japanese Sign Language",
"jua": "Júma",
"jub": "Wannu",
"juc": "Jurchen",
"jud": "Worodougou",
"juh": "Hõne",
"jui": "Ngadjuri",
"juk": "Wapan",
"jul": "Jirel",
"jum": "Jumjum",
"jun": "Juang",
"juo": "Jiba",
"jup": "Hupdë",
"jur": "Jurúna",
"jus": "Jumla Sign Language",
"jut": "Jutish",
"juu": "Ju",
"juw": "Wãpha",
"juy": "Juray",
"jv": "Javanese",
"jvd": "Javindo",
"jvn": "Caribbean Javanese",
"jwi": "Jwira-Pepesa",
"jya": "Jiarong",
"jye": "Judeo-Yemeni Arabic",
"jyy": "Jaya",
"ka": "Georgian",
"kaa": "Kara-Kalpak; Karakalpak",
"kab": "Kabyle",
"kac": "Kachin; Jingpho",
"kad": "Adara",
"kae": "Ketangalan",
"kaf": "Katso",
"kag": "Kajaman",
"kah": "Kara (Central African Republic)",
"kai": "Karekare",
"kaj": "Jju",
"kak": "Kalanguya; Kayapa Kallahan",
"kam": "Kamba (Kenya)",
"kao": "Xaasongaxango",
"kap": "Bezhta",
"kaq": "Capanahua",
"kar": "Karen languages",
"kav": "Katukína",
"kaw": "Kawi",
"kax": "Kao",
"kay": "Kamayurá",
"kba": "Kalarko",
"kbb": "Kaxuiâna",
"kbc": "Kadiwéu",
"kbd": "Kabardian",
"kbe": "Kanju",
"kbg": "Khamba",
"kbh": "Camsá",
"kbi": "Kaptiau",
"kbj": "Kari",
"kbk": "Grass Koiari",
"kbl": "Kanembu",
"kbm": "Iwal",
"kbn": "Kare (Central African Republic)",
"kbo": "Keliko",
"kbp": "Kabiyè",
"kbq": "Kamano",
"kbr": "Kafa",
"kbs": "Kande",
"kbt": "Abadi",
"kbu": "Kabutra",
"kbv": "Dera (Indonesia)",
"kbw": "Kaiep",
"kbx": "Ap Ma",
"kby": "Manga Kanuri",
"kbz": "Duhwa",
"kca": "Khanty",
"kcb": "Kawacha",
"kcc": "Lubila",
"kcd": "Ngkâlmpw Kanum",
"kce": "Kaivi",
"kcf": "Ukaan",
"kcg": "Tyap",
"kch": "Vono",
"kci": "Kamantan",
"kcj": "Kobiana",
"kck": "Kalanga",
"kcl": "Kela (Papua New Guinea); Kala",
"kcm": "Gula (Central African Republic)",
"kcn": "Nubi",
"kco": "Kinalakna",
"kcp": "Kanga",
"kcq": "Kamo",
"kcr": "Katla",
"kcs": "Koenoem",
"kct": "Kaian",
"kcu": "Kami (Tanzania)",
"kcv": "Kete",
"kcw": "Kabwari",
"kcx": "Kachama-Ganjule",
"kcy": "Korandje",
"kcz": "Konongo",
"kda": "Worimi",
"kdc": "Kutu",
"kdd": "Yankunytjatjara",
"kde": "Makonde",
"kdf": "Mamusi",
"kdg": "Seba",
"kdh": "Tem",
"kdi": "Kumam",
"kdj": "Karamojong",
"kdk": "Numèè; Kwényi",
"kdl": "Tsikimba",
"kdm": "Kagoma",
"kdn": "Kunda",
"kdo": "Kordofanian languages",
"kdp": "Kaningdon-Nindem",
"kdq": "Koch",
"kdr": "Karaim",
"kdt": "Kuy",
"kdu": "Kadaru",
"kdw": "Koneraw",
"kdx": "Kam",
"kdy": "Keder; Keijar",
"kdz": "Kwaja",
"kea": "Kabuverdianu",
"keb": "Kélé",
"kec": "Keiga",
"ked": "Kerewe",
"kee": "Eastern Keres",
"kef": "Kpessi",
"keg": "Tese",
"keh": "Keak",
"kei": "Kei",
"kej": "Kadar",
"kek": "Kekchí",
"kel": "Kela (Democratic Republic of Congo)",
"kem": "Kemak",
"ken": "Kenyang",
"keo": "Kakwa",
"kep": "Kaikadi",
"keq": "Kamar",
"ker": "Kera",
"kes": "Kugbo",
"ket": "Ket",
"keu": "Akebu",
"kev": "Kanikkaran",
"kew": "West Kewa",
"kex": "Kukna",
"key": "Kupia",
"kez": "Kukele",
"kfa": "Kodava",
"kfb": "Northwestern Kolami",
"kfc": "Konda-Dora",
"kfd": "Korra Koraga",
"kfe": "Kota (India)",
"kff": "Koya",
"kfg": "Kudiya",
"kfh": "Kurichiya",
"kfi": "Kannada Kurumba",
"kfj": "Kemiehua",
"kfk": "Kinnauri",
"kfl": "Kung",
"kfm": "Khunsari",
"kfn": "Kuk",
"kfo": "Koro (Côte d'Ivoire)",
"kfp": "Korwa",
"kfq": "Korku",
"kfr": "Kachhi; Kutchi",
"kfs": "Bilaspuri",
"kft": "Kanjari",
"kfu": "Katkari",
"kfv": "Kurmukar",
"kfw": "Kharam Naga",
"kfx": "Kullu Pahari",
"kfy": "Kumaoni",
"kfz": "Koromfé",
"kg": "Kongo",
"kga": "Koyaga",
"kgb": "Kawe",
"kge": "Komering",
"kgf": "Kube",
"kgg": "Kusunda",
"kgi": "Selangor Sign Language",
"kgj": "Gamale Kham",
"kgk": "Kaiwá",
"kgl": "Kunggari",
"kgm": "Karipúna",
"kgn": "Karingani",
"kgo": "Krongo",
"kgp": "Kaingang",
"kgq": "Kamoro",
"kgr": "Abun",
"kgs": "Kumbainggar",
"kgt": "Somyev",
"kgu": "Kobol",
"kgv": "Karas",
"kgw": "Karon Dori",
"kgx": "Kamaru",
"kgy": "Kyerung",
"kha": "Khasi",
"khb": "Lü",
"khc": "Tukang Besi North",
"khd": "Bädi Kanum",
"khe": "Korowai",
"khf": "Khuen",
"khg": "Khams Tibetan",
"khh": "Kehu",
"khi": "Khoisan languages",
"khj": "Kuturmi",
"khk": "Halh Mongolian",
"khl": "Lusi",
"khn": "Khandesi",
"kho": "Khotanese; Sakan",
"khp": "Kapori; Kapauri",
"khq": "Koyra Chiini Songhay",
"khr": "Kharia",
"khs": "Kasua",
"kht": "Khamti",
"khu": "Nkhumbi",
"khv": "Khvarshi",
"khw": "Khowar",
"khx": "Kanu",
"khy": "Kele (Democratic Republic of Congo)",
"khz": "Keapara",
"ki": "Kikuyu; Gikuyu",
"kia": "Kim",
"kib": "Koalib",
"kic": "Kickapoo",
"kid": "Koshin",
"kie": "Kibet",
"kif": "Eastern Parbate Kham",
"kig": "Kimaama; Kimaghima",
"kih": "Kilmeri",
"kii": "Kitsai",
"kij": "Kilivila",
"kil": "Kariya",
"kim": "Karagas",
"kio": "Kiowa",
"kip": "Sheshi Kham",
"kiq": "Kosadle; Kosare",
"kis": "Kis",
"kit": "Agob",
"kiu": "Kirmanjki (individual language)",
"kiv": "Kimbu",
"kiw": "Northeast Kiwai",
"kix": "Khiamniungan Naga",
"kiy": "Kirikiri",
"kiz": "Kisi",
"kj": "Kuanyama; Kwanyama",
"kja": "Mlap",
"kjb": "Q'anjob'al; Kanjobal",
"kjc": "Coastal Konjo",
"kjd": "Southern Kiwai",
"kje": "Kisar",
"kjg": "Khmu",
"kjh": "Khakas",
"kji": "Zabana",
"kjj": "Khinalugh",
"kjk": "Highland Konjo",
"kjl": "Western Parbate Kham",
"kjm": "Kháng",
"kjn": "Kunjen",
"kjo": "Harijan Kinnauri",
"kjp": "Pwo Eastern Karen",
"kjq": "Western Keres",
"kjr": "Kurudu",
"kjs": "East Kewa",
"kjt": "Phrae Pwo Karen",
"kju": "Kashaya",
"kjv": "Kaikavian Literary Language",
"kjx": "Ramopa",
"kjy": "Erave",
"kjz": "Bumthangkha",
"kk": "Kazakh",
"kka": "Kakanda",
"kkb": "Kwerisa",
"kkc": "Odoodee",
"kkd": "Kinuku",
"kke": "Kakabe",
"kkf": "Kalaktang Monpa",
"kkg": "Mabaka Valley Kalinga",
"kkh": "Khün",
"kki": "Kagulu",
"kkj": "Kako",
"kkk": "Kokota",
"kkl": "Kosarek Yale",
"kkm": "Kiong",
"kkn": "Kon Keu",
"kko": "Karko",
"kkp": "Gugubera; Koko-Bera",
"kkq": "Kaeku",
"kkr": "Kir-Balar",
"kks": "Giiwo",
"kkt": "Koi",
"kku": "Tumi",
"kkv": "Kangean",
"kkw": "Teke-Kukuya",
"kkx": "Kohin",
"kky": "Guugu Yimidhirr; Guguyimidjir",
"kkz": "Kaska",
"kl": "Kalaallisut; Greenlandic",
"kla": "Klamath-Modoc",
"klb": "Kiliwa",
"klc": "Kolbila",
"kld": "Gamilaraay",
"kle": "Kulung (Nepal)",
"klf": "Kendeje",
"klg": "Tagakaulo",
"klh": "Weliki",
"kli": "Kalumpang",
"klj": "Khalaj",
"klk": "Kono (Nigeria)",
"kll": "Kagan Kalagan",
"klm": "Migum",
"kln": "Kalenjin",
"klo": "Kapya",
"klp": "Kamasa",
"klq": "Rumu",
"klr": "Khaling",
"kls": "Kalasha",
"klt": "Nukna",
"klu": "Klao",
"klv": "Maskelynes",
"klw": "Tado; Lindu",
"klx": "Koluwawa",
"kly": "Kalao",
"klz": "Kabola",
"km": "Khmer; Central Khmer",
"kma": "Konni",
"kmb": "Kimbundu",
"kmc": "Southern Dong",
"kmd": "Majukayang Kalinga",
"kme": "Bakole",
"kmf": "Kare (Papua New Guinea)",
"kmg": "Kâte",
"kmh": "Kalam",
"kmi": "Kami (Nigeria)",
"kmj": "Kumarbhag Paharia",
"kmk": "Limos Kalinga",
"kml": "Tanudan Kalinga",
"kmm": "Kom (India)",
"kmn": "Awtuw",
"kmo": "Kwoma",
"kmp": "Gimme",
"kmq": "Kwama",
"kmr": "Northern Kurdish",
"kms": "Kamasau",
"kmt": "Kemtuik",
"kmu": "Kanite",
"kmv": "Karipúna Creole French",
"kmw": "Komo (Democratic Republic of Congo)",
"kmx": "Waboda",
"kmy": "Koma",
"kmz": "Khorasani Turkish",
"kn": "Kannada",
"kna": "Dera (Nigeria)",
"knb": "Lubuagan Kalinga",
"knc": "Central Kanuri",
"knd": "Konda",
"kne": "Kankanaey",
"knf": "Mankanya",
"kng": "Koongo",
"kni": "Kanufi",
"knj": "Western Kanjobal",
"knk": "Kuranko",
"knl": "Keninjal",
"knm": "Kanamarí",
"knn": "Konkani (individual language)",
"kno": "Kono (Sierra Leone)",
"knp": "Kwanja",
"knq": "Kintaq",
"knr": "Kaningra",
"kns": "Kensiu",
"knt": "Panoan Katukína",
"knu": "Kono (Guinea)",
"knv": "Tabo",
"knw": "Kung-Ekoka",
"knx": "Kendayan; Salako",
"kny": "Kanyok",
"knz": "Kalamsé",
"ko": "Korean",
"koa": "Konomala",
"koc": "Kpati",
"kod": "Kodi",
"koe": "Kacipo-Bale Suri",
"kof": "Kubi",
"kog": "Cogui; Kogi",
"koh": "Koyo",
"koi": "Komi-Permyak",
"kok": "Konkani (macrolanguage)",
"kol": "Kol (Papua New Guinea)",
"koo": "Konzo",
"kop": "Waube",
"koq": "Kota (Gabon)",
"kos": "Kosraean",
"kot": "Lagwan",
"kou": "Koke",
"kov": "Kudu-Camo",
"kow": "Kugama",
"koy": "Koyukon",
"koz": "Korak",
"kpa": "Kutto",
"kpb": "Mullu Kurumba",
"kpc": "Curripaco",
"kpd": "Koba",
"kpe": "Kpelle",
"kpf": "Komba",
"kpg": "Kapingamarangi",
"kph": "Kplang",
"kpi": "Kofei",
"kpj": "Karajá",
"kpk": "Kpan",
"kpl": "Kpala",
"kpm": "Koho",
"kpn": "Kepkiriwát",
"kpo": "Ikposo",
"kpq": "Korupun-Sela",
"kpr": "Korafe-Yegha",
"kps": "Tehit",
"kpt": "Karata",
"kpu": "Kafoa",
"kpv": "Komi-Zyrian",
"kpw": "Kobon",
"kpx": "Mountain Koiali",
"kpy": "Koryak",
"kpz": "Kupsabiny",
"kqa": "Mum",
"kqb": "Kovai",
"kqc": "Doromu-Koki",
"kqd": "Koy Sanjaq Surat",
"kqe": "Kalagan",
"kqf": "Kakabai",
"kqg": "Khe",
"kqh": "Kisankasa",
"kqi": "Koitabu",
"kqj": "Koromira",
"kqk": "Kotafon Gbe",
"kql": "Kyenele",
"kqm": "Khisa",
"kqn": "Kaonde",
"kqo": "Eastern Krahn",
"kqp": "Kimré",
"kqq": "Krenak",
"kqr": "Kimaragang",
"kqs": "Northern Kissi",
"kqt": "Klias River Kadazan",
"kqu": "Seroa",
"kqv": "Okolod",
"kqw": "Kandas",
"kqx": "Mser",
"kqy": "Koorete",
"kqz": "Korana",
"kr": "Kanuri",
"kra": "Kumhali",
"krb": "Karkin",
"krc": "Karachay-Balkar",
"krd": "Kairui-Midiki",
"kre": "Panará",
"krf": "Koro (Vanuatu)",
"krh": "Kurama",
"kri": "Krio",
"krj": "Kinaray-A",
"krk": "Kerek",
"krl": "Karelian",
"krn": "Sapo",
"kro": "Kru languages",
"krp": "Korop",
"krr": "Krung",
"krs": "Gbaya (Sudan)",
"krt": "Tumari Kanuri",
"kru": "Kurukh",
"krv": "Kavet",
"krw": "Western Krahn",
"krx": "Karon",
"kry": "Kryts",
"krz": "Sota Kanum",
"ks": "Kashmiri",
"ksa": "Shuwa-Zamani",
"ksb": "Shambala",
"ksc": "Southern Kalinga",
"ksd": "Kuanua",
"kse": "Kuni",
"ksf": "Bafia",
"ksg": "Kusaghe",
"ksh": "Kölsch",
"ksi": "Krisa; I'saka",
"ksj": "Uare",
"ksk": "Kansa",
"ksl": "Kumalu",
"ksm": "Kumba",
"ksn": "Kasiguranin",
"kso": "Kofa",
"ksp": "Kaba",
"ksq": "Kwaami",
"ksr": "Borong",
"kss": "Southern Kisi",
"kst": "Winyé",
"ksu": "Khamyang",
"ksv": "Kusu",
"ksw": "S'gaw Karen",
"ksx": "Kedang",
"ksy": "Kharia Thar",
"ksz": "Kodaku",
"kta": "Katua",
"ktb": "Kambaata",
"ktc": "Kholok",
"ktd": "Kokata; Kukatha",
"kte": "Nubri",
"ktf": "Kwami",
"ktg": "Kalkutung",
"kth": "Karanga",
"kti": "North Muyu",
"ktj": "Plapo Krumen",
"ktk": "Kaniet",
"ktl": "Koroshi",
"ktm": "Kurti",
"ktn": "Karitiâna",
"kto": "Kuot",
"ktp": "Kaduo",
"ktq": "Katabaga",
"kts": "South Muyu",
"ktt": "Ketum",
"ktu": "Kituba (Democratic Republic of Congo)",
"ktv": "Eastern Katu",
"ktw": "Kato",
"ktx": "Kaxararí",
"kty": "Kango (Bas-Uélé District)",
"ktz": "Juǀʼhoan; Juǀʼhoansi",
"ku": "Kurdish",
"kub": "Kutep",
"kuc": "Kwinsu",
"kud": "'Auhelawa",
"kue": "Kuman (Papua New Guinea)",
"kuf": "Western Katu",
"kug": "Kupa",
"kuh": "Kushi",
"kui": "Kuikúro-Kalapálo; Kalapalo",
"kuj": "Kuria",
"kuk": "Kepo'",
"kul": "Kulere",
"kum": "Kumyk",
"kun": "Kunama",
"kuo": "Kumukio",
"kup": "Kunimaipa",
"kuq": "Karipuna",
"kus": "Kusaal",
"kut": "Kutenai",
"kuu": "Upper Kuskokwim",
"kuv": "Kur",
"kuw": "Kpagua",
"kux": "Kukatja",
"kuy": "Kuuku-Ya'u",
"kuz": "Kunza",
"kv": "Komi",
"kva": "Bagvalal",
"kvb": "Kubu",
"kvc": "Kove",
"kvd": "Kui (Indonesia)",
"kve": "Kalabakan",
"kvf": "Kabalai",
"kvg": "Kuni-Boazi",
"kvh": "Komodo",
"kvi": "Kwang",
"kvj": "Psikye",
"kvk": "Korean Sign Language",
"kvl": "Kayaw",
"kvm": "Kendem",
"kvn": "Border Kuna",
"kvo": "Dobel",
"kvp": "Kompane",
"kvq": "Geba Karen",
"kvr": "Kerinci",
"kvt": "Lahta Karen; Lahta",
"kvu": "Yinbaw Karen",
"kvv": "Kola",
"kvw": "Wersing",
"kvx": "Parkari Koli",
"kvy": "Yintale Karen; Yintale",
"kvz": "Tsakwambo; Tsaukambo",
"kw": "Cornish",
"kwa": "Dâw",
"kwb": "Kwa",
"kwc": "Likwala",
"kwd": "Kwaio",
"kwe": "Kwerba",
"kwf": "Kwara'ae",
"kwg": "Sara Kaba Deme",
"kwh": "Kowiai",
"kwi": "Awa-Cuaiquer",
"kwj": "Kwanga",
"kwk": "Kwakiutl",
"kwl": "Kofyar",
"kwm": "Kwambi",
"kwn": "Kwangali",
"kwo": "Kwomtari",
"kwp": "Kodia",
"kwr": "Kwer",
"kws": "Kwese",
"kwt": "Kwesten",
"kwu": "Kwakum",
"kwv": "Sara Kaba Náà",
"kww": "Kwinti",
"kwx": "Khirwar",
"kwy": "San Salvador Kongo",
"kwz": "Kwadi",
"kxa": "Kairiru",
"kxb": "Krobu",
"kxc": "Konso; Khonso",
"kxd": "Brunei",
"kxf": "Manumanaw Karen; Manumanaw",
"kxh": "Karo (Ethiopia)",
"kxi": "Keningau Murut",
"kxj": "Kulfa",
"kxk": "Zayein Karen",
"kxm": "Northern Khmer",
"kxn": "Kanowit-Tanjong Melanau",
"kxo": "Kanoé",
"kxp": "Wadiyara Koli",
"kxq": "Smärky Kanum",
"kxr": "Koro (Papua New Guinea)",
"kxs": "Kangjia",
"kxt": "Koiwat",
"kxv": "Kuvi",
"kxw": "Konai",
"kxx": "Likuba",
"kxy": "Kayong",
"kxz": "Kerewo",
"ky": "Kirghiz; Kyrgyz",
"kya": "Kwaya",
"kyb": "Butbut Kalinga",
"kyc": "Kyaka",
"kyd": "Karey",
"kye": "Krache",
"kyf": "Kouya",
"kyg": "Keyagana",
"kyh": "Karok",
"kyi": "Kiput",
"kyj": "Karao",
"kyk": "Kamayo",
"kyl": "Kalapuya",
"kym": "Kpatili",
"kyn": "Northern Binukidnon",
"kyo": "Kelon",
"kyp": "Kang",
"kyq": "Kenga",
"kyr": "Kuruáya",
"kys": "Baram Kayan",
"kyt": "Kayagar",
"kyu": "Western Kayah",
"kyv": "Kayort",
"kyw": "Kudmali",
"kyx": "Rapoisi",
"kyy": "Kambaira",
"kyz": "Kayabí",
"kza": "Western Karaboro",
"kzb": "Kaibobo",
"kzc": "Bondoukou Kulango",
"kzd": "Kadai",
"kze": "Kosena",
"kzf": "Da'a Kaili",
"kzg": "Kikai",
"kzi": "Kelabit",
"kzk": "Kazukuru",
"kzl": "Kayeli",
"kzm": "Kais",
"kzn": "Kokola",
"kzo": "Kaningi",
"kzp": "Kaidipang",
"kzq": "Kaike",
"kzr": "Karang",
"kzs": "Sugut Dusun",
"kzu": "Kayupulau",
"kzv": "Komyandaret",
"kzw": "Karirí-Xocó",
"kzx": "Kamarian",
"kzy": "Kango (Tshopo District)",
"kzz": "Kalabra",
"la": "Latin",
"laa": "Southern Subanen",
"lab": "Linear A",
"lac": "Lacandon",
"lad": "Ladino",
"lae": "Pattani",
"laf": "Lafofa",
"lag": "Langi",
"lah": "Lahnda",
"lai": "Lambya",
"laj": "Lango (Uganda)",
"lal": "Lalia",
"lam": "Lamba",
"lan": "Laru",
"lap": "Laka (Chad)",
"laq": "Qabiao",
"lar": "Larteh",
"las": "Lama (Togo)",
"lau": "Laba",
"law": "Lauje",
"lax": "Tiwa",
"lay": "Lama Bai",
"laz": "Aribwatsa",
"lb": "Luxembourgish; Letzeburgesch",
"lbb": "Label",
"lbc": "Lakkia",
"lbe": "Lak",
"lbf": "Tinani",
"lbg": "Laopang",
"lbi": "La'bi",
"lbj": "Ladakhi",
"lbk": "Central Bontok",
"lbl": "Libon Bikol",
"lbm": "Lodhi",
"lbn": "Rmeet",
"lbo": "Laven",
"lbq": "Wampar",
"lbr": "Lohorung",
"lbs": "Libyan Sign Language",
"lbt": "Lachi",
"lbu": "Labu",
"lbv": "Lavatbura-Lamusong",
"lbw": "Tolaki",
"lbx": "Lawangan",
"lby": "Lamalama; Lamu-Lamu",
"lbz": "Lardil",
"lcc": "Legenyem",
"lcd": "Lola",
"lce": "Loncong; Sekak",
"lcf": "Lubu",
"lch": "Luchazi",
"lcl": "Lisela",
"lcm": "Tungag",
"lcp": "Western Lawa",
"lcq": "Luhu",
"lcs": "Lisabata-Nuniali",
"lda": "Kla-Dan",
"ldb": "Dũya",
"ldd": "Luri",
"ldg": "Lenyima",
"ldh": "Lamja-Dengsa-Tola",
"ldi": "Laari",
"ldj": "Lemoro",
"ldk": "Leelau",
"ldl": "Kaan",
"ldm": "Landoma",
"ldn": "Láadan",
"ldo": "Loo",
"ldp": "Tso",
"ldq": "Lufu",
"lea": "Lega-Shabunda",
"leb": "Lala-Bisa",
"lec": "Leco",
"led": "Lendu",
"lee": "Lyélé",
"lef": "Lelemi",
"leh": "Lenje",
"lei": "Lemio",
"lej": "Lengola",
"lek": "Leipon",
"lel": "Lele (Democratic Republic of Congo)",
"lem": "Nomaande",
"len": "Lenca",
"leo": "Leti (Cameroon)",
"lep": "Lepcha",
"leq": "Lembena",
"ler": "Lenkau",
"les": "Lese",
"let": "Lesing-Gelimi; Amio-Gelimi",
"leu": "Kara (Papua New Guinea)",
"lev": "Lamma",
"lew": "Ledo Kaili",
"lex": "Luang",
"ley": "Lemolang",
"lez": "Lezghian",
"lfa": "Lefa",
"lfn": "Lingua Franca Nova",
"lg": "Ganda; Luganda",
"lga": "Lungga",
"lgb": "Laghu",
"lgg": "Lugbara",
"lgh": "Laghuu",
"lgi": "Lengilu",
"lgk": "Lingarak; Neverver",
"lgl": "Wala",
"lgm": "Lega-Mwenga",
"lgn": "T'apo; Opuuo",
"lgo": "Lango (South Sudan)",
"lgq": "Logba",
"lgr": "Lengo",
"lgt": "Pahi",
"lgu": "Longgu",
"lgz": "Ligenza",
"lha": "Laha (Viet Nam)",
"lhh": "Laha (Indonesia)",
"lhi": "Lahu Shi",
"lhl": "Lahul Lohar",
"lhm": "Lhomi",
"lhn": "Lahanan",
"lhp": "Lhokpu",
"lhs": "Mlahsö",
"lht": "Lo-Toga",
"lhu": "Lahu",
"li": "Limburgan; Limburger; Limburgish",
"lia": "West-Central Limba",
"lib": "Likum",
"lic": "Hlai",
"lid": "Nyindrou",
"lie": "Likila",
"lif": "Limbu",
"lig": "Ligbi",
"lih": "Lihir",
"lij": "Ligurian",
"lik": "Lika",
"lil": "Lillooet",
"lio": "Liki",
"lip": "Sekpele",
"liq": "Libido",
"lir": "Liberian English",
"lis": "Lisu",
"liu": "Logorik",
"liv": "Liv",
"liw": "Col",
"lix": "Liabuku",
"liy": "Banda-Bambari",
"liz": "Libinza",
"lja": "Golpa",
"lje": "Rampi",
"lji": "Laiyolo",
"ljl": "Li'o",
"ljp": "Lampung Api",
"ljw": "Yirandali",
"ljx": "Yuru",
"lka": "Lakalei",
"lkb": "Kabras; Lukabaras",
"lkc": "Kucong",
"lkd": "Lakondê",
"lke": "Kenyi",
"lkh": "Lakha",
"lki": "Laki",
"lkj": "Remun",
"lkl": "Laeko-Libuat",
"lkm": "Kalaamaya",
"lkn": "Lakon; Vure",
"lko": "Khayo; Olukhayo",
"lkr": "Päri",
"lks": "Kisa; Olushisa",
"lkt": "Lakota",
"lku": "Kungkari",
"lky": "Lokoya",
"lla": "Lala-Roba",
"llb": "Lolo",
"llc": "Lele (Guinea)",
"lld": "Ladin",
"lle": "Lele (Papua New Guinea)",
"llf": "Hermit",
"llg": "Lole",
"llh": "Lamu",
"lli": "Teke-Laali",
"llj": "Ladji Ladji",
"llk": "Lelak",
"lll": "Lilau",
"llm": "Lasalimu",
"lln": "Lele (Chad)",
"llp": "North Efate",
"llq": "Lolak",
"lls": "Lithuanian Sign Language",
"llu": "Lau",
"llx": "Lauan",
"lma": "East Limba",
"lmb": "Merei",
"lmc": "Limilngan",
"lmd": "Lumun",
"lme": "Pévé",
"lmf": "South Lembata",
"lmg": "Lamogai",
"lmh": "Lambichhong",
"lmi": "Lombi",
"lmj": "West Lembata",
"lmk": "Lamkang",
"lml": "Hano",
"lmn": "Lambadi",
"lmo": "Lombard",
"lmp": "Limbum",
"lmq": "Lamatuka",
"lmr": "Lamalera",
"lmu": "Lamenu",
"lmv": "Lomaiviti",
"lmw": "Lake Miwok",
"lmx": "Laimbue",
"lmy": "Lamboya",
"ln": "Lingala",
"lna": "Langbashe",
"lnb": "Mbalanhu",
"lnd": "Lundayeh; Lun Bawang",
"lng": "Langobardic",
"lnh": "Lanoh",
"lni": "Daantanai'",
"lnj": "Leningitij",
"lnl": "South Central Banda",
"lnm": "Langam",
"lnn": "Lorediakarkar",
"lns": "Lamnso'",
"lnu": "Longuda",
"lnw": "Lanima",
"lnz": "Lonzo",
"lo": "Lao",
"loa": "Loloda",
"lob": "Lobi",
"loc": "Inonhan",
"loe": "Saluan",
"lof": "Logol",
"log": "Logo",
"loh": "Narim",
"loi": "Loma (Côte d'Ivoire)",
"loj": "Lou",
"lok": "Loko",
"lol": "Mongo",
"lom": "Loma (Liberia)",
"lon": "Malawi Lomwe",
"loo": "Lombo",
"lop": "Lopa",
"loq": "Lobala",
"lor": "Téén",
"los": "Loniu",
"lot": "Otuho",
"lou": "Louisiana Creole",
"lov": "Lopi",
"low": "Tampias Lobu",
"lox": "Loun",
"loy": "Loke",
"loz": "Lozi",
"lpa": "Lelepa",
"lpe": "Lepki",
"lpn": "Long Phuri Naga",
"lpo": "Lipo",
"lpx": "Lopit",
"lqr": "Logir",
"lra": "Rara Bakati'",
"lrc": "Northern Luri",
"lre": "Laurentian",
"lrg": "Laragia",
"lri": "Marachi; Olumarachi",
"lrk": "Loarki",
"lrl": "Lari",
"lrm": "Marama; Olumarama",
"lrn": "Lorang",
"lro": "Laro",
"lrr": "Southern Yamphu",
"lrt": "Larantuka Malay",
"lrv": "Larevat",
"lrz": "Lemerig",
"lsa": "Lasgerdi",
"lsb": "Burundian Sign Language; Langue des Signes Burundaise",
"lsc": "Albarradas Sign Language; Lengua de señas Albarradas",
"lsd": "Lishana Deni",
"lse": "Lusengo",
"lsh": "Lish",
"lsi": "Lashi",
"lsl": "Latvian Sign Language",
"lsm": "Saamia; Olusamia",
"lsn": "Tibetan Sign Language",
"lso": "Laos Sign Language",
"lsp": "Panamanian Sign Language; Lengua de Señas Panameñas",
"lsr": "Aruop",
"lss": "Lasi",
"lst": "Trinidad and Tobago Sign Language",
"lsv": "Sivia Sign Language",
"lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise",
"lsy": "Mauritian Sign Language",
"lt": "Lithuanian",
"ltc": "Late Middle Chinese",
"ltg": "Latgalian",
"lth": "Thur",
"lti": "Leti (Indonesia)",
"ltn": "Latundê",
"lto": "Tsotso; Olutsotso",
"lts": "Tachoni; Lutachoni",
"ltu": "Latu",
"lu": "Luba-Katanga",
"lua": "Luba-Lulua",
"luc": "Aringa",
"lud": "Ludian",
"lue": "Luvale",
"luf": "Laua",
"lui": "Luiseno",
"luj": "Luna",
"luk": "Lunanakha",
"lul": "Olu'bo",
"lum": "Luimbi",
"lun": "Lunda",
"luo": "Luo (Kenya and Tanzania); Dholuo",
"lup": "Lumbu",
"luq": "Lucumi",
"lur": "Laura",
"lus": "Lushai",
"lut": "Lushootseed",
"luu": "Lumba-Yakkha",
"luv": "Luwati",
"luw": "Luo (Cameroon)",
"luy": "Luyia; Oluluyia",
"luz": "Southern Luri",
"lv": "Latvian",
"lva": "Maku'a",
"lvi": "Lavi",
"lvk": "Lavukaleve",
"lvs": "Standard Latvian",
"lvu": "Levuka",
"lwa": "Lwalu",
"lwe": "Lewo Eleng",
"lwg": "Wanga; Oluwanga",
"lwh": "White Lachi",
"lwl": "Eastern Lawa",
"lwm": "Laomian",
"lwo": "Luwo",
"lws": "Malawian Sign Language",
"lwt": "Lewotobi",
"lwu": "Lawu",
"lww": "Lewo",
"lxm": "Lakurumau",
"lya": "Layakha",
"lyg": "Lyngngam",
"lyn": "Luyana",
"lzh": "Literary Chinese",
"lzl": "Litzlitz",
"lzn": "Leinong Naga",
"lzz": "Laz",
"maa": "San Jerónimo Tecóatl Mazatec",
"mab": "Yutanduchi Mixtec",
"mad": "Madurese",
"mae": "Bo-Rukul",
"maf": "Mafa",
"mag": "Magahi",
"mai": "Maithili",
"maj": "Jalapa De Díaz Mazatec",
"mak": "Makasar",
"mam": "Mam",
"man": "Mandingo; Manding",
"map": "Austronesian languages",
"maq": "Chiquihuitlán Mazatec",
"mas": "Masai",
"mat": "San Francisco Matlatzinca",
"mau": "Huautla Mazatec",
"mav": "Sateré-Mawé",
"maw": "Mampruli",
"max": "North Moluccan Malay",
"maz": "Central Mazahua",
"mba": "Higaonon",
"mbb": "Western Bukidnon Manobo",
"mbc": "Macushi",
"mbd": "Dibabawon Manobo",
"mbe": "Molale",
"mbf": "Baba Malay",
"mbh": "Mangseng",
"mbi": "Ilianen Manobo",
"mbj": "Nadëb",
"mbk": "Malol",
"mbl": "Maxakalí",
"mbm": "Ombamba",
"mbn": "Macaguán",
"mbo": "Mbo (Cameroon)",
"mbp": "Malayo",
"mbq": "Maisin",
"mbr": "Nukak Makú",
"mbs": "Sarangani Manobo",
"mbt": "Matigsalug Manobo",
"mbu": "Mbula-Bwazza",
"mbv": "Mbulungish",
"mbw": "Maring",
"mbx": "Mari (East Sepik Province)",
"mby": "Memoni",
"mbz": "Amoltepec Mixtec",
"mca": "Maca",
"mcb": "Machiguenga",
"mcc": "Bitur",
"mcd": "Sharanahua",
"mce": "Itundujia Mixtec",
"mcf": "Matsés",
"mcg": "Mapoyo",
"mch": "Maquiritari",
"mci": "Mese",
"mcj": "Mvanip",
"mck": "Mbunda",
"mcl": "Macaguaje",
"mcm": "Malaccan Creole Portuguese",
"mcn": "Masana",
"mco": "Coatlán Mixe",
"mcp": "Makaa",
"mcq": "Ese",
"mcr": "Menya",
"mcs": "Mambai",
"mct": "Mengisa",
"mcu": "Cameroon Mambila",
"mcv": "Minanibai",
"mcw": "Mawa (Chad)",
"mcx": "Mpiemo",
"mcy": "South Watut",
"mcz": "Mawan",
"mda": "Mada (Nigeria)",
"mdb": "Morigi",
"mdc": "Male (Papua New Guinea)",
"mdd": "Mbum",
"mde": "Maba (Chad)",
"mdf": "Moksha",
"mdg": "Massalat",
"mdh": "Maguindanaon",
"mdi": "Mamvu",
"mdj": "Mangbetu",
"mdk": "Mangbutu",
"mdl": "Maltese Sign Language",
"mdm": "Mayogo",
"mdn": "Mbati",
"mdp": "Mbala",
"mdq": "Mbole",
"mdr": "Mandar",
"mds": "Maria (Papua New Guinea)",
"mdt": "Mbere",
"mdu": "Mboko",
"mdv": "Santa Lucía Monteverde Mixtec",
"mdw": "Mbosi",
"mdx": "Dizin",
"mdy": "Male (Ethiopia)",
"mdz": "Suruí Do Pará",
"mea": "Menka",
"meb": "Ikobi",
"mec": "Marra",
"med": "Melpa",
"mee": "Mengen",
"mef": "Megam",
"meh": "Southwestern Tlaxiaco Mixtec",
"mei": "Midob",
"mej": "Meyah",
"mek": "Mekeo",
"mel": "Central Melanau",
"mem": "Mangala",
"men": "Mende (Sierra Leone)",
"meo": "Kedah Malay",
"mep": "Miriwoong",
"meq": "Merey",
"mer": "Meru",
"mes": "Masmaje",
"met": "Mato",
"meu": "Motu",
"mev": "Mano",
"mew": "Maaka",
"mey": "Hassaniyya",
"mez": "Menominee",
"mfa": "Pattani Malay",
"mfb": "Bangka",
"mfc": "Mba",
"mfd": "Mendankwe-Nkwen",
"mfe": "Morisyen",
"mff": "Naki",
"mfg": "Mogofin",
"mfh": "Matal",
"mfi": "Wandala",
"mfj": "Mefele",
"mfk": "North Mofu",
"mfl": "Putai",
"mfm": "Marghi South",
"mfn": "Cross River Mbembe",
"mfo": "Mbe",
"mfp": "Makassar Malay",
"mfq": "Moba",
"mfr": "Marrithiyel",
"mfs": "Mexican Sign Language",
"mft": "Mokerang",
"mfu": "Mbwela",
"mfv": "Mandjak",
"mfw": "Mulaha",
"mfx": "Melo",
"mfy": "Mayo",
"mfz": "Mabaan",
"mg": "Malagasy",
"mga": "Middle Irish (900-1200)",
"mgb": "Mararit",
"mgc": "Morokodo",
"mgd": "Moru",
"mge": "Mango",
"mgf": "Maklew",
"mgg": "Mpumpong",
"mgh": "Makhuwa-Meetto",
"mgi": "Lijili",
"mgj": "Abureni",
"mgk": "Mawes",
"mgl": "Maleu-Kilenge",
"mgm": "Mambae",
"mgn": "Mbangi",
"mgo": "Meta'",
"mgp": "Eastern Magar",
"mgq": "Malila",
"mgr": "Mambwe-Lungu",
"mgs": "Manda (Tanzania)",
"mgt": "Mongol",
"mgu": "Mailu",
"mgv": "Matengo",
"mgw": "Matumbi",
"mgy": "Mbunga",
"mgz": "Mbugwe",
"mh": "Marshallese",
"mha": "Manda (India)",
"mhb": "Mahongwe",
"mhc": "Mocho",
"mhd": "Mbugu",
"mhe": "Besisi; Mah Meri",
"mhf": "Mamaa",
"mhg": "Margu",
"mhi": "Ma'di",
"mhj": "Mogholi",
"mhk": "Mungaka",
"mhl": "Mauwake",
"mhm": "Makhuwa-Moniga",
"mhn": "Mócheno",
"mho": "Mashi (Zambia)",
"mhp": "Balinese Malay",
"mhq": "Mandan",
"mhr": "Eastern Mari",
"mhs": "Buru (Indonesia)",
"mht": "Mandahuaca",
"mhu": "Digaro-Mishmi; Darang Deng",
"mhw": "Mbukushu",
"mhx": "Maru; Lhaovo",
"mhy": "Ma'anyan",
"mhz": "Mor (Mor Islands)",
"mi": "Maori",
"mia": "Miami",
"mib": "Atatláhuca Mixtec",
"mic": "Mi'kmaq; Micmac",
"mid": "Mandaic",
"mie": "Ocotepec Mixtec",
"mif": "Mofu-Gudur",
"mig": "San Miguel El Grande Mixtec",
"mih": "Chayuco Mixtec",
"mii": "Chigmecatitlán Mixtec",
"mij": "Abar; Mungbam",
"mik": "Mikasuki",
"mil": "Peñoles Mixtec",
"mim": "Alacatlatzala Mixtec",
"min": "Minangkabau",
"mio": "Pinotepa Nacional Mixtec",
"mip": "Apasco-Apoala Mixtec",
"miq": "Mískito",
"mir": "Isthmus Mixe",
"mit": "Southern Puebla Mixtec",
"miu": "Cacaloxtepec Mixtec",
"miw": "Akoye",
"mix": "Mixtepec Mixtec",
"miy": "Ayutla Mixtec",
"miz": "Coatzospan Mixtec",
"mjb": "Makalero",
"mjc": "San Juan Colorado Mixtec",
"mjd": "Northwest Maidu",
"mje": "Muskum",
"mjg": "Tu",
"mjh": "Mwera (Nyasa)",
"mji": "Kim Mun",
"mjj": "Mawak",
"mjk": "Matukar",
"mjl": "Mandeali",
"mjm": "Medebur",
"mjn": "Ma (Papua New Guinea)",
"mjo": "Malankuravan",
"mjp": "Malapandaram",
"mjq": "Malaryan",
"mjr": "Malavedan",
"mjs": "Miship",
"mjt": "Sauria Paharia",
"mju": "Manna-Dora",
"mjv": "Mannan",
"mjw": "Karbi",
"mjx": "Mahali",
"mjy": "Mahican",
"mjz": "Majhi",
"mk": "Macedonian",
"mka": "Mbre",
"mkb": "Mal Paharia",
"mkc": "Siliput",
"mke": "Mawchi",
"mkf": "Miya",
"mkg": "Mak (China)",
"mkh": "Mon-Khmer languages",
"mki": "Dhatki",
"mkj": "Mokilese",
"mkk": "Byep",
"mkl": "Mokole",
"mkm": "Moklen",
"mkn": "Kupang Malay",
"mko": "Mingang Doso",
"mkp": "Moikodi",
"mkq": "Bay Miwok",
"mkr": "Malas",
"mks": "Silacayoapan Mixtec",
"mkt": "Vamale",
"mku": "Konyanka Maninka",
"mkv": "Mafea",
"mkw": "Kituba (Congo)",
"mkx": "Kinamiging Manobo",
"mky": "East Makian",
"mkz": "Makasae",
"ml": "Malayalam",
"mla": "Malo",
"mlb": "Mbule",
"mlc": "Cao Lan",
"mle": "Manambu",
"mlf": "Mal",
"mlh": "Mape",
"mli": "Malimpung",
"mlj": "Miltu",
"mlk": "Ilwana; Kiwilwana",
"mll": "Malua Bay",
"mlm": "Mulam",
"mln": "Malango",
"mlo": "Mlomp",
"mlp": "Bargam",
"mlq": "Western Maninkakan",
"mlr": "Vame",
"mls": "Masalit",
"mlu": "To'abaita",
"mlv": "Motlav; Mwotlap",
"mlw": "Moloko",
"mlx": "Malfaxal; Naha'ai",
"mlz": "Malaynon",
"mma": "Mama",
"mmb": "Momina",
"mmc": "Michoacán Mazahua",
"mmd": "Maonan",
"mme": "Mae",
"mmf": "Mundat",
"mmg": "North Ambrym",
"mmh": "Mehináku",
"mmi": "Musar",
"mmj": "Majhwar",
"mmk": "Mukha-Dora",
"mml": "Man Met",
"mmm": "Maii",
"mmn": "Mamanwa",
"mmo": "Mangga Buang",
"mmp": "Siawi",
"mmq": "Musak",
"mmr": "Western Xiangxi Miao",
"mmt": "Malalamai",
"mmu": "Mmaala",
"mmv": "Miriti",
"mmw": "Emae",
"mmx": "Madak",
"mmy": "Migaama",
"mmz": "Mabaale",
"mn": "Mongolian",
"mna": "Mbula",
"mnb": "Muna",
"mnc": "Manchu",
"mnd": "Mondé",
"mne": "Naba",
"mnf": "Mundani",
"mng": "Eastern Mnong",
"mnh": "Mono (Democratic Republic of Congo)",
"mni": "Manipuri",
"mnj": "Munji",
"mnk": "Mandinka",
"mnl": "Tiale",
"mnm": "Mapena",
"mnn": "Southern Mnong",
"mno": "Manobo languages",
"mnp": "Min Bei Chinese",
"mnq": "Minriq",
"mnr": "Mono (USA)",
"mns": "Mansi",
"mnu": "Mer",
"mnv": "Rennell-Bellona",
"mnw": "Mon",
"mnx": "Manikion",
"mny": "Manyawa",
"mnz": "Moni",
"moa": "Mwan",
"moc": "Mocoví",
"mod": "Mobilian",
"moe": "Innu; Montagnais",
"mog": "Mongondow",
"moh": "Mohawk",
"moi": "Mboi",
"moj": "Monzombo",
"mok": "Morori",
"mom": "Mangue",
"moo": "Monom",
"mop": "Mopán Maya",
"moq": "Mor (Bomberai Peninsula)",
"mor": "Moro",
"mos": "Mossi",
"mot": "Barí",
"mou": "Mogum",
"mov": "Mohave",
"mow": "Moi (Congo)",
"mox": "Molima",
"moy": "Shekkacho",
"moz": "Mukulu; Gergiko",
"mpa": "Mpoto",
"mpb": "Malak Malak; Mullukmulluk",
"mpc": "Mangarrayi",
"mpd": "Machinere",
"mpe": "Majang",
"mpg": "Marba",
"mph": "Maung",
"mpi": "Mpade",
"mpj": "Martu Wangka; Wangkajunga",
"mpk": "Mbara (Chad)",
"mpl": "Middle Watut",
"mpm": "Yosondúa Mixtec",
"mpn": "Mindiri",
"mpo": "Miu",
"mpp": "Migabac",
"mpq": "Matís",
"mpr": "Vangunu",
"mps": "Dadibi",
"mpt": "Mian",
"mpu": "Makuráp",
"mpv": "Mungkip",
"mpw": "Mapidian",
"mpx": "Misima-Panaeati",
"mpy": "Mapia",
"mpz": "Mpi",
"mqa": "Maba (Indonesia)",
"mqb": "Mbuko",
"mqc": "Mangole",
"mqe": "Matepi",
"mqf": "Momuna",
"mqg": "Kota Bangun Kutai Malay",
"mqh": "Tlazoyaltepec Mixtec",
"mqi": "Mariri",
"mqj": "Mamasa",
"mqk": "Rajah Kabunsuwan Manobo",
"mql": "Mbelime",
"mqm": "South Marquesan",
"mqn": "Moronene",
"mqo": "Modole",
"mqp": "Manipa",
"mqq": "Minokok",
"mqr": "Mander",
"mqs": "West Makian",
"mqt": "Mok",
"mqu": "Mandari",
"mqv": "Mosimo",
"mqw": "Murupi",
"mqx": "Mamuju",
"mqy": "Manggarai",
"mqz": "Pano",
"mr": "Marathi",
"mra": "Mlabri",
"mrb": "Marino",
"mrc": "Maricopa",
"mrd": "Western Magar",
"mre": "Martha's Vineyard Sign Language",
"mrf": "Elseng",
"mrg": "Mising",
"mrh": "Mara Chin",
"mrj": "Western Mari",
"mrk": "Hmwaveke",
"mrl": "Mortlockese",
"mrm": "Merlav; Mwerlap",
"mrn": "Cheke Holo",
"mro": "Mru",
"mrp": "Morouas",
"mrq": "North Marquesan",
"mrr": "Maria (India)",
"mrs": "Maragus",
"mrt": "Marghi Central",
"mru": "Mono (Cameroon)",
"mrv": "Mangareva",
"mrw": "Maranao",
"mrx": "Maremgi; Dineor",
"mry": "Mandaya",
"mrz": "Marind",
"ms": "Malay (macrolanguage)",
"msb": "Masbatenyo",
"msc": "Sankaran Maninka",
"msd": "Yucatec Maya Sign Language",
"mse": "Musey",
"msf": "Mekwei",
"msg": "Moraid",
"msh": "Masikoro Malagasy",
"msi": "Sabah Malay",
"msj": "Ma (Democratic Republic of Congo)",
"msk": "Mansaka",
"msl": "Molof; Poule",
"msm": "Agusan Manobo",
"msn": "Vurës",
"mso": "Mombum",
"msp": "Maritsauá",
"msq": "Caac",
"msr": "Mongolian Sign Language",
"mss": "West Masela",
"msu": "Musom",
"msv": "Maslam",
"msw": "Mansoanka",
"msx": "Moresada",
"msy": "Aruamu",
"msz": "Momare",
"mt": "Maltese",
"mta": "Cotabato Manobo",
"mtb": "Anyin Morofo",
"mtc": "Munit",
"mtd": "Mualang",
"mte": "Mono (Solomon Islands)",
"mtf": "Murik (Papua New Guinea)",
"mtg": "Una",
"mth": "Munggui",
"mti": "Maiwa (Papua New Guinea)",
"mtj": "Moskona",
"mtk": "Mbe'",
"mtl": "Montol",
"mtm": "Mator",
"mtn": "Matagalpa",
"mto": "Totontepec Mixe",
"mtp": "Wichí Lhamtés Nocten",
"mtq": "Muong",
"mtr": "Mewari",
"mts": "Yora",
"mtt": "Mota",
"mtu": "Tututepec Mixtec",
"mtv": "Asaro'o",
"mtw": "Southern Binukidnon",
"mtx": "Tidaá Mixtec",
"mty": "Nabi",
"mua": "Mundang",
"mub": "Mubi",
"muc": "Ajumbu",
"mud": "Mednyj Aleut",
"mue": "Media Lengua",
"mug": "Musgu",
"muh": "Mündü",
"mui": "Musi",
"muj": "Mabire",
"muk": "Mugom",
"mum": "Maiwala",
"mun": "Munda languages",
"muo": "Nyong",
"mup": "Malvi",
"muq": "Eastern Xiangxi Miao",
"mur": "Murle",
"mus": "Creek",
"mut": "Western Muria",
"muu": "Yaaku",
"muv": "Muthuvan",
"mux": "Bo-Ung",
"muy": "Muyang",
"muz": "Mursi",
"mva": "Manam",
"mvb": "Mattole",
"mvd": "Mamboru",
"mve": "Marwari (Pakistan)",
"mvf": "Peripheral Mongolian",
"mvg": "Yucuañe Mixtec",
"mvh": "Mulgi",
"mvi": "Miyako",
"mvk": "Mekmek",
"mvl": "Mbara (Australia)",
"mvn": "Minaveha",
"mvo": "Marovo",
"mvp": "Duri",
"mvq": "Moere",
"mvr": "Marau",
"mvs": "Massep",
"mvt": "Mpotovoro",
"mvu": "Marfa",
"mvv": "Tagal Murut",
"mvw": "Machinga",
"mvx": "Meoswar",
"mvy": "Indus Kohistani",
"mvz": "Mesqan",
"mwa": "Mwatebu",
"mwb": "Juwal",
"mwc": "Are",
"mwe": "Mwera (Chimwera)",
"mwf": "Murrinh-Patha",
"mwg": "Aiklep",
"mwh": "Mouk-Aria",
"mwi": "Labo; Ninde",
"mwk": "Kita Maninkakan",
"mwl": "Mirandese",
"mwm": "Sar",
"mwn": "Nyamwanga",
"mwo": "Central Maewo",
"mwp": "Kala Lagaw Ya",
"mwq": "Mün Chin",
"mwr": "Marwari",
"mws": "Mwimbi-Muthambi",
"mwt": "Moken",
"mwu": "Mittu",
"mwv": "Mentawai",
"mww": "Hmong Daw",
"mwz": "Moingi",
"mxa": "Northwest Oaxaca Mixtec",
"mxb": "Tezoatlán Mixtec",
"mxc": "Manyika",
"mxd": "Modang",
"mxe": "Mele-Fila",
"mxf": "Malgbe",
"mxg": "Mbangala",
"mxh": "Mvuba",
"mxi": "Mozarabic",
"mxj": "Miju-Mishmi; Geman Deng",
"mxk": "Monumbo",
"mxl": "Maxi Gbe",
"mxm": "Meramera",
"mxn": "Moi (Indonesia)",
"mxo": "Mbowe",
"mxp": "Tlahuitoltepec Mixe",
"mxq": "Juquila Mixe",
"mxr": "Murik (Malaysia)",
"mxs": "Huitepec Mixtec",
"mxt": "Jamiltepec Mixtec",
"mxu": "Mada (Cameroon)",
"mxv": "Metlatónoc Mixtec",
"mxw": "Namo",
"mxx": "Mahou; Mawukakan",
"mxy": "Southeastern Nochixtlán Mixtec",
"mxz": "Central Masela",
"my": "Burmese",
"myb": "Mbay",
"myc": "Mayeka",
"mye": "Myene",
"myf": "Bambassi",
"myg": "Manta",
"myh": "Makah",
"myj": "Mangayat",
"myk": "Mamara Senoufo",
"myl": "Moma",
"mym": "Me'en",
"myn": "Mayan languages",
"myo": "Anfillo",
"myp": "Pirahã",
"myr": "Muniche",
"mys": "Mesmes",
"myu": "Mundurukú",
"myv": "Erzya",
"myw": "Muyuw",
"myx": "Masaaba",
"myy": "Macuna",
"myz": "Classical Mandaic",
"mza": "Santa María Zacatepec Mixtec",
"mzb": "Tumzabt",
"mzc": "Madagascar Sign Language",
"mzd": "Malimba",
"mze": "Morawa",
"mzg": "Monastic Sign Language",
"mzh": "Wichí Lhamtés Güisnay",
"mzi": "Ixcatlán Mazatec",
"mzj": "Manya",
"mzk": "Nigeria Mambila",
"mzl": "Mazatlán Mixe",
"mzm": "Mumuye",
"mzn": "Mazanderani",
"mzo": "Matipuhy",
"mzp": "Movima",
"mzq": "Mori Atas",
"mzr": "Marúbo",
"mzs": "Macanese",
"mzt": "Mintil",
"mzu": "Inapang",
"mzv": "Manza",
"mzw": "Deg",
"mzx": "Mawayana",
"mzy": "Mozambican Sign Language",
"mzz": "Maiadomu",
"na": "Nauru",
"naa": "Namla",
"nab": "Southern Nambikuára",
"nac": "Narak",
"nae": "Naka'ela",
"naf": "Nabak",
"nag": "Naga Pidgin",
"nah": "Nahuatl languages",
"nai": "North American Indian languages",
"naj": "Nalu",
"nak": "Nakanai",
"nal": "Nalik",
"nam": "Ngan'gityemerri",
"nan": "Min Nan Chinese",
"nao": "Naaba",
"nap": "Neapolitan",
"naq": "Khoekhoe; Nama (Namibia)",
"nar": "Iguta",
"nas": "Naasioi",
"nat": "Ca̱hungwa̱rya̱; Hungworo",
"naw": "Nawuri",
"nax": "Nakwi",
"nay": "Ngarrindjeri",
"naz": "Coatepec Nahuatl",
"nb": "Norwegian Bokmål",
"nba": "Nyemba",
"nbb": "Ndoe",
"nbc": "Chang Naga",
"nbd": "Ngbinda",
"nbe": "Konyak Naga",
"nbg": "Nagarchal",
"nbh": "Ngamo",
"nbi": "Mao Naga",
"nbj": "Ngarinyman",
"nbk": "Nake",
"nbm": "Ngbaka Ma'bo",
"nbn": "Kuri",
"nbo": "Nkukoli",
"nbp": "Nnam",
"nbq": "Nggem",
"nbr": "Numana",
"nbs": "Namibian Sign Language",
"nbt": "Na",
"nbu": "Rongmei Naga",
"nbv": "Ngamambo",
"nbw": "Southern Ngbandi",
"nby": "Ningera",
"nca": "Iyo",
"ncb": "Central Nicobarese",
"ncc": "Ponam",
"ncd": "Nachering",
"nce": "Yale",
"ncf": "Notsi",
"ncg": "Nisga'a",
"nch": "Central Huasteca Nahuatl",
"nci": "Classical Nahuatl",
"ncj": "Northern Puebla Nahuatl",
"nck": "Na-kara",
"ncl": "Michoacán Nahuatl",
"ncm": "Nambo",
"ncn": "Nauna",
"nco": "Sibe",
"ncq": "Northern Katang",
"ncr": "Ncane",
"ncs": "Nicaraguan Sign Language",
"nct": "Chothe Naga",
"ncu": "Chumburung",
"ncx": "Central Puebla Nahuatl",
"ncz": "Natchez",
"nd": "North Ndebele",
"nda": "Ndasa",
"ndb": "Kenswei Nsei",
"ndc": "Ndau",
"ndd": "Nde-Nsele-Nta",
"ndf": "Nadruvian",
"ndg": "Ndengereko",
"ndh": "Ndali",
"ndi": "Samba Leko",
"ndj": "Ndamba",
"ndk": "Ndaka",
"ndl": "Ndolo",
"ndm": "Ndam",
"ndn": "Ngundi",
"ndp": "Ndo",
"ndq": "Ndombe",
"ndr": "Ndoola",
"nds": "Low German; Low Saxon",
"ndt": "Ndunga",
"ndu": "Dugun",
"ndv": "Ndut",
"ndw": "Ndobo",
"ndx": "Nduga",
"ndy": "Lutos",
"ndz": "Ndogo",
"ne": "Nepali (macrolanguage)",
"nea": "Eastern Ngad'a",
"neb": "Toura (Côte d'Ivoire)",
"nec": "Nedebang",
"ned": "Nde-Gbite",
"nee": "Nêlêmwa-Nixumwak",
"nef": "Nefamese",
"neg": "Negidal",
"neh": "Nyenkha",
"nei": "Neo-Hittite",
"nej": "Neko",
"nek": "Neku",
"nem": "Nemi",
"nen": "Nengone",
"neo": "Ná-Meo",
"neq": "North Central Mixe",
"ner": "Yahadian",
"nes": "Bhoti Kinnauri",
"net": "Nete",
"neu": "Neo",
"nev": "Nyaheun",
"new": "Newari; Nepal Bhasa",
"nex": "Neme",
"ney": "Neyo",
"nez": "Nez Perce",
"nfa": "Dhao",
"nfd": "Ahwai",
"nfl": "Ayiwo; Äiwoo",
"nfr": "Nafaanra",
"nfu": "Mfumte",
"ng": "Ndonga",
"nga": "Ngbaka",
"ngb": "Northern Ngbandi",
"ngc": "Ngombe (Democratic Republic of Congo)",
"ngd": "Ngando (Central African Republic)",
"nge": "Ngemba",
"ngf": "Trans-New Guinea languages",
"ngg": "Ngbaka Manza",
"ngh": "Nǁng",
"ngi": "Ngizim",
"ngj": "Ngie",
"ngk": "Dalabon",
"ngl": "Lomwe",
"ngm": "Ngatik Men's Creole",
"ngn": "Ngwo",
"ngp": "Ngulu",
"ngq": "Ngurimi; Ngoreme",
"ngr": "Engdewu",
"ngs": "Gvoko",
"ngt": "Kriang; Ngeq",
"ngu": "Guerrero Nahuatl",
"ngv": "Nagumi",
"ngw": "Ngwaba",
"ngx": "Nggwahyi",
"ngy": "Tibea",
"ngz": "Ngungwel",
"nha": "Nhanda",
"nhb": "Beng",
"nhc": "Tabasco Nahuatl",
"nhd": "Chiripá; Ava Guaraní",
"nhe": "Eastern Huasteca Nahuatl",
"nhf": "Nhuwala",
"nhg": "Tetelcingo Nahuatl",
"nhh": "Nahari",
"nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl",
"nhk": "Isthmus-Cosoleacaque Nahuatl",
"nhm": "Morelos Nahuatl",
"nhn": "Central Nahuatl",
"nho": "Takuu",
"nhp": "Isthmus-Pajapan Nahuatl",
"nhq": "Huaxcaleca Nahuatl",
"nhr": "Naro",
"nht": "Ometepec Nahuatl",
"nhu": "Noone",
"nhv": "Temascaltepec Nahuatl",
"nhw": "Western Huasteca Nahuatl",
"nhx": "Isthmus-Mecayapan Nahuatl",
"nhy": "Northern Oaxaca Nahuatl",
"nhz": "Santa María La Alta Nahuatl",
"nia": "Nias",
"nib": "Nakame",
"nic": "Niger-Kordofanian languages",
"nid": "Ngandi",
"nie": "Niellim",
"nif": "Nek",
"nig": "Ngalakgan",
"nih": "Nyiha (Tanzania)",
"nii": "Nii",
"nij": "Ngaju",
"nik": "Southern Nicobarese",
"nil": "Nila",
"nim": "Nilamba",
"nin": "Ninzo",
"nio": "Nganasan",
"niq": "Nandi",
"nir": "Nimboran",
"nis": "Nimi",
"nit": "Southeastern Kolami",
"niu": "Niuean",
"niv": "Gilyak",
"niw": "Nimo",
"nix": "Hema",
"niy": "Ngiti",
"niz": "Ningil",
"nja": "Nzanyi",
"njb": "Nocte Naga",
"njd": "Ndonde Hamba",
"njh": "Lotha Naga",
"nji": "Gudanji",
"njj": "Njen",
"njl": "Njalgulgule",
"njm": "Angami Naga",
"njn": "Liangmai Naga",
"njo": "Ao Naga",
"njr": "Njerep",
"njs": "Nisa",
"njt": "Ndyuka-Trio Pidgin",
"nju": "Ngadjunmaya",
"njx": "Kunyi",
"njy": "Njyem",
"njz": "Nyishi",
"nka": "Nkoya",
"nkb": "Khoibu Naga",
"nkc": "Nkongho",
"nkd": "Koireng",
"nke": "Duke",
"nkf": "Inpui Naga",
"nkg": "Nekgini",
"nkh": "Khezha Naga",
"nki": "Thangal Naga",
"nkj": "Nakai",
"nkk": "Nokuku",
"nkm": "Namat",
"nkn": "Nkangala",
"nko": "Nkonya",
"nkp": "Niuatoputapu",
"nkq": "Nkami",
"nkr": "Nukuoro",
"nks": "North Asmat",
"nkt": "Nyika (Tanzania)",
"nku": "Bouna Kulango",
"nkv": "Nyika (Malawi and Zambia)",
"nkw": "Nkutu",
"nkx": "Nkoroo",
"nkz": "Nkari",
"nl": "Dutch; Flemish",
"nla": "Ngombale",
"nlc": "Nalca",
"nle": "East Nyala",
"nlg": "Gela",
"nli": "Grangali",
"nlj": "Nyali",
"nlk": "Ninia Yali",
"nll": "Nihali",
"nlm": "Mankiyali",
"nlo": "Ngul",
"nlq": "Lao Naga",
"nlu": "Nchumbulu",
"nlv": "Orizaba Nahuatl",
"nlw": "Walangama",
"nlx": "Nahali",
"nly": "Nyamal",
"nlz": "Nalögo",
"nma": "Maram Naga",
"nmb": "Big Nambas; V'ënen Taut",
"nmc": "Ngam",
"nmd": "Ndumu",
"nme": "Mzieme Naga",
"nmf": "Tangkhul Naga (India)",
"nmg": "Kwasio",
"nmh": "Monsang Naga",
"nmi": "Nyam",
"nmj": "Ngombe (Central African Republic)",
"nmk": "Namakura",
"nml": "Ndemli",
"nmm": "Manangba",
"nmn": "ǃXóõ",
"nmo": "Moyon Naga",
"nmp": "Nimanbur",
"nmq": "Nambya",
"nmr": "Nimbari",
"nms": "Letemboi",
"nmt": "Namonuito",
"nmu": "Northeast Maidu",
"nmv": "Ngamini",
"nmw": "Nimoa; Rifao",
"nmx": "Nama (Papua New Guinea)",
"nmy": "Namuyi",
"nmz": "Nawdm",
"nn": "Norwegian Nynorsk",
"nna": "Nyangumarta",
"nnb": "Nande",
"nnc": "Nancere",
"nnd": "West Ambae",
"nne": "Ngandyera",
"nnf": "Ngaing",
"nng": "Maring Naga",
"nnh": "Ngiemboon",
"nni": "North Nuaulu",
"nnj": "Nyangatom",
"nnk": "Nankina",
"nnl": "Northern Rengma Naga",
"nnm": "Namia",
"nnn": "Ngete",
"nnp": "Wancho Naga",
"nnq": "Ngindo",
"nnr": "Narungga",
"nnt": "Nanticoke",
"nnu": "Dwang",
"nnv": "Nugunu (Australia)",
"nnw": "Southern Nuni",
"nny": "Nyangga",
"nnz": "Nda'nda'",
"no": "Norwegian",
"noa": "Woun Meu",
"noc": "Nuk",
"nod": "Northern Thai",
"noe": "Nimadi",
"nof": "Nomane",
"nog": "Nogai",
"noh": "Nomu",
"noi": "Noiri",
"noj": "Nonuya",
"nok": "Nooksack",
"nol": "Nomlaki",
"nom": "Nocamán",
"non": "Old Norse",
"nop": "Numanggang",
"noq": "Ngongo",
"nos": "Eastern Nisu",
"not": "Nomatsiguenga",
"nou": "Ewage-Notu",
"nov": "Novial",
"now": "Nyambo",
"noy": "Noy",
"noz": "Nayi",
"npa": "Nar Phu",
"npb": "Nupbikha",
"npg": "Ponyo-Gongwang Naga",
"nph": "Phom Naga",
"npi": "Nepali (individual language)",
"npl": "Southeastern Puebla Nahuatl",
"npn": "Mondropolon",
"npo": "Pochuri Naga",
"nps": "Nipsan",
"npu": "Puimei Naga",
"npx": "Noipx",
"npy": "Napu",
"nqg": "Southern Nago",
"nqk": "Kura Ede Nago",
"nql": "Ngendelengo",
"nqm": "Ndom",
"nqn": "Nen",
"nqo": "N'Ko; N’Ko",
"nqq": "Kyan-Karyaw Naga",
"nqt": "Nteng",
"nqy": "Akyaung Ari Naga",
"nr": "South Ndebele",
"nra": "Ngom",
"nrb": "Nara",
"nrc": "Noric",
"nre": "Southern Rengma Naga",
"nrf": "Jèrriais; Guernésiais",
"nrg": "Narango",
"nri": "Chokri Naga",
"nrk": "Ngarla",
"nrl": "Ngarluma",
"nrm": "Narom",
"nrn": "Norn",
"nrp": "North Picene",
"nrr": "Norra; Nora",
"nrt": "Northern Kalapuya",
"nru": "Narua",
"nrx": "Ngurmbur",
"nrz": "Lala",
"nsa": "Sangtam Naga",
"nsb": "Lower Nossob",
"nsc": "Nshi",
"nsd": "Southern Nisu",
"nse": "Nsenga",
"nsf": "Northwestern Nisu",
"nsg": "Ngasa",
"nsh": "Ngoshie",
"nsi": "Nigerian Sign Language",
"nsk": "Naskapi",
"nsl": "Norwegian Sign Language",
"nsm": "Sumi Naga",
"nsn": "Nehan",
"nso": "Pedi; Northern Sotho; Sepedi",
"nsp": "Nepalese Sign Language",
"nsq": "Northern Sierra Miwok",
"nsr": "Maritime Sign Language",
"nss": "Nali",
"nst": "Tase Naga",
"nsu": "Sierra Negra Nahuatl",
"nsv": "Southwestern Nisu",
"nsw": "Navut",
"nsx": "Nsongo",
"nsy": "Nasal",
"nsz": "Nisenan",
"ntd": "Northern Tidung",
"nte": "Nathembo",
"ntg": "Ngantangarra",
"nti": "Natioro",
"ntj": "Ngaanyatjarra",
"ntk": "Ikoma-Nata-Isenye",
"ntm": "Nateni",
"nto": "Ntomba",
"ntp": "Northern Tepehuan",
"ntr": "Delo",
"ntu": "Natügu",
"ntw": "Nottoway",
"ntx": "Tangkhul Naga (Myanmar)",
"nty": "Mantsi",
"ntz": "Natanzi",
"nua": "Yuanga",
"nub": "Nubian languages",
"nuc": "Nukuini",
"nud": "Ngala",
"nue": "Ngundu",
"nuf": "Nusu",
"nug": "Nungali",
"nuh": "Ndunda",
"nui": "Ngumbi",
"nuj": "Nyole",
"nuk": "Nuu-chah-nulth; Nuuchahnulth",
"nul": "Nusa Laut",
"num": "Niuafo'ou",
"nun": "Anong",
"nuo": "Nguôn",
"nup": "Nupe-Nupe-Tako",
"nuq": "Nukumanu",
"nur": "Nukuria",
"nus": "Nuer",
"nut": "Nung (Viet Nam)",
"nuu": "Ngbundu",
"nuv": "Northern Nuni",
"nuw": "Nguluwan",
"nux": "Mehek",
"nuy": "Nunggubuyu",
"nuz": "Tlamacazapa Nahuatl",
"nv": "Navajo; Navaho",
"nvh": "Nasarian",
"nvm": "Namiae",
"nvo": "Nyokon",
"nwa": "Nawathinehena",
"nwb": "Nyabwa",
"nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari",
"nwe": "Ngwe",
"nwg": "Ngayawung",
"nwi": "Southwest Tanna",
"nwm": "Nyamusa-Molo",
"nwo": "Nauo",
"nwr": "Nawaru",
"nww": "Ndwewe",
"nwx": "Middle Newar",
"nwy": "Nottoway-Meherrin",
"nxa": "Nauete",
"nxd": "Ngando (Democratic Republic of Congo)",
"nxe": "Nage",
"nxg": "Ngad'a",
"nxi": "Nindi",
"nxk": "Koki Naga",
"nxl": "South Nuaulu",
"nxm": "Numidian",
"nxn": "Ngawun",
"nxo": "Ndambomo",
"nxq": "Naxi",
"nxr": "Ninggerum",
"nxx": "Nafri",
"ny": "Nyanja; Chewa; Chichewa",
"nyb": "Nyangbo",
"nyc": "Nyanga-li",
"nyd": "Nyore; Olunyole",
"nye": "Nyengo",
"nyf": "Giryama; Kigiryama",
"nyg": "Nyindu",
"nyh": "Nyikina",
"nyi": "Ama (Sudan)",
"nyj": "Nyanga",
"nyk": "Nyaneka",
"nyl": "Nyeu",
"nym": "Nyamwezi",
"nyn": "Nyankole",
"nyo": "Nyoro",
"nyp": "Nyang'i",
"nyq": "Nayini",
"nyr": "Nyiha (Malawi)",
"nys": "Nyungar",
"nyt": "Nyawaygi",
"nyu": "Nyungwe",
"nyv": "Nyulnyul",
"nyw": "Nyaw",
"nyx": "Nganyaywana",
"nyy": "Nyakyusa-Ngonde",
"nza": "Tigon Mbembe",
"nzb": "Njebi",
"nzd": "Nzadi",
"nzi": "Nzima",
"nzk": "Nzakara",
"nzm": "Zeme Naga",
"nzs": "New Zealand Sign Language",
"nzu": "Teke-Nzikou",
"nzy": "Nzakambay",
"nzz": "Nanga Dama Dogon",
"oaa": "Orok",
"oac": "Oroch",
"oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)",
"oav": "Old Avar",
"obi": "Obispeño",
"obk": "Southern Bontok",
"obl": "Oblo",
"obm": "Moabite",
"obo": "Obo Manobo",
"obr": "Old Burmese",
"obt": "Old Breton",
"obu": "Obulom",
"oc": "Occitan (post 1500)",
"oca": "Ocaina",
"och": "Old Chinese",
"ocm": "Old Cham",
"oco": "Old Cornish",
"ocu": "Atzingo Matlatzinca",
"oda": "Odut",
"odk": "Od",
"odt": "Old Dutch",
"odu": "Odual",
"ofo": "Ofo",
"ofs": "Old Frisian",
"ofu": "Efutop",
"ogb": "Ogbia",
"ogc": "Ogbah",
"oge": "Old Georgian",
"ogg": "Ogbogolo",
"ogo": "Khana",
"ogu": "Ogbronuagum",
"oht": "Old Hittite",
"ohu": "Old Hungarian",
"oia": "Oirata",
"oie": "Okolie",
"oin": "Inebu One",
"oj": "Ojibwa",
"ojb": "Northwestern Ojibwa",
"ojc": "Central Ojibwa",
"ojg": "Eastern Ojibwa",
"ojp": "Old Japanese",
"ojs": "Severn Ojibwa",
"ojv": "Ontong Java",
"ojw": "Western Ojibwa",
"oka": "Okanagan",
"okb": "Okobo",
"okc": "Kobo",
"okd": "Okodia",
"oke": "Okpe (Southwestern Edo)",
"okg": "Koko Babangk",
"okh": "Koresh-e Rostam",
"oki": "Okiek",
"okj": "Oko-Juwoi",
"okk": "Kwamtim One",
"okl": "Old Kentish Sign Language",
"okm": "Middle Korean (10th-16th cent.)",
"okn": "Oki-No-Erabu",
"oko": "Old Korean (3rd-9th cent.)",
"okr": "Kirike",
"oks": "Oko-Eni-Osayen",
"oku": "Oku",
"okv": "Orokaiva",
"okx": "Okpe (Northwestern Edo)",
"okz": "Old Khmer",
"ola": "Walungge",
"old": "Mochi",
"ole": "Olekha",
"olk": "Olkol",
"olm": "Oloma",
"olo": "Livvi",
"olr": "Olrat",
"olt": "Old Lithuanian",
"olu": "Kuvale",
"om": "Oromo",
"oma": "Omaha-Ponca",
"omb": "East Ambae",
"omc": "Mochica",
"omg": "Omagua",
"omi": "Omi",
"omk": "Omok",
"oml": "Ombo",
"omn": "Minoan",
"omo": "Utarmbung",
"omp": "Old Manipuri",
"omq": "Oto-Manguean languages",
"omr": "Old Marathi",
"omt": "Omotik",
"omu": "Omurano",
"omv": "Omotic languages",
"omw": "South Tairora",
"omx": "Old Mon",
"omy": "Old Malay",
"ona": "Ona",
"onb": "Lingao",
"one": "Oneida",
"ong": "Olo",
"oni": "Onin",
"onj": "Onjob",
"onk": "Kabore One",
"onn": "Onobasulu",
"ono": "Onondaga",
"onp": "Sartang",
"onr": "Northern One",
"ons": "Ono",
"ont": "Ontenu",
"onu": "Unua",
"onw": "Old Nubian",
"onx": "Onin Based Pidgin",
"ood": "Tohono O'odham",
"oog": "Ong",
"oon": "Önge",
"oor": "Oorlams",
"oos": "Old Ossetic",
"opa": "Okpamheri",
"opk": "Kopkaka",
"opm": "Oksapmin",
"opo": "Opao",
"opt": "Opata",
"opy": "Ofayé",
"or": "Oriya (macrolanguage); Odia (macrolanguage)",
"ora": "Oroha",
"orc": "Orma",
"ore": "Orejón",
"org": "Oring",
"orh": "Oroqen",
"orn": "Orang Kanaq",
"oro": "Orokolo",
"orr": "Oruma",
"ors": "Orang Seletar",
"ort": "Adivasi Oriya",
"oru": "Ormuri",
"orv": "Old Russian",
"orw": "Oro Win",
"orx": "Oro",
"ory": "Odia (individual language); Oriya (individual language)",
"orz": "Ormu",
"os": "Ossetian; Ossetic",
"osa": "Osage",
"osc": "Oscan",
"osi": "Osing",
"osn": "Old Sundanese",
"oso": "Ososo",
"osp": "Old Spanish",
"ost": "Osatu",
"osu": "Southern One",
"osx": "Old Saxon",
"ota": "Ottoman Turkish (1500-1928)",
"otb": "Old Tibetan",
"otd": "Ot Danum",
"ote": "Mezquital Otomi",
"oti": "Oti",
"otk": "Old Turkish",
"otl": "Tilapa Otomi",
"otm": "Eastern Highland Otomi",
"otn": "Tenango Otomi",
"oto": "Otomian languages",
"otq": "Querétaro Otomi",
"otr": "Otoro",
"ots": "Estado de México Otomi",
"ott": "Temoaya Otomi",
"otu": "Otuke",
"otw": "Ottawa",
"otx": "Texcatepec Otomi",
"oty": "Old Tamil",
"otz": "Ixtenco Otomi",
"oua": "Tagargrent",
"oub": "Glio-Oubi",
"oue": "Oune",
"oui": "Old Uighur",
"oum": "Ouma",
"ovd": "Elfdalian; Övdalian",
"owi": "Owiniga",
"owl": "Old Welsh",
"oyb": "Oy",
"oyd": "Oyda",
"oym": "Wayampi",
"oyy": "Oya'oya",
"ozm": "Koonzime",
"pa": "Panjabi; Punjabi",
"paa": "Papuan languages",
"pab": "Parecís",
"pac": "Pacoh",
"pad": "Paumarí",
"pae": "Pagibete",
"paf": "Paranawát",
"pag": "Pangasinan",
"pah": "Tenharim",
"pai": "Pe",
"pak": "Parakanã",
"pal": "Pahlavi",
"pam": "Pampanga; Kapampangan",
"pao": "Northern Paiute",
"pap": "Papiamento",
"paq": "Parya",
"par": "Panamint; Timbisha",
"pas": "Papasena",
"pau": "Palauan",
"pav": "Pakaásnovos",
"paw": "Pawnee",
"pax": "Pankararé",
"pay": "Pech",
"paz": "Pankararú",
"pbb": "Páez",
"pbc": "Patamona",
"pbe": "Mezontla Popoloca",
"pbf": "Coyotepec Popoloca",
"pbg": "Paraujano",
"pbh": "E'ñapa Woromaipu",
"pbi": "Parkwa",
"pbl": "Mak (Nigeria)",
"pbm": "Puebla Mazatec",
"pbn": "Kpasam",
"pbo": "Papel",
"pbp": "Badyara",
"pbr": "Pangwa",
"pbs": "Central Pame",
"pbt": "Southern Pashto",
"pbu": "Northern Pashto",
"pbv": "Pnar",
"pby": "Pyu (Papua New Guinea)",
"pca": "Santa Inés Ahuatempan Popoloca",
"pcb": "Pear",
"pcc": "Bouyei",
"pcd": "Picard",
"pce": "Ruching Palaung",
"pcf": "Paliyan",
"pcg": "Paniya",
"pch": "Pardhan",
"pci": "Duruwa",
"pcj": "Parenga",
"pck": "Paite Chin",
"pcl": "Pardhi",
"pcm": "Nigerian Pidgin",
"pcn": "Piti",
"pcp": "Pacahuara",
"pcw": "Pyapun",
"pda": "Anam",
"pdc": "Pennsylvania German",
"pdi": "Pa Di",
"pdn": "Podena; Fedan",
"pdo": "Padoe",
"pdt": "Plautdietsch",
"pdu": "Kayan",
"pea": "Peranakan Indonesian",
"peb": "Eastern Pomo",
"ped": "Mala (Papua New Guinea)",
"pee": "Taje",
"pef": "Northeastern Pomo",
"peg": "Pengo",
"peh": "Bonan",
"pei": "Chichimeca-Jonaz",
"pej": "Northern Pomo",
"pek": "Penchal",
"pel": "Pekal",
"pem": "Phende",
"peo": "Old Persian (ca. 600-400 B.C.)",
"pep": "Kunja",
"peq": "Southern Pomo",
"pes": "Iranian Persian",
"pev": "Pémono",
"pex": "Petats",
"pey": "Petjo",
"pez": "Eastern Penan",
"pfa": "Pááfang",
"pfe": "Pere",
"pfl": "Pfaelzisch",
"pga": "Sudanese Creole Arabic",
"pgd": "Gāndhārī",
"pgg": "Pangwali",
"pgi": "Pagi",
"pgk": "Rerep",
"pgl": "Primitive Irish",
"pgn": "Paelignian",
"pgs": "Pangseng",
"pgu": "Pagu",
"pgz": "Papua New Guinean Sign Language",
"pha": "Pa-Hng",
"phd": "Phudagi",
"phg": "Phuong",
"phh": "Phukha",
"phi": "Philippine languages",
"phj": "Pahari",
"phk": "Phake",
"phl": "Phalura; Palula",
"phm": "Phimbi",
"phn": "Phoenician",
"pho": "Phunoi",
"phq": "Phana'",
"phr": "Pahari-Potwari",
"pht": "Phu Thai",
"phu": "Phuan",
"phv": "Pahlavani",
"phw": "Phangduwali",
"pi": "Pali",
"pia": "Pima Bajo",
"pib": "Yine",
"pic": "Pinji",
"pid": "Piaroa",
"pie": "Piro",
"pif": "Pingelapese",
"pig": "Pisabo",
"pih": "Pitcairn-Norfolk",
"pij": "Pijao",
"pil": "Yom",
"pim": "Powhatan",
"pin": "Piame",
"pio": "Piapoco",
"pip": "Pero",
"pir": "Piratapuyo",
"pis": "Pijin",
"pit": "Pitta Pitta",
"piu": "Pintupi-Luritja",
"piv": "Pileni; Vaeakau-Taumako",
"piw": "Pimbwe",
"pix": "Piu",
"piy": "Piya-Kwonci",
"piz": "Pije",
"pjt": "Pitjantjatjara",
"pka": "Ardhamāgadhī Prākrit",
"pkb": "Pokomo; Kipfokomo",
"pkc": "Paekche",
"pkg": "Pak-Tong",
"pkh": "Pankhu",
"pkn": "Pakanha",
"pko": "Pökoot",
"pkp": "Pukapuka",
"pkr": "Attapady Kurumba",
"pks": "Pakistan Sign Language",
"pkt": "Maleng",
"pku": "Paku",
"pl": "Polish",
"pla": "Miani",
"plb": "Polonombauk",
"plc": "Central Palawano",
"pld": "Polari",
"ple": "Palu'e",
"plf": "Central Malayo-Polynesian languages",
"plg": "Pilagá",
"plh": "Paulohi",
"plj": "Polci",
"plk": "Kohistani Shina",
"pll": "Shwe Palaung",
"pln": "Palenquero",
"plo": "Oluta Popoluca",
"plq": "Palaic",
"plr": "Palaka Senoufo",
"pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca",
"plt": "Plateau Malagasy",
"plu": "Palikúr",
"plv": "Southwest Palawano",
"plw": "Brooke's Point Palawano",
"ply": "Bolyu",
"plz": "Paluan",
"pma": "Paama",
"pmb": "Pambia",
"pmd": "Pallanganmiddang",
"pme": "Pwaamei",
"pmf": "Pamona",
"pmh": "Māhārāṣṭri Prākrit",
"pmi": "Northern Pumi",
"pmj": "Southern Pumi",
"pmk": "Pamlico",
"pml": "Lingua Franca",
"pmm": "Pomo",
"pmn": "Pam",
"pmo": "Pom",
"pmq": "Northern Pame",
"pmr": "Paynamar",
"pms": "Piemontese",
"pmt": "Tuamotuan",
"pmw": "Plains Miwok",
"pmx": "Poumei Naga",
"pmy": "Papuan Malay",
"pmz": "Southern Pame",
"pna": "Punan Bah-Biau",
"pnb": "Western Panjabi",
"pnc": "Pannei",
"pnd": "Mpinda",
"pne": "Western Penan",
"png": "Pangu; Pongu",
"pnh": "Penrhyn",
"pni": "Aoheng",
"pnj": "Pinjarup",
"pnk": "Paunaka",
"pnl": "Paleni",
"pnm": "Punan Batu 1",
"pnn": "Pinai-Hagahai",
"pno": "Panobo",
"pnp": "Pancana",
"pnq": "Pana (Burkina Faso)",
"pnr": "Panim",
"pns": "Ponosakan",
"pnt": "Pontic",
"pnu": "Jiongnai Bunu",
"pnv": "Pinigura",
"pnw": "Banyjima; Panytyima",
"pnx": "Phong-Kniang",
"pny": "Pinyin",
"pnz": "Pana (Central African Republic)",
"poc": "Poqomam",
"poe": "San Juan Atzingo Popoloca",
"pof": "Poke",
"pog": "Potiguára",
"poh": "Poqomchi'",
"poi": "Highland Popoluca",
"pok": "Pokangá",
"pom": "Southeastern Pomo",
"pon": "Pohnpeian",
"poo": "Central Pomo",
"pop": "Pwapwâ",
"poq": "Texistepec Popoluca",
"pos": "Sayula Popoluca",
"pot": "Potawatomi",
"pov": "Upper Guinea Crioulo",
"pow": "San Felipe Otlaltepec Popoloca",
"pox": "Polabian",
"poy": "Pogolo",
"poz": "Malayo-Polynesian languages",
"ppe": "Papi",
"ppi": "Paipai",
"ppk": "Uma",
"ppl": "Pipil; Nicarao",
"ppm": "Papuma",
"ppn": "Papapana",
"ppo": "Folopa",
"ppp": "Pelende",
"ppq": "Pei",
"pps": "San Luís Temalacayuca Popoloca",
"ppt": "Pare",
"ppu": "Papora",
"pqa": "Pa'a",
"pqe": "Eastern Malayo-Polynesian languages",
"pqm": "Malecite-Passamaquoddy",
"pqw": "Western Malayo-Polynesian languages",
"pra": "Prakrit languages",
"prc": "Parachi",
"prd": "Parsi-Dari",
"pre": "Principense",
"prf": "Paranan",
"prg": "Prussian",
"prh": "Porohanon",
"pri": "Paicî",
"prk": "Parauk",
"prl": "Peruvian Sign Language",
"prm": "Kibiri",
"prn": "Prasuni",
"pro": "Old Provençal (to 1500); Old Occitan (to 1500)",
"prp": "Parsi",
"prq": "Ashéninka Perené",
"prr": "Puri",
"prs": "Dari; Afghan Persian",
"prt": "Phai",
"pru": "Puragi",
"prw": "Parawen",
"prx": "Purik",
"prz": "Providencia Sign Language",
"ps": "Pushto; Pashto",
"psa": "Asue Awyu",
"psc": "Iranian Sign Language; Persian Sign Language",
"psd": "Plains Indian Sign Language",
"pse": "Central Malay",
"psg": "Penang Sign Language",
"psh": "Southwest Pashai; Southwest Pashayi",
"psi": "Southeast Pashai; Southeast Pashayi",
"psl": "Puerto Rican Sign Language",
"psm": "Pauserna",
"psn": "Panasuan",
"pso": "Polish Sign Language",
"psp": "Philippine Sign Language",
"psq": "Pasi",
"psr": "Portuguese Sign Language",
"pss": "Kaulong",
"pst": "Central Pashto",
"psu": "Sauraseni Prākrit",
"psw": "Port Sandwich",
"psy": "Piscataway",
"pt": "Portuguese",
"pta": "Pai Tavytera",
"pth": "Pataxó Hã-Ha-Hãe",
"pti": "Pindiini; Wangkatha",
"ptn": "Patani",
"pto": "Zo'é",
"ptp": "Patep",
"ptq": "Pattapu",
"ptr": "Piamatsina",
"ptt": "Enrekang",
"ptu": "Bambam",
"ptv": "Port Vato",
"ptw": "Pentlatch",
"pty": "Pathiya",
"pua": "Western Highland Purepecha",
"pub": "Purum",
"puc": "Punan Merap",
"pud": "Punan Aput",
"pue": "Puelche",
"puf": "Punan Merah",
"pug": "Phuie",
"pui": "Puinave",
"puj": "Punan Tubu",
"pum": "Puma",
"puo": "Puoc",
"pup": "Pulabu",
"puq": "Puquina",
"pur": "Puruborá",
"put": "Putoh",
"puu": "Punu",
"puw": "Puluwatese",
"pux": "Puare",
"puy": "Purisimeño",
"pwa": "Pawaia",
"pwb": "Panawa",
"pwg": "Gapapaiwa",
"pwi": "Patwin",
"pwm": "Molbog",
"pwn": "Paiwan",
"pwo": "Pwo Western Karen",
"pwr": "Powari",
"pww": "Pwo Northern Karen",
"pxm": "Quetzaltepec Mixe",
"pye": "Pye Krumen",
"pym": "Fyam",
"pyn": "Poyanáwa",
"pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay",
"pyu": "Puyuma",
"pyx": "Pyu (Myanmar)",
"pyy": "Pyen",
"pzh": "Pazeh",
"pzn": "Jejara Naga; Para Naga",
"qu": "Quechua",
"qua": "Quapaw",
"qub": "Huallaga Huánuco Quechua",
"quc": "K'iche'; Quiché",
"qud": "Calderón Highland Quichua",
"quf": "Lambayeque Quechua",
"qug": "Chimborazo Highland Quichua",
"quh": "South Bolivian Quechua",
"qui": "Quileute",
"quk": "Chachapoyas Quechua",
"qul": "North Bolivian Quechua",
"qum": "Sipacapense",
"qun": "Quinault",
"qup": "Southern Pastaza Quechua",
"quq": "Quinqui",
"qur": "Yanahuanca Pasco Quechua",
"qus": "Santiago del Estero Quichua",
"quv": "Sacapulteco",
"quw": "Tena Lowland Quichua",
"qux": "Yauyos Quechua",
"quy": "Ayacucho Quechua",
"quz": "Cusco Quechua",
"qva": "Ambo-Pasco Quechua",
"qvc": "Cajamarca Quechua",
"qve": "Eastern Apurímac Quechua",
"qvh": "Huamalíes-Dos de Mayo Huánuco Quechua",
"qvi": "Imbabura Highland Quichua",
"qvj": "Loja Highland Quichua",
"qvl": "Cajatambo North Lima Quechua",
"qvm": "Margos-Yarowilca-Lauricocha Quechua",
"qvn": "North Junín Quechua",
"qvo": "Napo Lowland Quechua",
"qvp": "Pacaraos Quechua",
"qvs": "San Martín Quechua",
"qvw": "Huaylla Wanca Quechua",
"qvy": "Queyu",
"qvz": "Northern Pastaza Quichua",
"qwa": "Corongo Ancash Quechua",
"qwc": "Classical Quechua",
"qwe": "Quechuan (family)",
"qwh": "Huaylas Ancash Quechua",
"qwm": "Kuman (Russia)",
"qws": "Sihuas Ancash Quechua",
"qwt": "Kwalhioqua-Tlatskanai",
"qxa": "Chiquián Ancash Quechua",
"qxc": "Chincha Quechua",
"qxh": "Panao Huánuco Quechua",
"qxl": "Salasaca Highland Quichua",
"qxn": "Northern Conchucos Ancash Quechua",
"qxo": "Southern Conchucos Ancash Quechua",
"qxp": "Puno Quechua",
"qxq": "Qashqa'i",
"qxr": "Cañar Highland Quichua",
"qxs": "Southern Qiang",
"qxt": "Santa Ana de Tusi Pasco Quechua",
"qxu": "Arequipa-La Unión Quechua",
"qxw": "Jauja Wanca Quechua",
"qya": "Quenya",
"qyp": "Quiripi",
"raa": "Dungmali",
"rab": "Camling",
"rac": "Rasawa",
"rad": "Rade",
"raf": "Western Meohang",
"rag": "Logooli; Lulogooli",
"rah": "Rabha",
"rai": "Ramoaaina",
"raj": "Rajasthani",
"rak": "Tulu-Bohuai",
"ral": "Ralte",
"ram": "Canela",
"ran": "Riantana",
"rao": "Rao",
"rap": "Rapanui",
"raq": "Saam",
"rar": "Rarotongan; Cook Islands Maori",
"ras": "Tegali",
"rat": "Razajerdi",
"rau": "Raute",
"rav": "Sampang",
"raw": "Rawang",
"rax": "Rang",
"ray": "Rapa",
"raz": "Rahambuu",
"rbb": "Rumai Palaung",
"rbk": "Northern Bontok",
"rbl": "Miraya Bikol",
"rbp": "Barababaraba",
"rcf": "Réunion Creole French",
"rdb": "Rudbari",
"rea": "Rerau",
"reb": "Rembong",
"ree": "Rejang Kayan",
"reg": "Kara (Tanzania)",
"rei": "Reli",
"rej": "Rejang",
"rel": "Rendille",
"rem": "Remo",
"ren": "Rengao",
"rer": "Rer Bare",
"res": "Reshe",
"ret": "Retta",
"rey": "Reyesano",
"rga": "Roria",
"rge": "Romano-Greek",
"rgk": "Rangkas",
"rgn": "Romagnol",
"rgr": "Resígaro",
"rgs": "Southern Roglai",
"rgu": "Ringgou",
"rhg": "Rohingya",
"rhp": "Yahang",
"ria": "Riang (India)",
"rib": "Bribri Sign Language",
"rif": "Tarifit",
"ril": "Riang Lang; Riang (Myanmar)",
"rim": "Nyaturu",
"rin": "Nungu",
"rir": "Ribun",
"rit": "Ritharrngu",
"riu": "Riung",
"rjg": "Rajong",
"rji": "Raji",
"rjs": "Rajbanshi",
"rka": "Kraol",
"rkb": "Rikbaktsa",
"rkh": "Rakahanga-Manihiki",
"rki": "Rakhine",
"rkm": "Marka",
"rkt": "Rangpuri; Kamta",
"rkw": "Arakwal",
"rm": "Romansh",
"rma": "Rama",
"rmb": "Rembarrnga",
"rmc": "Carpathian Romani",
"rmd": "Traveller Danish",
"rme": "Angloromani",
"rmf": "Kalo Finnish Romani",
"rmg": "Traveller Norwegian",
"rmh": "Murkim",
"rmi": "Lomavren",
"rmk": "Romkun",
"rml": "Baltic Romani",
"rmm": "Roma",
"rmn": "Balkan Romani",
"rmo": "Sinte Romani",
"rmp": "Rempi",
"rmq": "Caló",
"rms": "Romanian Sign Language",
"rmt": "Domari",
"rmu": "Tavringer Romani",
"rmv": "Romanova",
"rmw": "Welsh Romani",
"rmx": "Romam",
"rmy": "Vlax Romani",
"rmz": "Marma",
"rn": "Rundi",
"rnb": "Brunca Sign Language",
"rnd": "Ruund",
"rng": "Ronga",
"rnl": "Ranglong",
"rnn": "Roon",
"rnp": "Rongpo",
"rnr": "Nari Nari",
"rnw": "Rungwa",
"ro": "Romanian; Moldavian; Moldovan",
"roa": "Romance languages",
"rob": "Tae'",
"roc": "Cacgia Roglai",
"rod": "Rogo",
"roe": "Ronji",
"rof": "Rombo",
"rog": "Northern Roglai",
"rol": "Romblomanon",
"rom": "Romany",
"roo": "Rotokas",
"rop": "Kriol",
"ror": "Rongga",
"rou": "Runga",
"row": "Dela-Oenale",
"rpn": "Repanbitip",
"rpt": "Rapting",
"rri": "Ririo",
"rro": "Waima",
"rrt": "Arritinngithigh",
"rsb": "Romano-Serbian",
"rsk": "Ruthenian; Rusyn",
"rsl": "Russian Sign Language",
"rsm": "Miriwoong Sign Language",
"rsn": "Rwandan Sign Language",
"rtc": "Rungtu Chin",
"rth": "Ratahan",
"rtm": "Rotuman",
"rts": "Yurats",
"rtw": "Rathawi",
"ru": "Russian",
"rub": "Gungu",
"ruc": "Ruuli",
"rue": "Rusyn",
"ruf": "Luguru",
"rug": "Roviana",
"ruh": "Ruga",
"rui": "Rufiji",
"ruk": "Che",
"ruo": "Istro Romanian",
"rup": "Macedo-Romanian; Aromanian; Arumanian",
"ruq": "Megleno Romanian",
"rut": "Rutul",
"ruu": "Lanas Lobu",
"ruy": "Mala (Nigeria)",
"ruz": "Ruma",
"rw": "Kinyarwanda",
"rwa": "Rawo",
"rwk": "Rwa",
"rwl": "Ruwila",
"rwm": "Amba (Uganda)",
"rwo": "Rawa",
"rwr": "Marwari (India)",
"rxd": "Ngardi",
"rxw": "Karuwali; Garuwali",
"ryn": "Northern Amami-Oshima",
"rys": "Yaeyama",
"ryu": "Central Okinawan",
"rzh": "Rāziḥī",
"sa": "Sanskrit",
"saa": "Saba",
"sab": "Buglere",
"sac": "Meskwaki",
"sad": "Sandawe",
"sae": "Sabanê",
"saf": "Safaliba",
"sah": "Yakut",
"sai": "South American Indian languages",
"saj": "Sahu",
"sak": "Sake",
"sal": "Salishan languages",
"sam": "Samaritan Aramaic",
"sao": "Sause",
"saq": "Samburu",
"sar": "Saraveca",
"sas": "Sasak",
"sat": "Santali",
"sau": "Saleman",
"sav": "Saafi-Saafi",
"saw": "Sawi",
"sax": "Sa",
"say": "Saya",
"saz": "Saurashtra",
"sba": "Ngambay",
"sbb": "Simbo",
"sbc": "Kele (Papua New Guinea)",
"sbd": "Southern Samo",
"sbe": "Saliba",
"sbf": "Chabu; Shabo",
"sbg": "Seget",
"sbh": "Sori-Harengan",
"sbi": "Seti",
"sbj": "Surbakhal",
"sbk": "Safwa",
"sbl": "Botolan Sambal",
"sbm": "Sagala",
"sbn": "Sindhi Bhil",
"sbo": "Sabüm",
"sbp": "Sangu (Tanzania)",
"sbq": "Sileibi",
"sbr": "Sembakung Murut",
"sbs": "Subiya",
"sbt": "Kimki",
"sbu": "Stod Bhoti",
"sbv": "Sabine",
"sbw": "Simba",
"sbx": "Seberuang",
"sby": "Soli",
"sbz": "Sara Kaba",
"sc": "Sardinian",
"scb": "Chut",
"sce": "Dongxiang",
"scf": "San Miguel Creole French",
"scg": "Sanggau",
"sch": "Sakachep",
"sci": "Sri Lankan Creole Malay",
"sck": "Sadri",
"scl": "Shina",
"scn": "Sicilian",
"sco": "Scots",
"scp": "Hyolmo; Helambu Sherpa",
"scq": "Sa'och",
"scs": "North Slavey",
"sct": "Southern Katang",
"scu": "Shumcho",
"scv": "Sheni",
"scw": "Sha",
"scx": "Sicel",
"sd": "Sindhi",
"sda": "Toraja-Sa'dan",
"sdb": "Shabak",
"sdc": "Sassarese Sardinian",
"sde": "Surubu",
"sdf": "Sarli",
"sdg": "Savi",
"sdh": "Southern Kurdish",
"sdj": "Suundi",
"sdk": "Sos Kundi",
"sdl": "Saudi Arabian Sign Language",
"sdn": "Gallurese Sardinian",
"sdo": "Bukar-Sadung Bidayuh",
"sdp": "Sherdukpen",
"sdq": "Semandang",
"sdr": "Oraon Sadri",
"sds": "Sened",
"sdt": "Shuadit",
"sdu": "Sarudu",
"sdv": "Eastern Sudanic languages",
"sdx": "Sibu Melanau",
"sdz": "Sallands",
"se": "Northern Sami",
"sea": "Semai",
"seb": "Shempire Senoufo",
"sec": "Sechelt",
"sed": "Sedang",
"see": "Seneca",
"sef": "Cebaara Senoufo",
"seg": "Segeju",
"seh": "Sena",
"sei": "Seri",
"sej": "Sene",
"sek": "Sekani",
"sel": "Selkup",
"sem": "Semitic languages",
"sen": "Nanerigé Sénoufo",
"seo": "Suarmin",
"sep": "Sìcìté Sénoufo",
"seq": "Senara Sénoufo",
"ser": "Serrano",
"ses": "Koyraboro Senni Songhai",
"set": "Sentani",
"seu": "Serui-Laut",
"sev": "Nyarafolo Senoufo",
"sew": "Sewa Bay",
"sey": "Secoya",
"sez": "Senthang Chin",
"sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language",
"sfe": "Eastern Subanen",
"sfm": "Small Flowery Miao",
"sfs": "South African Sign Language",
"sfw": "Sehwi",
"sg": "Sango",
"sga": "Old Irish (to 900)",
"sgb": "Mag-antsi Ayta",
"sgc": "Kipsigis",
"sgd": "Surigaonon",
"sge": "Segai",
"sgg": "Swiss-German Sign Language",
"sgh": "Shughni",
"sgi": "Suga",
"sgj": "Surgujia",
"sgk": "Sangkong",
"sgm": "Singa",
"sgn": "Sign languages",
"sgp": "Singpho",
"sgr": "Sangisari",
"sgs": "Samogitian",
"sgt": "Brokpake",
"sgu": "Salas",
"sgw": "Sebat Bet Gurage",
"sgx": "Sierra Leone Sign Language",
"sgy": "Sanglechi",
"sgz": "Sursurunga",
"sh": "Serbo-Croatian",
"sha": "Shall-Zwall",
"shb": "Ninam",
"shc": "Sonde",
"shd": "Kundal Shahi",
"she": "Sheko",
"shg": "Shua",
"shh": "Shoshoni",
"shi": "Tachelhit",
"shj": "Shatt",
"shk": "Shilluk",
"shl": "Shendu",
"shm": "Shahrudi",
"shn": "Shan",
"sho": "Shanga",
"shp": "Shipibo-Conibo",
"shq": "Sala",
"shr": "Shi",
"shs": "Shuswap",
"sht": "Shasta",
"shu": "Chadian Arabic",
"shv": "Shehri",
"shw": "Shwai",
"shx": "She",
"shy": "Tachawit",
"shz": "Syenara Senoufo",
"si": "Sinhala; Sinhalese",
"sia": "Akkala Sami",
"sib": "Sebop",
"sid": "Sidamo",
"sie": "Simaa",
"sif": "Siamou",
"sig": "Paasaal",
"sih": "Zire; Sîshëë",
"sii": "Shom Peng",
"sij": "Numbami",
"sik": "Sikiana",
"sil": "Tumulung Sisaala",
"sim": "Mende (Papua New Guinea)",
"sio": "Siouan languages",
"sip": "Sikkimese",
"siq": "Sonia",
"sir": "Siri",
"sis": "Siuslaw",
"sit": "Sino-Tibetan languages",
"siu": "Sinagen",
"siv": "Sumariup",
"siw": "Siwai",
"six": "Sumau",
"siy": "Sivandi",
"siz": "Siwi",
"sja": "Epena",
"sjb": "Sajau Basap",
"sjd": "Kildin Sami",
"sje": "Pite Sami",
"sjg": "Assangori",
"sjk": "Kemi Sami",
"sjl": "Sajalong; Miji",
"sjm": "Mapun",
"sjn": "Sindarin",
"sjo": "Xibe",
"sjp": "Surjapuri",
"sjr": "Siar-Lak",
"sjs": "Senhaja De Srair",
"sjt": "Ter Sami",
"sju": "Ume Sami",
"sjw": "Shawnee",
"sk": "Slovak",
"ska": "Skagit",
"skb": "Saek",
"skc": "Ma Manda",
"skd": "Southern Sierra Miwok",
"ske": "Seke (Vanuatu)",
"skf": "Sakirabiá",
"skg": "Sakalava Malagasy",
"skh": "Sikule",
"ski": "Sika",
"skj": "Seke (Nepal)",
"skm": "Kutong",
"skn": "Kolibugan Subanon",
"sko": "Seko Tengah",
"skp": "Sekapan",
"skq": "Sininkere",
"skr": "Saraiki; Seraiki",
"sks": "Maia",
"skt": "Sakata",
"sku": "Sakao",
"skv": "Skou",
"skw": "Skepi Creole Dutch",
"skx": "Seko Padang",
"sky": "Sikaiana",
"skz": "Sekar",
"sl": "Slovenian",
"sla": "Slavic languages",
"slc": "Sáliba",
"sld": "Sissala",
"sle": "Sholaga",
"slf": "Swiss-Italian Sign Language",
"slg": "Selungai Murut",
"slh": "Southern Puget Sound Salish",
"sli": "Lower Silesian",
"slj": "Salumá",
"sll": "Salt-Yui",
"slm": "Pangutaran Sama",
"sln": "Salinan",
"slp": "Lamaholot",
"slq": "Salchuq",
"slr": "Salar",
"sls": "Singapore Sign Language",
"slt": "Sila",
"slu": "Selaru",
"slw": "Sialum",
"slx": "Salampasu",
"sly": "Selayar",
"slz": "Ma'ya",
"sm": "Samoan",
"sma": "Southern Sami",
"smb": "Simbari",
"smc": "Som",
"smf": "Auwe",
"smg": "Simbali",
"smh": "Samei",
"smi": "Sami languages",
"smj": "Lule Sami",
"smk": "Bolinao",
"sml": "Central Sama",
"smm": "Musasa",
"smn": "Inari Sami",
"smp": "Samaritan",
"smq": "Samo",
"smr": "Simeulue",
"sms": "Skolt Sami",
"smt": "Simte",
"smu": "Somray",
"smv": "Samvedi",
"smw": "Sumbawa",
"smx": "Samba",
"smy": "Semnani",
"smz": "Simeku",
"sn": "Shona",
"snc": "Sinaugoro",
"sne": "Bau Bidayuh",
"snf": "Noon",
"sng": "Sanga (Democratic Republic of Congo)",
"sni": "Sensi",
"snj": "Riverain Sango",
"snk": "Soninke",
"snl": "Sangil",
"snm": "Southern Ma'di",
"snn": "Siona",
"sno": "Snohomish",
"snp": "Siane",
"snq": "Sangu (Gabon)",
"snr": "Sihan",
"sns": "South West Bay; Nahavaq",
"snu": "Senggi; Viid",
"snv": "Sa'ban",
"snw": "Selee",
"snx": "Sam",
"sny": "Saniyo-Hiyewe",
"snz": "Kou",
"so": "Somali",
"soa": "Thai Song",
"sob": "Sobei",
"soc": "So (Democratic Republic of Congo)",
"sod": "Songoora",
"soe": "Songomeno",
"sog": "Sogdian",
"soh": "Aka",
"soi": "Sonha",
"soj": "Soi",
"sok": "Sokoro",
"sol": "Solos",
"son": "Songhai languages",
"soo": "Songo",
"sop": "Songe",
"soq": "Kanasi",
"sor": "Somrai",
"sos": "Seeku",
"sou": "Southern Thai",
"sov": "Sonsorol",
"sow": "Sowanda",
"sox": "Swo",
"soy": "Miyobe",
"soz": "Temi",
"spb": "Sepa (Indonesia)",
"spc": "Sapé",
"spd": "Saep",
"spe": "Sepa (Papua New Guinea)",
"spg": "Sian",
"spi": "Saponi",
"spk": "Sengo",
"spl": "Selepet",
"spm": "Akukem",
"spn": "Sanapaná",
"spo": "Spokane",
"spp": "Supyire Senoufo",
"spq": "Loreto-Ucayali Spanish",
"spr": "Saparua",
"sps": "Saposa",
"spt": "Spiti Bhoti",
"spu": "Sapuan",
"spv": "Sambalpuri; Kosli",
"spx": "South Picene",
"spy": "Sabaot",
"sq": "Albanian",
"sqa": "Shama-Sambuga",
"sqh": "Shau",
"sqj": "Albanian languages",
"sqk": "Albanian Sign Language",
"sqm": "Suma",
"sqn": "Susquehannock",
"sqo": "Sorkhei",
"sqq": "Sou",
"sqr": "Siculo Arabic",
"sqs": "Sri Lankan Sign Language",
"sqt": "Soqotri",
"squ": "Squamish",
"sqx": "Kufr Qassem Sign Language (KQSL)",
"sr": "Serbian",
"sra": "Saruga",
"srb": "Sora",
"src": "Logudorese Sardinian",
"sre": "Sara",
"srf": "Nafi",
"srg": "Sulod",
"srh": "Sarikoli",
"sri": "Siriano",
"srk": "Serudung Murut",
"srl": "Isirawa",
"srm": "Saramaccan",
"srn": "Sranan Tongo",
"sro": "Campidanese Sardinian",
"srq": "Sirionó",
"srr": "Serer",
"srs": "Sarsi",
"srt": "Sauri",
"sru": "Suruí",
"srv": "Southern Sorsoganon",
"srw": "Serua",
"srx": "Sirmauri",
"sry": "Sera",
"srz": "Shahmirzadi",
"ss": "Swati",
"ssa": "Nilo-Saharan languages",
"ssb": "Southern Sama",
"ssc": "Suba-Simbiti",
"ssd": "Siroi",
"sse": "Balangingi; Bangingih Sama",
"ssf": "Thao",
"ssg": "Seimat",
"ssh": "Shihhi Arabic",
"ssi": "Sansi",
"ssj": "Sausi",
"ssk": "Sunam",
"ssl": "Western Sisaala",
"ssm": "Semnam",
"ssn": "Waata",
"sso": "Sissano",
"ssp": "Spanish Sign Language",
"ssq": "So'a",
"ssr": "Swiss-French Sign Language",
"sss": "Sô",
"sst": "Sinasina",
"ssu": "Susuami",
"ssv": "Shark Bay",
"ssx": "Samberigi",
"ssy": "Saho",
"ssz": "Sengseng",
"st": "Southern Sotho",
"sta": "Settla",
"stb": "Northern Subanen",
"std": "Sentinel",
"ste": "Liana-Seti",
"stf": "Seta",
"stg": "Trieng",
"sth": "Shelta",
"sti": "Bulo Stieng",
"stj": "Matya Samo",
"stk": "Arammba",
"stl": "Stellingwerfs",
"stm": "Setaman",
"stn": "Owa",
"sto": "Stoney",
"stp": "Southeastern Tepehuan",
"stq": "Saterfriesisch",
"str": "Straits Salish",
"sts": "Shumashti",
"stt": "Budeh Stieng",
"stu": "Samtao",
"stv": "Silt'e",
"stw": "Satawalese",
"sty": "Siberian Tatar",
"su": "Sundanese",
"sua": "Sulka",
"sub": "Suku",
"suc": "Western Subanon",
"sue": "Suena",
"sug": "Suganga",
"sui": "Suki",
"suj": "Shubi",
"suk": "Sukuma",
"suo": "Bouni",
"suq": "Tirmaga-Chai Suri; Suri",
"sur": "Mwaghavul",
"sus": "Susu",
"sut": "Subtiaba",
"suv": "Puroik",
"suw": "Sumbwa",
"sux": "Sumerian",
"suy": "Suyá",
"suz": "Sunwar",
"sv": "Swedish",
"sva": "Svan",
"svb": "Ulau-Suain",
"svc": "Vincentian Creole English",
"sve": "Serili",
"svk": "Slovakian Sign Language",
"svm": "Slavomolisano",
"svs": "Savosavo",
"svx": "Skalvian",
"sw": "Swahili (macrolanguage)",
"swb": "Maore Comorian",
"swc": "Congo Swahili",
"swf": "Sere",
"swg": "Swabian",
"swh": "Swahili (individual language); Kiswahili",
"swi": "Sui",
"swj": "Sira",
"swk": "Malawi Sena",
"swl": "Swedish Sign Language",
"swm": "Samosa",
"swn": "Sawknah",
"swo": "Shanenawa",
"swp": "Suau",
"swq": "Sharwa",
"swr": "Saweru",
"sws": "Seluwasan",
"swt": "Sawila",
"swu": "Suwawa",
"swv": "Shekhawati",
"sww": "Sowa",
"swx": "Suruahá",
"swy": "Sarua",
"sxb": "Suba",
"sxc": "Sicanian",
"sxe": "Sighu",
"sxg": "Shuhi; Shixing",
"sxk": "Southern Kalapuya",
"sxl": "Selian",
"sxm": "Samre",
"sxn": "Sangir",
"sxo": "Sorothaptic",
"sxr": "Saaroa",
"sxs": "Sasaru",
"sxu": "Upper Saxon",
"sxw": "Saxwe Gbe",
"sya": "Siang",
"syb": "Central Subanen",
"syc": "Classical Syriac",
"syd": "Samoyedic languages",
"syi": "Seki",
"syk": "Sukur",
"syl": "Sylheti",
"sym": "Maya Samo",
"syn": "Senaya",
"syo": "Suoy",
"syr": "Syriac",
"sys": "Sinyar",
"syw": "Kagate",
"syx": "Samay",
"syy": "Al-Sayyid Bedouin Sign Language",
"sza": "Semelai",
"szb": "Ngalum",
"szc": "Semaq Beri",
"szd": "Seru",
"sze": "Seze",
"szg": "Sengele",
"szl": "Silesian",
"szn": "Sula",
"szp": "Suabo",
"szs": "Solomon Islands Sign Language",
"szv": "Isu (Fako Division)",
"szw": "Sawai",
"szy": "Sakizaya",
"ta": "Tamil",
"taa": "Lower Tanana",
"tab": "Tabassaran",
"tac": "Lowland Tarahumara",
"tad": "Tause",
"tae": "Tariana",
"taf": "Tapirapé",
"tag": "Tagoi",
"tai": "Tai languages",
"taj": "Eastern Tamang",
"tak": "Tala",
"tal": "Tal",
"tan": "Tangale",
"tao": "Yami",
"tap": "Taabwa",
"taq": "Tamasheq",
"tar": "Central Tarahumara",
"tas": "Tay Boi",
"tau": "Upper Tanana",
"tav": "Tatuyo",
"taw": "Tai",
"tax": "Tamki",
"tay": "Atayal",
"taz": "Tocho",
"tba": "Aikanã",
"tbc": "Takia",
"tbd": "Kaki Ae",
"tbe": "Tanimbili",
"tbf": "Mandara",
"tbg": "North Tairora",
"tbh": "Dharawal; Thurawal",
"tbi": "Gaam",
"tbj": "Tiang",
"tbk": "Calamian Tagbanwa",
"tbl": "Tboli",
"tbm": "Tagbu",
"tbn": "Barro Negro Tunebo",
"tbo": "Tawala",
"tbp": "Taworta; Diebroud",
"tbq": "Tibeto-Burman languages",
"tbr": "Tumtum",
"tbs": "Tanguat",
"tbt": "Tembo (Kitembo)",
"tbu": "Tubar",
"tbv": "Tobo",
"tbw": "Tagbanwa",
"tbx": "Kapin",
"tby": "Tabaru",
"tbz": "Ditammari",
"tca": "Ticuna",
"tcb": "Tanacross",
"tcc": "Datooga",
"tcd": "Tafi",
"tce": "Southern Tutchone",
"tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec",
"tcg": "Tamagario",
"tch": "Turks And Caicos Creole English",
"tci": "Wára",
"tck": "Tchitchege",
"tcl": "Taman (Myanmar)",
"tcm": "Tanahmerah",
"tcn": "Tichurong",
"tco": "Taungyo",
"tcp": "Tawr Chin",
"tcq": "Kaiy",
"tcs": "Torres Strait Creole; Yumplatok",
"tct": "T'en",
"tcu": "Southeastern Tarahumara",
"tcw": "Tecpatlán Totonac",
"tcx": "Toda",
"tcy": "Tulu",
"tcz": "Thado Chin",
"tda": "Tagdal",
"tdb": "Panchpargania",
"tdc": "Emberá-Tadó",
"tdd": "Tai Nüa",
"tde": "Tiranige Diga Dogon",
"tdf": "Talieng",
"tdg": "Western Tamang",
"tdh": "Thulung",
"tdi": "Tomadino",
"tdj": "Tajio",
"tdk": "Tambas",
"tdl": "Sur",
"tdm": "Taruma",
"tdn": "Tondano",
"tdo": "Teme",
"tdq": "Tita",
"tdr": "Todrah",
"tds": "Doutai",
"tdt": "Tetun Dili",
"tdv": "Toro",
"tdx": "Tandroy-Mahafaly Malagasy",
"tdy": "Tadyawan",
"te": "Telugu",
"tea": "Temiar",
"teb": "Tetete",
"tec": "Terik",
"ted": "Tepo Krumen",
"tee": "Huehuetla Tepehua",
"tef": "Teressa",
"teg": "Teke-Tege",
"teh": "Tehuelche",
"tei": "Torricelli",
"tek": "Ibali Teke",
"tem": "Timne",
"ten": "Tama (Colombia)",
"teo": "Teso",
"tep": "Tepecano",
"teq": "Temein",
"ter": "Tereno",
"tes": "Tengger",
"tet": "Tetum",
"teu": "Soo",
"tev": "Teor",
"tew": "Tewa (USA)",
"tex": "Tennet",
"tey": "Tulishi",
"tez": "Tetserret",
"tfi": "Tofin Gbe",
"tfn": "Tanaina",
"tfo": "Tefaro",
"tfr": "Teribe",
"tft": "Ternate",
"tg": "Tajik",
"tga": "Sagalla",
"tgb": "Tobilung",
"tgc": "Tigak",
"tgd": "Ciwogai",
"tge": "Eastern Gorkha Tamang",
"tgf": "Chalikha",
"tgh": "Tobagonian Creole English",
"tgi": "Lawunuia",
"tgj": "Tagin",
"tgn": "Tandaganon",
"tgo": "Sudest",
"tgp": "Tangoa",
"tgq": "Tring",
"tgr": "Tareng",
"tgs": "Nume",
"tgt": "Central Tagbanwa",
"tgu": "Tanggu",
"tgv": "Tingui-Boto",
"tgw": "Tagwana Senoufo",
"tgx": "Tagish",
"tgy": "Togoyo",
"tgz": "Tagalaka",
"th": "Thai",
"thd": "Kuuk Thaayorre; Thayore",
"the": "Chitwania Tharu",
"thf": "Thangmi",
"thh": "Northern Tarahumara",
"thi": "Tai Long",
"thk": "Tharaka; Kitharaka",
"thl": "Dangaura Tharu",
"thm": "Aheu",
"thn": "Thachanadan",
"thp": "Thompson",
"thq": "Kochila Tharu",
"thr": "Rana Tharu",
"ths": "Thakali",
"tht": "Tahltan",
"thu": "Thuri",
"thv": "Tahaggart Tamahaq",
"thy": "Tha",
"thz": "Tayart Tamajeq",
"ti": "Tigrinya",
"tia": "Tidikelt Tamazight",
"tic": "Tira",
"tif": "Tifal",
"tig": "Tigre",
"tih": "Timugon Murut",
"tii": "Tiene",
"tij": "Tilung",
"tik": "Tikar",
"til": "Tillamook",
"tim": "Timbe",
"tin": "Tindi",
"tio": "Teop",
"tip": "Trimuris",
"tiq": "Tiéfo",
"tis": "Masadiit Itneg",
"tit": "Tinigua",
"tiu": "Adasen",
"tiv": "Tiv",
"tiw": "Tiwi",
"tix": "Southern Tiwa",
"tiy": "Tiruray",
"tiz": "Tai Hongjin",
"tja": "Tajuasohn",
"tjg": "Tunjung",
"tji": "Northern Tujia",
"tjj": "Tjungundji",
"tjl": "Tai Laing",
"tjm": "Timucua",
"tjn": "Tonjon",
"tjo": "Temacine Tamazight",
"tjp": "Tjupany",
"tjs": "Southern Tujia",
"tju": "Tjurruru",
"tjw": "Djabwurrung",
"tk": "Turkmen",
"tka": "Truká",
"tkb": "Buksa",
"tkd": "Tukudede",
"tke": "Takwane",
"tkf": "Tukumanféd",
"tkg": "Tesaka Malagasy",
"tkl": "Tokelau",
"tkm": "Takelma",
"tkn": "Toku-No-Shima",
"tkp": "Tikopia",
"tkq": "Tee",
"tkr": "Tsakhur",
"tks": "Takestani",
"tkt": "Kathoriya Tharu",
"tku": "Upper Necaxa Totonac",
"tkv": "Mur Pano",
"tkw": "Teanu",
"tkx": "Tangko",
"tkz": "Takua",
"tl": "Tagalog",
"tla": "Southwestern Tepehuan",
"tlb": "Tobelo",
"tlc": "Yecuatla Totonac",
"tld": "Talaud",
"tlf": "Telefol",
"tlg": "Tofanma",
"tlh": "Klingon; tlhIngan Hol",
"tli": "Tlingit",
"tlj": "Talinga-Bwisi",
"tlk": "Taloki",
"tll": "Tetela",
"tlm": "Tolomako",
"tln": "Talondo'",
"tlo": "Talodi",
"tlp": "Filomena Mata-Coahuitlán Totonac",
"tlq": "Tai Loi",
"tlr": "Talise",
"tls": "Tambotalo",
"tlt": "Sou Nama; Teluti",
"tlu": "Tulehu",
"tlv": "Taliabu",
"tlx": "Khehek",
"tly": "Talysh",
"tma": "Tama (Chad)",
"tmb": "Katbol; Avava",
"tmc": "Tumak",
"tmd": "Haruai",
"tme": "Tremembé",
"tmf": "Toba-Maskoy",
"tmg": "Ternateño",
"tmh": "Tamashek",
"tmi": "Tutuba",
"tmj": "Samarokena",
"tmk": "Northwestern Tamang",
"tml": "Tamnim Citak",
"tmm": "Tai Thanh",
"tmn": "Taman (Indonesia)",
"tmo": "Temoq",
"tmq": "Tumleo",
"tmr": "Jewish Babylonian Aramaic (ca. 200-1200 CE)",
"tms": "Tima",
"tmt": "Tasmate",
"tmu": "Iau",
"tmv": "Tembo (Motembo)",
"tmw": "Temuan",
"tmy": "Tami",
"tmz": "Tamanaku",
"tn": "Tswana",
"tna": "Tacana",
"tnb": "Western Tunebo",
"tnc": "Tanimuca-Retuarã",
"tnd": "Angosturas Tunebo",
"tng": "Tobanga",
"tnh": "Maiani",
"tni": "Tandia",
"tnk": "Kwamera",
"tnl": "Lenakel",
"tnm": "Tabla",
"tnn": "North Tanna",
"tno": "Toromono",
"tnp": "Whitesands",
"tnq": "Taino",
"tnr": "Ménik",
"tns": "Tenis",
"tnt": "Tontemboan",
"tnu": "Tay Khang",
"tnv": "Tangchangya",
"tnw": "Tonsawang",
"tnx": "Tanema",
"tny": "Tongwe",
"tnz": "Ten'edn",
"to": "Tonga (Tonga Islands)",
"tob": "Toba",
"toc": "Coyutla Totonac",
"tod": "Toma",
"tof": "Gizrra",
"tog": "Tonga (Nyasa)",
"toh": "Gitonga",
"toi": "Tonga (Zambia)",
"toj": "Tojolabal",
"tok": "Toki Pona",
"tol": "Tolowa",
"tom": "Tombulu",
"too": "Xicotepec De Juárez Totonac",
"top": "Papantla Totonac",
"toq": "Toposa",
"tor": "Togbo-Vara Banda",
"tos": "Highland Totonac",
"tou": "Tho",
"tov": "Upper Taromi",
"tow": "Jemez",
"tox": "Tobian",
"toy": "Topoiyo",
"toz": "To",
"tpa": "Taupota",
"tpc": "Azoyú Me'phaa; Azoyú Tlapanec",
"tpe": "Tippera",
"tpf": "Tarpia",
"tpg": "Kula",
"tpi": "Tok Pisin",
"tpj": "Tapieté",
"tpk": "Tupinikin",
"tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec",
"tpm": "Tampulma",
"tpn": "Tupinambá",
"tpo": "Tai Pao",
"tpp": "Pisaflores Tepehua",
"tpq": "Tukpa",
"tpr": "Tuparí",
"tpt": "Tlachichilco Tepehua",
"tpu": "Tampuan",
"tpv": "Tanapag",
"tpw": "Tupí",
"tpx": "Acatepec Me'phaa; Acatepec Tlapanec",
"tpy": "Trumai",
"tpz": "Tinputz",
"tqb": "Tembé",
"tql": "Lehali",
"tqm": "Turumsa",
"tqn": "Tenino",
"tqo": "Toaripi",
"tqp": "Tomoip",
"tqq": "Tunni",
"tqr": "Torona",
"tqt": "Western Totonac",
"tqu": "Touo",
"tqw": "Tonkawa",
"tr": "Turkish",
"tra": "Tirahi",
"trb": "Terebu",
"trc": "Copala Triqui",
"trd": "Turi",
"tre": "East Tarangan",
"trf": "Trinidadian Creole English",
"trg": "Lishán Didán",
"trh": "Turaka",
"tri": "Trió",
"trj": "Toram",
"trk": "Turkic languages",
"trl": "Traveller Scottish",
"trm": "Tregami",
"trn": "Trinitario",
"tro": "Tarao Naga",
"trp": "Kok Borok",
"trq": "San Martín Itunyoso Triqui",
"trr": "Taushiro",
"trs": "Chicahuaxtla Triqui",
"trt": "Tunggare",
"tru": "Turoyo; Surayt",
"trv": "Sediq; Seediq; Taroko",
"trw": "Torwali",
"trx": "Tringgus-Sembaan Bidayuh",
"try": "Turung",
"trz": "Torá",
"ts": "Tsonga",
"tsa": "Tsaangi",
"tsb": "Tsamai",
"tsc": "Tswa",
"tsd": "Tsakonian",
"tse": "Tunisian Sign Language",
"tsg": "Tausug",
"tsh": "Tsuvan",
"tsi": "Tsimshian",
"tsj": "Tshangla",
"tsk": "Tseku",
"tsl": "Ts'ün-Lao",
"tsm": "Turkish Sign Language; Türk İşaret Dili",
"tsp": "Northern Toussian",
"tsq": "Thai Sign Language",
"tsr": "Akei",
"tss": "Taiwan Sign Language",
"tst": "Tondi Songway Kiini",
"tsu": "Tsou",
"tsv": "Tsogo",
"tsw": "Tsishingini",
"tsx": "Mubami",
"tsy": "Tebul Sign Language",
"tsz": "Purepecha",
"tt": "Tatar",
"tta": "Tutelo",
"ttb": "Gaa",
"ttc": "Tektiteko",
"ttd": "Tauade",
"tte": "Bwanabwana",
"ttf": "Tuotomb",
"ttg": "Tutong",
"tth": "Upper Ta'oih",
"tti": "Tobati",
"ttj": "Tooro",
"ttk": "Totoro",
"ttl": "Totela",
"ttm": "Northern Tutchone",
"ttn": "Towei",
"tto": "Lower Ta'oih",
"ttp": "Tombelala",
"ttq": "Tawallammat Tamajaq",
"ttr": "Tera",
"tts": "Northeastern Thai",
"ttt": "Muslim Tat",
"ttu": "Torau",
"ttv": "Titan",
"ttw": "Long Wat",
"tty": "Sikaritai",
"ttz": "Tsum",
"tua": "Wiarumus",
"tub": "Tübatulabal",
"tuc": "Mutu",
"tud": "Tuxá",
"tue": "Tuyuca",
"tuf": "Central Tunebo",
"tug": "Tunia",
"tuh": "Taulil",
"tui": "Tupuri",
"tuj": "Tugutil",
"tul": "Tula",
"tum": "Tumbuka",
"tun": "Tunica",
"tuo": "Tucano",
"tup": "Tupi languages",
"tuq": "Tedaga",
"tus": "Tuscarora",
"tut": "Altaic languages",
"tuu": "Tututni",
"tuv": "Turkana",
"tuw": "Tungus languages",
"tux": "Tuxináwa",
"tuy": "Tugen",
"tuz": "Turka",
"tva": "Vaghua",
"tvd": "Tsuvadi",
"tve": "Te'un",
"tvk": "Southeast Ambrym",
"tvl": "Tuvalu",
"tvm": "Tela-Masbuar",
"tvn": "Tavoyan",
"tvo": "Tidore",
"tvs": "Taveta",
"tvt": "Tutsa Naga",
"tvu": "Tunen",
"tvw": "Sedoa",
"tvx": "Taivoan",
"tvy": "Timor Pidgin",
"tw": "Twi",
"twa": "Twana",
"twb": "Western Tawbuid",
"twc": "Teshenawa",
"twd": "Twents",
"twe": "Tewa (Indonesia)",
"twf": "Northern Tiwa",
"twg": "Tereweng",
"twh": "Tai Dón",
"twl": "Tawara",
"twm": "Tawang Monpa",
"twn": "Twendi",
"two": "Tswapong",
"twp": "Ere",
"twq": "Tasawaq",
"twr": "Southwestern Tarahumara",
"twt": "Turiwára",
"twu": "Termanu",
"tww": "Tuwari",
"twx": "Tewe",
"twy": "Tawoyan",
"txa": "Tombonuo",
"txb": "Tokharian B",
"txc": "Tsetsaut",
"txe": "Totoli",
"txg": "Tangut",
"txh": "Thracian",
"txi": "Ikpeng",
"txj": "Tarjumo",
"txm": "Tomini",
"txn": "West Tarangan",
"txo": "Toto",
"txq": "Tii",
"txr": "Tartessian",
"txs": "Tonsea",
"txt": "Citak",
"txu": "Kayapó",
"txx": "Tatana",
"txy": "Tanosy Malagasy",
"ty": "Tahitian",
"tya": "Tauya",
"tye": "Kyanga",
"tyh": "O'du",
"tyi": "Teke-Tsaayi",
"tyj": "Tai Do; Tai Yo",
"tyl": "Thu Lao",
"tyn": "Kombai",
"typ": "Thaypan",
"tyr": "Tai Daeng",
"tys": "Tày Sa Pa",
"tyt": "Tày Tac",
"tyu": "Kua",
"tyv": "Tuvinian",
"tyx": "Teke-Tyee",
"tyy": "Tiyaa",
"tyz": "Tày",
"tza": "Tanzanian Sign Language",
"tzh": "Tzeltal",
"tzj": "Tz'utujil",
"tzl": "Talossan",
"tzm": "Central Atlas Tamazight",
"tzn": "Tugun",
"tzo": "Tzotzil",
"tzx": "Tabriak",
"uam": "Uamué",
"uan": "Kuan",
"uar": "Tairuma",
"uba": "Ubang",
"ubi": "Ubi",
"ubl": "Buhi'non Bikol",
"ubr": "Ubir",
"ubu": "Umbu-Ungu",
"uby": "Ubykh",
"uda": "Uda",
"ude": "Udihe",
"udg": "Muduga",
"udi": "Udi",
"udj": "Ujir",
"udl": "Wuzlam",
"udm": "Udmurt",
"udu": "Uduk",
"ues": "Kioko",
"ufi": "Ufim",
"ug": "Uighur; Uyghur",
"uga": "Ugaritic",
"ugb": "Kuku-Ugbanh",
"uge": "Ughele",
"ugh": "Kubachi",
"ugn": "Ugandan Sign Language",
"ugo": "Ugong",
"ugy": "Uruguayan Sign Language",
"uha": "Uhami",
"uhn": "Damal",
"uis": "Uisai",
"uiv": "Iyive",
"uji": "Tanjijili",
"uk": "Ukrainian",
"uka": "Kaburi",
"ukg": "Ukuriguma",
"ukh": "Ukhwejo",
"uki": "Kui (India)",
"ukk": "Muak Sa-aak",
"ukl": "Ukrainian Sign Language",
"ukp": "Ukpe-Bayobiri",
"ukq": "Ukwa",
"uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language",
"uku": "Ukue",
"ukv": "Kuku",
"ukw": "Ukwuani-Aboh-Ndoni",
"uky": "Kuuk-Yak",
"ula": "Fungwa",
"ulb": "Ulukwumi",
"ulc": "Ulch",
"ule": "Lule",
"ulf": "Usku; Afra",
"uli": "Ulithian",
"ulk": "Meriam Mir",
"ull": "Ullatan",
"ulm": "Ulumanda'",
"uln": "Unserdeutsch",
"ulu": "Uma' Lung",
"ulw": "Ulwa",
"uma": "Umatilla",
"umb": "Umbundu",
"umc": "Marrucinian",
"umd": "Umbindhamu",
"umg": "Morrobalama; Umbuygamu",
"umi": "Ukit",
"umm": "Umon",
"umn": "Makyan Naga",
"umo": "Umotína",
"ump": "Umpila",
"umr": "Umbugarla",
"ums": "Pendau",
"umu": "Munsee",
"una": "North Watut",
"und": "Undetermined",
"une": "Uneme",
"ung": "Ngarinyin",
"uni": "Uni",
"unk": "Enawené-Nawé",
"unm": "Unami",
"unn": "Kurnai",
"unr": "Mundari",
"unu": "Unubahe",
"unx": "Munda",
"unz": "Unde Kaili",
"uon": "Kulon",
"upi": "Umeda",
"upv": "Uripiv-Wala-Rano-Atchin",
"ur": "Urdu",
"ura": "Urarina",
"urb": "Urubú-Kaapor; Kaapor",
"urc": "Urningangg",
"ure": "Uru",
"urf": "Uradhi",
"urg": "Urigina",
"urh": "Urhobo",
"uri": "Urim",
"urj": "Uralic languages",
"urk": "Urak Lawoi'",
"url": "Urali",
"urm": "Urapmin",
"urn": "Uruangnirin",
"uro": "Ura (Papua New Guinea)",
"urp": "Uru-Pa-In",
"urr": "Lehalurup; Löyöp",
"urt": "Urat",
"uru": "Urumi",
"urv": "Uruava",
"urw": "Sop",
"urx": "Urimo",
"ury": "Orya",
"urz": "Uru-Eu-Wau-Wau",
"usa": "Usarufa",
"ush": "Ushojo",
"usi": "Usui",
"usk": "Usaghade",
"usp": "Uspanteco",
"uss": "us-Saare",
"usu": "Uya",
"uta": "Otank",
"ute": "Ute-Southern Paiute",
"uth": "ut-Hun",
"utp": "Amba (Solomon Islands)",
"utr": "Etulo",
"utu": "Utu",
"uum": "Urum",
"uur": "Ura (Vanuatu)",
"uuu": "U",
"uve": "West Uvean; Fagauvea",
"uvh": "Uri",
"uvl": "Lote",
"uwa": "Kuku-Uwanh",
"uya": "Doko-Uyanga",
"uz": "Uzbek",
"uzn": "Northern Uzbek",
"uzs": "Southern Uzbek",
"vaa": "Vaagri Booli",
"vae": "Vale",
"vaf": "Vafsi",
"vag": "Vagla",
"vah": "Varhadi-Nagpuri",
"vai": "Vai",
"vaj": "Sekele; Northwestern ǃKung; Vasekele",
"val": "Vehes",
"vam": "Vanimo",
"van": "Valman",
"vao": "Vao",
"vap": "Vaiphei",
"var": "Huarijio",
"vas": "Vasavi",
"vau": "Vanuma",
"vav": "Varli",
"vay": "Wayu",
"vbb": "Southeast Babar",
"vbk": "Southwestern Bontok",
"ve": "Venda",
"vec": "Venetian",
"ved": "Veddah",
"vel": "Veluws",
"vem": "Vemgo-Mabas",
"veo": "Ventureño",
"vep": "Veps",
"ver": "Mom Jango",
"vgr": "Vaghri",
"vgt": "Vlaamse Gebarentaal; Flemish Sign Language",
"vi": "Vietnamese",
"vic": "Virgin Islands Creole English",
"vid": "Vidunda",
"vif": "Vili",
"vig": "Viemo",
"vil": "Vilela",
"vin": "Vinza",
"vis": "Vishavan",
"vit": "Viti",
"viv": "Iduna",
"vka": "Kariyarra",
"vkj": "Kujarge",
"vkk": "Kaur",
"vkl": "Kulisusu",
"vkm": "Kamakan",
"vkn": "Koro Nulu",
"vko": "Kodeoha",
"vkp": "Korlai Creole Portuguese",
"vkt": "Tenggarong Kutai Malay",
"vku": "Kurrama",
"vkz": "Koro Zuba",
"vlp": "Valpei",
"vls": "Vlaams",
"vma": "Martuyhunira",
"vmb": "Barbaram",
"vmc": "Juxtlahuaca Mixtec",
"vmd": "Mudu Koraga",
"vme": "East Masela",
"vmf": "Mainfränkisch",
"vmg": "Lungalunga",
"vmh": "Maraghei",
"vmi": "Miwa",
"vmj": "Ixtayutla Mixtec",
"vmk": "Makhuwa-Shirima",
"vml": "Malgana",
"vmm": "Mitlatongo Mixtec",
"vmp": "Soyaltepec Mazatec",
"vmq": "Soyaltepec Mixtec",
"vmr": "Marenje",
"vms": "Moksela",
"vmu": "Muluridyi",
"vmv": "Valley Maidu",
"vmw": "Makhuwa",
"vmx": "Tamazola Mixtec",
"vmy": "Ayautla Mazatec",
"vmz": "Mazatlán Mazatec",
"vnk": "Vano; Lovono",
"vnm": "Vinmavis; Neve'ei",
"vnp": "Vunapu",
"vo": "Volapük",
"vor": "Voro",
"vot": "Votic",
"vra": "Vera'a",
"vro": "Võro",
"vrs": "Varisi",
"vrt": "Burmbar; Banam Bay",
"vsi": "Moldova Sign Language",
"vsl": "Venezuelan Sign Language",
"vsv": "Valencian Sign Language; Llengua de signes valenciana",
"vto": "Vitou",
"vum": "Vumbu",
"vun": "Vunjo",
"vut": "Vute",
"vwa": "Awa (China)",
"wa": "Walloon",
"waa": "Walla Walla",
"wab": "Wab",
"wac": "Wasco-Wishram",
"wad": "Wamesa; Wondama",
"wae": "Walser",
"waf": "Wakoná",
"wag": "Wa'ema",
"wah": "Watubela",
"wai": "Wares",
"waj": "Waffa",
"wak": "Wakashan languages",
"wal": "Wolaytta; Wolaitta",
"wam": "Wampanoag",
"wan": "Wan",
"wao": "Wappo",
"wap": "Wapishana",
"waq": "Wagiman",
"war": "Waray (Philippines)",
"was": "Washo",
"wat": "Kaninuwa",
"wau": "Waurá",
"wav": "Waka",
"waw": "Waiwai",
"wax": "Watam; Marangis",
"way": "Wayana",
"waz": "Wampur",
"wba": "Warao",
"wbb": "Wabo",
"wbe": "Waritai",
"wbf": "Wara",
"wbh": "Wanda",
"wbi": "Vwanji",
"wbj": "Alagwa",
"wbk": "Waigali",
"wbl": "Wakhi",
"wbm": "Wa",
"wbp": "Warlpiri",
"wbq": "Waddar",
"wbr": "Wagdi",
"wbs": "West Bengal Sign Language",
"wbt": "Warnman",
"wbv": "Wajarri",
"wbw": "Woi",
"wca": "Yanomámi",
"wci": "Waci Gbe",
"wdd": "Wandji",
"wdg": "Wadaginam",
"wdj": "Wadjiginy",
"wdk": "Wadikali",
"wdt": "Wendat",
"wdu": "Wadjigu",
"wdy": "Wadjabangayi",
"wea": "Wewaw",
"wec": "Wè Western",
"wed": "Wedau",
"weg": "Wergaia",
"weh": "Weh",
"wei": "Kiunum",
"wem": "Weme Gbe",
"wen": "Sorbian languages",
"weo": "Wemale",
"wep": "Westphalien",
"wer": "Weri",
"wes": "Cameroon Pidgin",
"wet": "Perai",
"weu": "Rawngtu Chin",
"wew": "Wejewa",
"wfg": "Yafi; Zorop",
"wga": "Wagaya",
"wgb": "Wagawaga",
"wgg": "Wangkangurru; Wangganguru",
"wgi": "Wahgi",
"wgo": "Waigeo",
"wgu": "Wirangu",
"wgy": "Warrgamay",
"wha": "Sou Upaa; Manusela",
"whg": "North Wahgi",
"whk": "Wahau Kenyah",
"whu": "Wahau Kayan",
"wib": "Southern Toussian",
"wic": "Wichita",
"wie": "Wik-Epa",
"wif": "Wik-Keyangan",
"wig": "Wik Ngathan",
"wih": "Wik-Me'anha",
"wii": "Minidien",
"wij": "Wik-Iiyanh",
"wik": "Wikalkan",
"wil": "Wilawila",
"wim": "Wik-Mungkan",
"win": "Ho-Chunk",
"wir": "Wiraféd",
"wiu": "Wiru",
"wiv": "Vitu",
"wiy": "Wiyot",
"wja": "Waja",
"wji": "Warji",
"wka": "Kw'adza",
"wkb": "Kumbaran",
"wkd": "Wakde; Mo",
"wkl": "Kalanadi",
"wkr": "Keerray-Woorroong",
"wku": "Kunduvadi",
"wkw": "Wakawaka",
"wky": "Wangkayutyuru",
"wla": "Walio",
"wlc": "Mwali Comorian",
"wle": "Wolane",
"wlg": "Kunbarlang",
"wlh": "Welaun",
"wli": "Waioli",
"wlk": "Wailaki",
"wll": "Wali (Sudan)",
"wlm": "Middle Welsh",
"wlo": "Wolio",
"wlr": "Wailapa",
"wls": "Wallisian",
"wlu": "Wuliwuli",
"wlv": "Wichí Lhamtés Vejoz",
"wlw": "Walak",
"wlx": "Wali (Ghana)",
"wly": "Waling",
"wma": "Mawa (Nigeria)",
"wmb": "Wambaya",
"wmc": "Wamas",
"wmd": "Mamaindé",
"wme": "Wambule",
"wmg": "Western Minyag",
"wmh": "Waima'a",
"wmi": "Wamin",
"wmm": "Maiwa (Indonesia)",
"wmn": "Waamwang",
"wmo": "Wom (Papua New Guinea)",
"wms": "Wambon",
"wmt": "Walmajarri",
"wmw": "Mwani",
"wmx": "Womo",
"wnb": "Wanambre",
"wnc": "Wantoat",
"wnd": "Wandarang",
"wne": "Waneci",
"wng": "Wanggom",
"wni": "Ndzwani Comorian",
"wnk": "Wanukaka",
"wnm": "Wanggamala",
"wnn": "Wunumara",
"wno": "Wano",
"wnp": "Wanap",
"wnu": "Usan",
"wnw": "Wintu",
"wny": "Wanyi; Waanyi",
"wo": "Wolof",
"woa": "Kuwema; Tyaraity",
"wob": "Wè Northern",
"woc": "Wogeo",
"wod": "Wolani",
"woe": "Woleaian",
"wof": "Gambian Wolof",
"wog": "Wogamusin",
"woi": "Kamang",
"wok": "Longto",
"wom": "Wom (Nigeria)",
"won": "Wongo",
"woo": "Manombai",
"wor": "Woria",
"wos": "Hanga Hundi",
"wow": "Wawonii",
"woy": "Weyto",
"wpc": "Maco",
"wrb": "Waluwarra; Warluwara",
"wrg": "Warungu; Gudjal",
"wrh": "Wiradjuri",
"wri": "Wariyangga",
"wrk": "Garrwa",
"wrl": "Warlmanpa",
"wrm": "Warumungu",
"wrn": "Warnang",
"wro": "Worrorra",
"wrp": "Waropen",
"wrr": "Wardaman",
"wrs": "Waris",
"wru": "Waru",
"wrv": "Waruna",
"wrw": "Gugu Warra",
"wrx": "Wae Rana",
"wry": "Merwari",
"wrz": "Waray (Australia)",
"wsa": "Warembori",
"wsg": "Adilabad Gondi",
"wsi": "Wusi",
"wsk": "Waskia",
"wsr": "Owenia",
"wss": "Wasa",
"wsu": "Wasu",
"wsv": "Wotapuri-Katarqalai",
"wtf": "Watiwa",
"wth": "Wathawurrung",
"wti": "Berta",
"wtk": "Watakataui",
"wtm": "Mewati",
"wtw": "Wotu",
"wua": "Wikngenchera",
"wub": "Wunambal",
"wud": "Wudu",
"wuh": "Wutunhua",
"wul": "Silimo",
"wum": "Wumbvu",
"wun": "Bungu",
"wur": "Wurrugu",
"wut": "Wutung",
"wuu": "Wu Chinese",
"wuv": "Wuvulu-Aua",
"wux": "Wulna",
"wuy": "Wauyai",
"wwa": "Waama",
"wwb": "Wakabunga",
"wwo": "Wetamut; Dorig",
"wwr": "Warrwa",
"www": "Wawa",
"wxa": "Waxianghua",
"wxw": "Wardandi",
"wyb": "Wangaaybuwan-Ngiyambaa",
"wyi": "Woiwurrung",
"wym": "Wymysorys",
"wyn": "Wyandot",
"wyr": "Wayoró",
"wyy": "Western Fijian",
"xaa": "Andalusian Arabic",
"xab": "Sambe",
"xac": "Kachari",
"xad": "Adai",
"xae": "Aequian",
"xag": "Aghwan",
"xai": "Kaimbé",
"xaj": "Ararandewára",
"xak": "Máku",
"xal": "Kalmyk; Oirat",
"xam": "ǀXam",
"xan": "Xamtanga",
"xao": "Khao",
"xap": "Apalachee",
"xaq": "Aquitanian",
"xar": "Karami",
"xas": "Kamas",
"xat": "Katawixi",
"xau": "Kauwera",
"xav": "Xavánte",
"xaw": "Kawaiisu",
"xay": "Kayan Mahakam",
"xbb": "Lower Burdekin",
"xbc": "Bactrian",
"xbd": "Bindal",
"xbe": "Bigambal",
"xbg": "Bunganditj",
"xbi": "Kombio",
"xbj": "Birrpayi",
"xbm": "Middle Breton",
"xbn": "Kenaboi",
"xbo": "Bolgarian",
"xbp": "Bibbulman",
"xbr": "Kambera",
"xbw": "Kambiwá",
"xby": "Batjala; Batyala",
"xcb": "Cumbric",
"xcc": "Camunic",
"xce": "Celtiberian",
"xcg": "Cisalpine Gaulish",
"xch": "Chemakum; Chimakum",
"xcl": "Classical Armenian",
"xcm": "Comecrudo",
"xcn": "Cotoname",
"xco": "Chorasmian",
"xcr": "Carian",
"xct": "Classical Tibetan",
"xcu": "Curonian",
"xcv": "Chuvantsy",
"xcw": "Coahuilteco",
"xcy": "Cayuse",
"xda": "Darkinyung",
"xdc": "Dacian",
"xdk": "Dharuk",
"xdm": "Edomite",
"xdo": "Kwandu",
"xdq": "Kaitag",
"xdy": "Malayic Dayak",
"xeb": "Eblan",
"xed": "Hdi",
"xeg": "ǁXegwi",
"xel": "Kelo",
"xem": "Kembayan",
"xep": "Epi-Olmec",
"xer": "Xerénte",
"xes": "Kesawai",
"xet": "Xetá",
"xeu": "Keoru-Ahia",
"xfa": "Faliscan",
"xga": "Galatian",
"xgb": "Gbin",
"xgd": "Gudang",
"xgf": "Gabrielino-Fernandeño",
"xgg": "Goreng",
"xgi": "Garingbal",
"xgl": "Galindan",
"xgm": "Dharumbal; Guwinmal",
"xgn": "Mongolian languages",
"xgr": "Garza",
"xgu": "Unggumi",
"xgw": "Guwa",
"xh": "Xhosa",
"xha": "Harami",
"xhc": "Hunnic",
"xhd": "Hadrami",
"xhe": "Khetrani",
"xhm": "Middle Khmer (1400 to 1850 CE)",
"xhr": "Hernican",
"xht": "Hattic",
"xhu": "Hurrian",
"xhv": "Khua",
"xib": "Iberian",
"xii": "Xiri",
"xil": "Illyrian",
"xin": "Xinca",
"xir": "Xiriâna",
"xis": "Kisan",
"xiv": "Indus Valley Language",
"xiy": "Xipaya",
"xjb": "Minjungbal",
"xjt": "Jaitmatang",
"xka": "Kalkoti",
"xkb": "Northern Nago",
"xkc": "Kho'ini",
"xkd": "Mendalam Kayan",
"xke": "Kereho",
"xkf": "Khengkha",
"xkg": "Kagoro",
"xki": "Kenyan Sign Language",
"xkj": "Kajali",
"xkk": "Kachok; Kaco'",
"xkl": "Mainstream Kenyah",
"xkn": "Kayan River Kayan",
"xko": "Kiorr",
"xkp": "Kabatei",
"xkq": "Koroni",
"xkr": "Xakriabá",
"xks": "Kumbewaha",
"xkt": "Kantosi",
"xku": "Kaamba",
"xkv": "Kgalagadi",
"xkw": "Kembra",
"xkx": "Karore",
"xky": "Uma' Lasan",
"xkz": "Kurtokha",
"xla": "Kamula",
"xlb": "Loup B",
"xlc": "Lycian",
"xld": "Lydian",
"xle": "Lemnian",
"xlg": "Ligurian (Ancient)",
"xli": "Liburnian",
"xln": "Alanic",
"xlo": "Loup A",
"xlp": "Lepontic",
"xls": "Lusitanian",
"xlu": "Cuneiform Luwian",
"xly": "Elymian",
"xma": "Mushungulu",
"xmb": "Mbonga",
"xmc": "Makhuwa-Marrevone",
"xmd": "Mbudum",
"xme": "Median",
"xmf": "Mingrelian",
"xmg": "Mengaka",
"xmh": "Kugu-Muminh",
"xmj": "Majera",
"xmk": "Ancient Macedonian",
"xml": "Malaysian Sign Language",
"xmm": "Manado Malay",
"xmn": "Manichaean Middle Persian",
"xmo": "Morerebi",
"xmp": "Kuku-Mu'inh",
"xmq": "Kuku-Mangk",
"xmr": "Meroitic",
"xms": "Moroccan Sign Language",
"xmt": "Matbat",
"xmu": "Kamu",
"xmv": "Antankarana Malagasy; Tankarana Malagasy",
"xmw": "Tsimihety Malagasy",
"xmx": "Salawati; Maden",
"xmy": "Mayaguduna",
"xmz": "Mori Bawah",
"xna": "Ancient North Arabian",
"xnb": "Kanakanabu",
"xnd": "Na-Dene languages",
"xng": "Middle Mongolian",
"xnh": "Kuanhua",
"xni": "Ngarigu",
"xnj": "Ngoni (Tanzania)",
"xnk": "Nganakarti",
"xnm": "Ngumbarl",
"xnn": "Northern Kankanay",
"xno": "Anglo-Norman",
"xnq": "Ngoni (Mozambique)",
"xnr": "Kangri",
"xns": "Kanashi",
"xnt": "Narragansett",
"xnu": "Nukunul",
"xny": "Nyiyaparli",
"xnz": "Kenzi; Mattoki",
"xoc": "O'chi'chi'",
"xod": "Kokoda",
"xog": "Soga",
"xoi": "Kominimung",
"xok": "Xokleng",
"xom": "Komo (Sudan)",
"xon": "Konkomba",
"xoo": "Xukurú",
"xop": "Kopar",
"xor": "Korubo",
"xow": "Kowaki",
"xpa": "Pirriya",
"xpb": "Northeastern Tasmanian; Pyemmairrener",
"xpc": "Pecheneg",
"xpd": "Oyster Bay Tasmanian",
"xpe": "Liberia Kpelle",
"xpf": "Southeast Tasmanian; Nuenonne",
"xpg": "Phrygian",
"xph": "North Midlands Tasmanian; Tyerrenoterpanner",
"xpi": "Pictish",
"xpj": "Mpalitjanh",
"xpk": "Kulina Pano",
"xpl": "Port Sorell Tasmanian",
"xpm": "Pumpokol",
"xpn": "Kapinawá",
"xpo": "Pochutec",
"xpp": "Puyo-Paekche",
"xpq": "Mohegan-Pequot",
"xpr": "Parthian",
"xps": "Pisidian",
"xpt": "Punthamara",
"xpu": "Punic",
"xpv": "Northern Tasmanian; Tommeginne",
"xpw": "Northwestern Tasmanian; Peerapper",
"xpx": "Southwestern Tasmanian; Toogee",
"xpy": "Puyo",
"xpz": "Bruny Island Tasmanian",
"xqa": "Karakhanid",
"xqt": "Qatabanian",
"xra": "Krahô",
"xrb": "Eastern Karaboro",
"xrd": "Gundungurra",
"xre": "Kreye",
"xrg": "Minang",
"xri": "Krikati-Timbira",
"xrm": "Armazic",
"xrn": "Arin",
"xrr": "Raetic",
"xrt": "Aranama-Tamique",
"xru": "Marriammu",
"xrw": "Karawa",
"xsa": "Sabaean",
"xsb": "Sambal",
"xsc": "Scythian",
"xsd": "Sidetic",
"xse": "Sempan",
"xsh": "Shamang",
"xsi": "Sio",
"xsj": "Subi",
"xsl": "South Slavey",
"xsm": "Kasem",
"xsn": "Sanga (Nigeria)",
"xso": "Solano",
"xsp": "Silopi",
"xsq": "Makhuwa-Saka",
"xsr": "Sherpa",
"xss": "Assan",
"xsu": "Sanumá",
"xsv": "Sudovian",
"xsy": "Saisiyat",
"xta": "Alcozauca Mixtec",
"xtb": "Chazumba Mixtec",
"xtc": "Katcha-Kadugli-Miri",
"xtd": "Diuxi-Tilantongo Mixtec",
"xte": "Ketengban",
"xtg": "Transalpine Gaulish",
"xth": "Yitha Yitha",
"xti": "Sinicahua Mixtec",
"xtj": "San Juan Teita Mixtec",
"xtl": "Tijaltepec Mixtec",
"xtm": "Magdalena Peñasco Mixtec",
"xtn": "Northern Tlaxiaco Mixtec",
"xto": "Tokharian A",
"xtp": "San Miguel Piedras Mixtec",
"xtq": "Tumshuqese",
"xtr": "Early Tripuri",
"xts": "Sindihui Mixtec",
"xtt": "Tacahua Mixtec",
"xtu": "Cuyamecalco Mixtec",
"xtv": "Thawa",
"xtw": "Tawandê",
"xty": "Yoloxochitl Mixtec",
"xua": "Alu Kurumba",
"xub": "Betta Kurumba",
"xud": "Umiida",
"xug": "Kunigami",
"xuj": "Jennu Kurumba",
"xul": "Ngunawal; Nunukul",
"xum": "Umbrian",
"xun": "Unggaranggu",
"xuo": "Kuo",
"xup": "Upper Umpqua",
"xur": "Urartian",
"xut": "Kuthant",
"xuu": "Kxoe; Khwedam",
"xve": "Venetic",
"xvi": "Kamviri",
"xvn": "Vandalic",
"xvo": "Volscian",
"xvs": "Vestinian",
"xwa": "Kwaza",
"xwc": "Woccon",
"xwd": "Wadi Wadi",
"xwe": "Xwela Gbe",
"xwg": "Kwegu",
"xwj": "Wajuk",
"xwk": "Wangkumara",
"xwl": "Western Xwla Gbe",
"xwo": "Written Oirat",
"xwr": "Kwerba Mamberamo",
"xwt": "Wotjobaluk",
"xww": "Wemba Wemba",
"xxb": "Boro (Ghana)",
"xxk": "Ke'o",
"xxm": "Minkin",
"xxr": "Koropó",
"xxt": "Tambora",
"xya": "Yaygir",
"xyb": "Yandjibara",
"xyj": "Mayi-Yapi",
"xyk": "Mayi-Kulan",
"xyl": "Yalakalore",
"xyt": "Mayi-Thakurti",
"xyy": "Yorta Yorta",
"xzh": "Zhang-Zhung",
"xzm": "Zemgalian",
"xzp": "Ancient Zapotec",
"yaa": "Yaminahua",
"yab": "Yuhup",
"yac": "Pass Valley Yali",
"yad": "Yagua",
"yae": "Pumé",
"yaf": "Yaka (Democratic Republic of Congo)",
"yag": "Yámana",
"yah": "Yazgulyam",
"yai": "Yagnobi",
"yaj": "Banda-Yangere",
"yak": "Yakama",
"yal": "Yalunka",
"yam": "Yamba",
"yan": "Mayangna",
"yao": "Yao",
"yap": "Yapese",
"yaq": "Yaqui",
"yar": "Yabarana",
"yas": "Nugunu (Cameroon)",
"yat": "Yambeta",
"yau": "Yuwana",
"yav": "Yangben",
"yaw": "Yawalapití",
"yax": "Yauma",
"yay": "Agwagwune",
"yaz": "Lokaa",
"yba": "Yala",
"ybb": "Yemba",
"ybe": "West Yugur",
"ybh": "Yakha",
"ybi": "Yamphu",
"ybj": "Hasha",
"ybk": "Bokha",
"ybl": "Yukuben",
"ybm": "Yaben",
"ybn": "Yabaâna",
"ybo": "Yabong",
"ybx": "Yawiyo",
"yby": "Yaweyuha",
"ych": "Chesu",
"ycl": "Lolopo",
"ycn": "Yucuna",
"ycp": "Chepya",
"yda": "Yanda",
"ydd": "Eastern Yiddish",
"yde": "Yangum Dey",
"ydg": "Yidgha",
"ydk": "Yoidik",
"yea": "Ravula",
"yec": "Yeniche",
"yee": "Yimas",
"yei": "Yeni",
"yej": "Yevanic",
"yel": "Yela",
"yer": "Tarok",
"yes": "Nyankpa",
"yet": "Yetfa",
"yeu": "Yerukula",
"yev": "Yapunda",
"yey": "Yeyi",
"yga": "Malyangapa",
"ygi": "Yiningayi",
"ygl": "Yangum Gel",
"ygm": "Yagomi",
"ygp": "Gepo",
"ygr": "Yagaria",
"ygs": "Yolŋu Sign Language",
"ygu": "Yugul",
"ygw": "Yagwoia",
"yha": "Baha Buyang",
"yhd": "Judeo-Iraqi Arabic",
"yhl": "Hlepho Phowa",
"yhs": "Yan-nhaŋu Sign Language",
"yi": "Yiddish",
"yia": "Yinggarda",
"yif": "Ache",
"yig": "Wusa Nasu",
"yih": "Western Yiddish",
"yii": "Yidiny",
"yij": "Yindjibarndi",
"yik": "Dongshanba Lalo",
"yil": "Yindjilandji",
"yim": "Yimchungru Naga",
"yin": "Riang Lai; Yinchia",
"yip": "Pholo",
"yiq": "Miqie",
"yir": "North Awyu",
"yis": "Yis",
"yit": "Eastern Lalu",
"yiu": "Awu",
"yiv": "Northern Nisu",
"yix": "Axi Yi",
"yiz": "Azhe",
"yka": "Yakan",
"ykg": "Northern Yukaghir",
"yki": "Yoke",
"ykk": "Yakaikeke",
"ykl": "Khlula",
"ykm": "Kap",
"ykn": "Kua-nsi",
"yko": "Yasa",
"ykr": "Yekora",
"ykt": "Kathu",
"yku": "Kuamasi",
"yky": "Yakoma",
"yla": "Yaul",
"ylb": "Yaleba",
"yle": "Yele",
"ylg": "Yelogu",
"yli": "Angguruk Yali",
"yll": "Yil",
"ylm": "Limi",
"yln": "Langnian Buyang",
"ylo": "Naluo Yi",
"ylr": "Yalarnnga",
"ylu": "Aribwaung",
"yly": "Nyâlayu; Nyelâyu",
"ymb": "Yambes",
"ymc": "Southern Muji",
"ymd": "Muda",
"yme": "Yameo",
"ymg": "Yamongeri",
"ymh": "Mili",
"ymi": "Moji",
"ymk": "Makwe",
"yml": "Iamalele",
"ymm": "Maay",
"ymn": "Yamna; Sunum",
"ymo": "Yangum Mon",
"ymp": "Yamap",
"ymq": "Qila Muji",
"ymr": "Malasar",
"yms": "Mysian",
"ymx": "Northern Muji",
"ymz": "Muzi",
"yna": "Aluo",
"ynd": "Yandruwandha",
"yne": "Lang'e",
"yng": "Yango",
"ynk": "Naukan Yupik",
"ynl": "Yangulam",
"ynn": "Yana",
"yno": "Yong",
"ynq": "Yendang",
"yns": "Yansi",
"ynu": "Yahuna",
"yo": "Yoruba",
"yob": "Yoba",
"yog": "Yogad",
"yoi": "Yonaguni",
"yok": "Yokuts",
"yol": "Yola",
"yom": "Yombe",
"yon": "Yongkom",
"yot": "Yotti",
"yox": "Yoron",
"yoy": "Yoy",
"ypa": "Phala",
"ypb": "Labo Phowa",
"ypg": "Phola",
"yph": "Phupha",
"ypk": "Yupik languages",
"ypm": "Phuma",
"ypn": "Ani Phowa",
"ypo": "Alo Phola",
"ypp": "Phupa",
"ypz": "Phuza",
"yra": "Yerakai",
"yrb": "Yareba",
"yre": "Yaouré",
"yrk": "Nenets",
"yrl": "Nhengatu",
"yrm": "Yirrk-Mel",
"yrn": "Yerong",
"yro": "Yaroamë",
"yrs": "Yarsun",
"yrw": "Yarawata",
"yry": "Yarluyandi",
"ysc": "Yassic",
"ysd": "Samatao",
"ysg": "Sonaga",
"ysl": "Yugoslavian Sign Language",
"ysm": "Myanmar Sign Language",
"ysn": "Sani",
"yso": "Nisi (China)",
"ysp": "Southern Lolopo",
"ysr": "Sirenik Yupik",
"yss": "Yessan-Mayo",
"ysy": "Sanie",
"yta": "Talu",
"ytl": "Tanglang",
"ytp": "Thopho",
"ytw": "Yout Wam",
"yty": "Yatay",
"yua": "Yucateco; Yucatec Maya",
"yub": "Yugambal",
"yuc": "Yuchi",
"yud": "Judeo-Tripolitanian Arabic",
"yue": "Yue Chinese; Cantonese",
"yuf": "Havasupai-Walapai-Yavapai",
"yug": "Yug",
"yui": "Yurutí",
"yuj": "Karkar-Yuri",
"yuk": "Yuki",
"yul": "Yulu",
"yum": "Quechan",
"yun": "Bena (Nigeria)",
"yup": "Yukpa",
"yuq": "Yuqui",
"yur": "Yurok",
"yut": "Yopno",
"yuw": "Yau (Morobe Province)",
"yux": "Southern Yukaghir",
"yuy": "East Yugur",
"yuz": "Yuracare",
"yva": "Yawa",
"yvt": "Yavitero",
"ywa": "Kalou",
"ywg": "Yinhawangka",
"ywl": "Western Lalu",
"ywn": "Yawanawa",
"ywq": "Wuding-Luquan Yi",
"ywr": "Yawuru",
"ywt": "Xishanba Lalo; Central Lalo",
"ywu": "Wumeng Nasu",
"yww": "Yawarawarga",
"yxa": "Mayawali",
"yxg": "Yagara",
"yxl": "Yardliyawarra",
"yxm": "Yinwum",
"yxu": "Yuyu",
"yxy": "Yabula Yabula",
"yyr": "Yir Yoront",
"yyu": "Yau (Sandaun Province)",
"yyz": "Ayizi",
"yzg": "E'ma Buyang",
"yzk": "Zokhuo",
"za": "Zhuang; Chuang",
"zaa": "Sierra de Juárez Zapotec",
"zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec",
"zac": "Ocotlán Zapotec",
"zad": "Cajonos Zapotec",
"zae": "Yareni Zapotec",
"zaf": "Ayoquesco Zapotec",
"zag": "Zaghawa",
"zah": "Zangwal",
"zai": "Isthmus Zapotec",
"zaj": "Zaramo",
"zak": "Zanaki",
"zal": "Zauzou",
"zam": "Miahuatlán Zapotec",
"zao": "Ozolotepec Zapotec",
"zap": "Zapotec",
"zaq": "Aloápam Zapotec",
"zar": "Rincón Zapotec",
"zas": "Santo Domingo Albarradas Zapotec",
"zat": "Tabaa Zapotec",
"zau": "Zangskari",
"zav": "Yatzachi Zapotec",
"zaw": "Mitla Zapotec",
"zax": "Xadani Zapotec",
"zay": "Zayse-Zergulla; Zaysete",
"zaz": "Zari",
"zba": "Balaibalan",
"zbc": "Central Berawan",
"zbe": "East Berawan",
"zbl": "Blissymbols; Bliss; Blissymbolics",
"zbt": "Batui",
"zbu": "Bu (Bauchi State)",
"zbw": "West Berawan",
"zca": "Coatecas Altas Zapotec",
"zcd": "Las Delicias Zapotec",
"zch": "Central Hongshuihe Zhuang",
"zdj": "Ngazidja Comorian",
"zea": "Zeeuws",
"zeg": "Zenag",
"zeh": "Eastern Hongshuihe Zhuang",
"zen": "Zenaga",
"zga": "Kinga",
"zgb": "Guibei Zhuang",
"zgh": "Standard Moroccan Tamazight",
"zgm": "Minz Zhuang",
"zgn": "Guibian Zhuang",
"zgr": "Magori",
"zh": "Chinese",
"zhb": "Zhaba",
"zhd": "Dai Zhuang",
"zhi": "Zhire",
"zhn": "Nong Zhuang",
"zhw": "Zhoa",
"zhx": "Chinese (family)",
"zia": "Zia",
"zib": "Zimbabwe Sign Language",
"zik": "Zimakani",
"zil": "Zialo",
"zim": "Mesme",
"zin": "Zinza",
"ziw": "Zigula",
"ziz": "Zizilivakan",
"zka": "Kaimbulawa",
"zkb": "Koibal",
"zkd": "Kadu",
"zkg": "Koguryo",
"zkh": "Khorezmian",
"zkk": "Karankawa",
"zkn": "Kanan",
"zko": "Kott",
"zkp": "São Paulo Kaingáng",
"zkr": "Zakhring",
"zkt": "Kitan",
"zku": "Kaurna",
"zkv": "Krevinian",
"zkz": "Khazar",
"zla": "Zula",
"zle": "East Slavic languages",
"zlj": "Liujiang Zhuang",
"zlm": "Malay (individual language)",
"zln": "Lianshan Zhuang",
"zlq": "Liuqian Zhuang",
"zls": "South Slavic languages",
"zlw": "West Slavic languages",
"zma": "Manda (Australia)",
"zmb": "Zimba",
"zmc": "Margany",
"zmd": "Maridan",
"zme": "Mangerr",
"zmf": "Mfinu",
"zmg": "Marti Ke",
"zmh": "Makolkol",
"zmi": "Negeri Sembilan Malay",
"zmj": "Maridjabin",
"zmk": "Mandandanyi",
"zml": "Matngala",
"zmm": "Marimanindji; Marramaninyshi",
"zmn": "Mbangwe",
"zmo": "Molo",
"zmp": "Mpuono",
"zmq": "Mituku",
"zmr": "Maranunggu",
"zms": "Mbesa",
"zmt": "Maringarr",
"zmu": "Muruwari",
"zmv": "Mbariman-Gudhinma",
"zmw": "Mbo (Democratic Republic of Congo)",
"zmx": "Bomitaba",
"zmy": "Mariyedi",
"zmz": "Mbandja",
"zna": "Zan Gula",
"znd": "Zande languages",
"zne": "Zande (individual language)",
"zng": "Mang",
"znk": "Manangkari",
"zns": "Mangas",
"zoc": "Copainalá Zoque",
"zoh": "Chimalapa Zoque",
"zom": "Zou",
"zoo": "Asunción Mixtepec Zapotec",
"zoq": "Tabasco Zoque",
"zor": "Rayón Zoque",
"zos": "Francisco León Zoque",
"zpa": "Lachiguiri Zapotec",
"zpb": "Yautepec Zapotec",
"zpc": "Choapan Zapotec",
"zpd": "Southeastern Ixtlán Zapotec",
"zpe": "Petapa Zapotec",
"zpf": "San Pedro Quiatoni Zapotec",
"zpg": "Guevea De Humboldt Zapotec",
"zph": "Totomachapan Zapotec",
"zpi": "Santa María Quiegolani Zapotec",
"zpj": "Quiavicuzas Zapotec",
"zpk": "Tlacolulita Zapotec",
"zpl": "Lachixío Zapotec",
"zpm": "Mixtepec Zapotec",
"zpn": "Santa Inés Yatzechi Zapotec",
"zpo": "Amatlán Zapotec",
"zpp": "El Alto Zapotec",
"zpq": "Zoogocho Zapotec",
"zpr": "Santiago Xanica Zapotec",
"zps": "Coatlán Zapotec",
"zpt": "San Vicente Coatlán Zapotec",
"zpu": "Yalálag Zapotec",
"zpv": "Chichicapan Zapotec",
"zpw": "Zaniza Zapotec",
"zpx": "San Baltazar Loxicha Zapotec",
"zpy": "Mazaltepec Zapotec",
"zpz": "Texmelucan Zapotec",
"zqe": "Qiubei Zhuang",
"zra": "Kara (Korea)",
"zrg": "Mirgan",
"zrn": "Zerenkel",
"zro": "Záparo",
"zrp": "Zarphatic",
"zrs": "Mairasi",
"zsa": "Sarasira",
"zsk": "Kaskean",
"zsl": "Zambian Sign Language",
"zsm": "Standard Malay",
"zsr": "Southern Rincon Zapotec",
"zsu": "Sukurum",
"zte": "Elotepec Zapotec",
"ztg": "Xanaguía Zapotec",
"ztl": "Lapaguía-Guivini Zapotec",
"ztm": "San Agustín Mixtepec Zapotec",
"ztn": "Santa Catarina Albarradas Zapotec",
"ztp": "Loxicha Zapotec",
"ztq": "Quioquitani-Quierí Zapotec",
"zts": "Tilquiapan Zapotec",
"ztt": "Tejalapan Zapotec",
"ztu": "Güilá Zapotec",
"ztx": "Zaachila Zapotec",
"zty": "Yatee Zapotec",
"zu": "Zulu",
"zua": "Zeem",
"zuh": "Tokano",
"zum": "Kumzari",
"zun": "Zuni",
"zuy": "Zumaya",
"zwa": "Zay",
"zyb": "Yongbei Zhuang",
"zyg": "Yang Zhuang",
"zyj": "Youjiang Zhuang",
"zyn": "Yongnan Zhuang",
"zyp": "Zyphe Chin",
"zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki",
"zzj": "Zuojiang Zhuang"
} | 0 |
hf_public_repos/datasets/src/datasets/utils | hf_public_repos/datasets/src/datasets/utils/resources/multilingualities.json | {
"monolingual": "contains a single language",
"multilingual": "contains multiple languages",
"translation": "contains translated or aligned text",
"other": "other type of language distribution"
}
| 0 |
hf_public_repos/datasets/src/datasets/utils | hf_public_repos/datasets/src/datasets/utils/resources/readme_structure.yaml | name: "" # Filename comes here
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null # meaning it should not be checked.
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Dataset Structure"
allow_empty: false
allow_empty_text: true
subsections:
- name: "Data Instances"
allow_empty: false
allow_empty_text: true
subsections: null
- name: "Data Fields"
allow_empty: false
allow_empty_text: true
subsections: null
- name: "Data Splits"
allow_empty: false
allow_empty_text: true
subsections: null
- name: "Dataset Creation"
allow_empty: false
allow_empty_text: true
subsections:
- name: "Curation Rationale"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Source Data"
allow_empty: false
allow_empty_text: true
subsections:
- name: "Initial Data Collection and Normalization"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Who are the source language producers?"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Annotations"
allow_empty: false
allow_empty_text: true
subsections:
- name: "Annotation process"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Who are the annotators?"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Personal and Sensitive Information"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Considerations for Using the Data"
allow_empty: true
allow_empty_text: true
subsections:
- name: "Social Impact of Dataset"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Discussion of Biases"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Other Known Limitations"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Additional Information"
allow_empty: true
allow_empty_text: true
subsections:
- name: "Dataset Curators"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Licensing Information"
allow_empty: true
allow_empty_text: true
subsections: null
- name: "Citation Information"
allow_empty: false
allow_empty_text: true
subsections: null
- name: "Contributions"
allow_empty: false
allow_empty_text: false
subsections: null
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/formatting/tf_formatter.py | # Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
def __init__(self, features=None, **tf_tensor_kwargs):
super().__init__(features=features)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: F401 - import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
                # only ragged-stack 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
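                # e.g. (illustrative, not in the original source): ragged-stacking
                # [tf.constant([1, 2]), tf.constant([3])] yields a tf.RaggedTensor of shape [2, None]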
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import tensorflow as tf
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/formatting/torch_formatter.py | # Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
def __init__(self, features=None, **torch_tensor_kwargs):
super().__init__(features=features)
self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import torch
# support for torch, tf, jax etc.
if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/formatting/jax_formatter.py | # Copyright 2021 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
def __init__(self, features=None, device=None, **jnp_array_kwargs):
super().__init__(features=features)
import jax
from jaxlib.xla_client import Device
if isinstance(device, Device):
raise ValueError(
f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`."
)
self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # use a global variable since `jaxlib.xla_extension.Device` is serializable
        # with neither `pickle` nor `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
f"Device with string identifier {self.device} not listed among the available "
f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
f"device: {str(jax.devices()[0])}."
)
self.device = str(jax.devices()[0])
self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(device): device for device in jax.devices()}
def _consolidate(self, column):
import jax
import jax.numpy as jnp
if isinstance(column, list) and column:
if all(
isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return jnp.stack(column, axis=0)
return column
def _tensorize(self, value):
import jax
import jax.numpy as jnp
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
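            # e.g. (illustrative): with x64 disabled (the JAX default), integer columns are
            # returned as int32 arrays; enabling x64 keeps them as int64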
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
        # use a global variable since `jaxlib.xla_extension.Device` is serializable
        # with neither `pickle` nor `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def _recursive_tensorize(self, data_struct):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "jax.Array":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/formatting/formatting.py | # Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Mapping, MutableMapping
from functools import partial
# Lint as: python3
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from packaging import version
from .. import config
from ..features import Features
from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
from ..table import Table
from ..utils.py_utils import no_op_if_value_is_null
T = TypeVar("T")
RowFormat = TypeVar("RowFormat")
ColumnFormat = TypeVar("ColumnFormat")
BatchFormat = TypeVar("BatchFormat")
def _is_range_contiguous(key: range) -> bool:
return key.step == 1 and key.stop >= key.start
def _raise_bad_key_type(key: Any):
raise TypeError(
f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
)
def _query_table_with_indices_mapping(
table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
) -> pa.Table:
"""
    Query a pyarrow Table to extract the subtable that corresponds to the given key.
    The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into
    account a shuffling or an indices selection, for example.
The indices table must contain one column named "indices" of type uint64.
"""
if isinstance(key, int):
key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()
return _query_table(table, key)
if isinstance(key, slice):
key = range(*key.indices(indices.num_rows))
if isinstance(key, range):
if _is_range_contiguous(key) and key.start >= 0:
return _query_table(
table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
)
else:
pass # treat as an iterable
if isinstance(key, str):
table = table.select([key])
return _query_table(table, indices.column(0).to_pylist())
if isinstance(key, Iterable):
return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])
_raise_bad_key_type(key)
def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
"""
    Query a pyarrow Table to extract the subtable that corresponds to the given key.
"""
if isinstance(key, int):
return table.fast_slice(key % table.num_rows, 1)
if isinstance(key, slice):
key = range(*key.indices(table.num_rows))
if isinstance(key, range):
if _is_range_contiguous(key) and key.start >= 0:
return table.fast_slice(key.start, key.stop - key.start)
else:
pass # treat as an iterable
if isinstance(key, str):
return table.table.drop([column for column in table.column_names if column != key])
if isinstance(key, Iterable):
key = np.fromiter(key, np.int64)
if len(key) == 0:
return table.table.slice(0, 0)
# don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773)
return table.fast_gather(key % table.num_rows)
_raise_bad_key_type(key)
def _is_array_with_nulls(pa_array: pa.Array) -> bool:
return pa_array.null_count > 0
class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
Arrow extractor are used to extract data from pyarrow tables.
It makes it possible to extract rows, columns and batches.
These three extractions types have to be implemented.
"""
def extract_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError
def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]:
"""Return the first element of a batch (dict) as a row (dict)"""
return {key: array[0] for key, array in py_dict.items()}
class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
def extract_row(self, pa_table: pa.Table) -> pa.Table:
return pa_table
def extract_column(self, pa_table: pa.Table) -> pa.Array:
return pa_table.column(0)
def extract_batch(self, pa_table: pa.Table) -> pa.Table:
return pa_table
class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(pa_table.to_pydict())
def extract_column(self, pa_table: pa.Table) -> list:
return pa_table.column(0).to_pylist()
def extract_batch(self, pa_table: pa.Table) -> dict:
return pa_table.to_pydict()
class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
def __init__(self, **np_array_kwargs):
self.np_array_kwargs = np_array_kwargs
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(self.extract_batch(pa_table))
def extract_column(self, pa_table: pa.Table) -> np.ndarray:
return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])
def extract_batch(self, pa_table: pa.Table) -> dict:
return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}
def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
if isinstance(pa_array, pa.ChunkedArray):
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
if len(array) > 0:
if any(
(isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
or (isinstance(x, float) and np.isnan(x))
for x in array
):
return np.array(array, copy=False, dtype=object)
return np.array(array, copy=False)
class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)
def extract_column(self, pa_table: pa.Table) -> pd.Series:
return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]
def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.to_pandas(types_mapper=pandas_types_mapper)
class PythonFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
def decode_row(self, row: dict) -> dict:
return self.features.decode_example(row) if self.features else row
def decode_column(self, column: list, column_name: str) -> list:
return self.features.decode_column(column, column_name) if self.features else column
def decode_batch(self, batch: dict) -> dict:
return self.features.decode_batch(batch) if self.features else batch
class PandasFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
decode = (
{
column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
for column_name, feature in self.features.items()
if self.features._column_requires_decoding[column_name]
}
if self.features
else {}
)
if decode:
row[list(decode.keys())] = row.transform(decode)
return row
def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
decode = (
no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
else None
)
if decode:
column = column.transform(decode)
return column
def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
return self.decode_row(batch)
class LazyDict(MutableMapping):
"""A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""
def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
self.pa_table = pa_table
self.formatter = formatter
self.data = {key: None for key in pa_table.column_names}
self.keys_to_format = set(self.data.keys())
def __len__(self):
return len(self.data)
def __getitem__(self, key):
value = self.data[key]
if key in self.keys_to_format:
value = self.format(key)
self.data[key] = value
self.keys_to_format.remove(key)
return value
def __setitem__(self, key, value):
if key in self.keys_to_format:
self.keys_to_format.remove(key)
self.data[key] = value
def __delitem__(self, key) -> None:
if key in self.keys_to_format:
self.keys_to_format.remove(key)
del self.data[key]
def __iter__(self):
return iter(self.data)
def __contains__(self, key):
return key in self.data
def __repr__(self):
self._format_all()
return repr(self.data)
if config.PY_VERSION >= version.parse("3.9"):
# merging with the union ("|") operator is supported in Python 3.9+
def __or__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = inst.data | other.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = inst.data | other
return inst
return NotImplemented
def __ror__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = other.data | inst.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = other | inst.data
return inst
return NotImplemented
def __ior__(self, other):
if isinstance(other, LazyDict):
other = other.copy()
other._format_all()
self.keys_to_format -= other.data.keys()
self.data |= other.data
else:
self.keys_to_format -= other.keys()
self.data |= other
return self
def __copy__(self):
# Identical to `UserDict.__copy__`
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
# Create a copy and avoid triggering descriptors
inst.__dict__["data"] = self.__dict__["data"].copy()
inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
return inst
def copy(self):
import copy
return copy.copy(self)
@classmethod
def fromkeys(cls, iterable, value=None):
raise NotImplementedError
def format(self, key):
raise NotImplementedError
def _format_all(self):
for key in self.keys_to_format:
self.data[key] = self.format(key)
self.keys_to_format.clear()
class LazyRow(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key]))[0]
class LazyBatch(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key]))
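# Illustrative sketch of the lazy containers (`t` and `formatter` are placeholders for
# any pyarrow table and Formatter): values are only formatted when a key is accessed,
# and the formatted value is cached in `self.data` so each key is formatted at most once.
#   >>> row = LazyRow(t, formatter)
#   >>> row["a"]   # formats column "a" now and caches the result
#   >>> row["a"]   # second access reuses the cached value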
class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
A formatter is an object that extracts and formats data from pyarrow tables.
It defines the formatting for rows, columns and batches.
"""
simple_arrow_extractor = SimpleArrowExtractor
python_arrow_extractor = PythonArrowExtractor
numpy_arrow_extractor = NumpyArrowExtractor
pandas_arrow_extractor = PandasArrowExtractor
def __init__(self, features: Optional[Features] = None):
self.features = features
self.python_features_decoder = PythonFeaturesDecoder(self.features)
self.pandas_features_decoder = PandasFeaturesDecoder(self.features)
def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
if query_type == "row":
return self.format_row(pa_table)
elif query_type == "column":
return self.format_column(pa_table)
elif query_type == "batch":
return self.format_batch(pa_table)
def format_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def format_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError
class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
def recursive_tensorize(self, data_struct: dict):
raise NotImplementedError
class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
def format_row(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_row(pa_table)
def format_column(self, pa_table: pa.Table) -> pa.Array:
return self.simple_arrow_extractor().extract_column(pa_table)
def format_batch(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_batch(pa_table)
class PythonFormatter(Formatter[Mapping, list, Mapping]):
def __init__(self, features=None, lazy=False):
super().__init__(features)
self.lazy = lazy
def format_row(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyRow(pa_table, self)
row = self.python_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> list:
column = self.python_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyBatch(pa_table, self)
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return batch
class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]):
def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_row(pa_table)
row = self.pandas_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> pd.Series:
column = self.pandas_arrow_extractor().extract_column(pa_table)
column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_batch(pa_table)
row = self.pandas_features_decoder.decode_batch(row)
return row
class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
"""
A user-defined custom formatter function defined by a ``transform``.
The transform must take as input a batch of data extracted for an arrow table using the python extractor,
and return a batch.
If the output batch is not a dict, then output_all_columns won't work.
    If the output batch has several fields, then querying a single column won't work since we don't know which field
to return.
"""
def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs):
super().__init__(features=features)
self.transform = transform
def format_row(self, pa_table: pa.Table) -> dict:
formatted_batch = self.format_batch(pa_table)
try:
return _unnest(formatted_batch)
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}"
) from exc
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
formatted_batch = self.format_batch(pa_table)
if hasattr(formatted_batch, "keys"):
if len(formatted_batch.keys()) > 1:
raise TypeError(
"Tried to query a column but the custom formatting function returns too many columns. "
f"Only one column was expected but got columns {list(formatted_batch.keys())}."
)
else:
raise TypeError(
f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
)
try:
return formatted_batch[pa_table.column_names[0]]
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
) from exc
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return self.transform(batch)
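# Illustrative sketch of a custom transform (column names are hypothetical):
#   >>> def add_length(batch: dict) -> dict:
#   ...     return {**batch, "text_length": [len(text) for text in batch["text"]]}
#   >>> formatter = CustomFormatter(transform=add_length)
# The transform receives a python-extracted batch (a dict of equal-length lists) and
# should return one, so that row and single-column queries keep working.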
def _check_valid_column_key(key: str, columns: List[str]) -> None:
if key not in columns:
raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}")
def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None:
if isinstance(key, int):
if (key < 0 and key + size < 0) or (key >= size):
raise IndexError(f"Invalid key: {key} is out of bounds for size {size}")
return
elif isinstance(key, slice):
pass
elif isinstance(key, range):
if len(key) > 0:
_check_valid_index_key(max(key), size=size)
_check_valid_index_key(min(key), size=size)
elif isinstance(key, Iterable):
if len(key) > 0:
_check_valid_index_key(int(max(key)), size=size)
_check_valid_index_key(int(min(key)), size=size)
else:
_raise_bad_key_type(key)
def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str:
if isinstance(key, int):
return "row"
elif isinstance(key, str):
return "column"
elif isinstance(key, (slice, range, Iterable)):
return "batch"
_raise_bad_key_type(key)
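# Illustrative mapping of keys to query types:
#   key_to_query_type(0)            -> "row"
#   key_to_query_type("text")       -> "column"
#   key_to_query_type(slice(0, 5))  -> "batch"
#   key_to_query_type(range(3))     -> "batch"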
def query_table(
table: Table,
key: Union[int, slice, range, str, Iterable],
indices: Optional[Table] = None,
) -> pa.Table:
"""
    Query a Table to extract the subtable that corresponds to the given key.
Args:
table (``datasets.table.Table``): The input Table to query from
key (``Union[int, slice, range, str, Iterable]``): The key can be of different types:
- an integer i: the subtable containing only the i-th row
- a slice [i:j:k]: the subtable containing the rows that correspond to this slice
- a range(i, j, k): the subtable containing the rows that correspond to this range
- a string c: the subtable containing all the rows but only the column c
- an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable
indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
The indices table must contain one column named "indices" of type uint64.
This is used in case of shuffling or rows selection.
Returns:
``pyarrow.Table``: the result of the query on the input table
"""
# Check if key is valid
if not isinstance(key, (int, slice, range, str, Iterable)):
_raise_bad_key_type(key)
if isinstance(key, str):
_check_valid_column_key(key, table.column_names)
else:
size = indices.num_rows if indices is not None else table.num_rows
_check_valid_index_key(key, size)
# Query the main table
if indices is None:
pa_subtable = _query_table(table, key)
else:
pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices)
return pa_subtable
def format_table(
table: Table,
key: Union[int, slice, range, str, Iterable],
formatter: Formatter,
format_columns: Optional[list] = None,
output_all_columns=False,
):
"""
Format a Table depending on the key that was used and a Formatter object.
Args:
table (``datasets.table.Table``): The input Table to format
key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats
the table as either a row, a column or a batch.
formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as
PythonFormatter, NumpyFormatter, etc.
format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the
given formatter. Other columns are discarded (unless ``output_all_columns`` is True)
output_all_columns (:obj:`bool`, defaults to False). If True, the formatted output is completed using the columns
that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used.
Returns:
A row, column or batch formatted object defined by the Formatter:
- the PythonFormatter returns a dictionary for a row or a batch, and a list for a column.
- the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column.
- the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column.
- the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column.
- the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column.
"""
if isinstance(table, Table):
pa_table = table.table
else:
pa_table = table
query_type = key_to_query_type(key)
python_formatter = PythonFormatter(features=formatter.features)
if format_columns is None:
return formatter(pa_table, query_type=query_type)
elif query_type == "column":
if key in format_columns:
return formatter(pa_table, query_type)
else:
return python_formatter(pa_table, query_type=query_type)
else:
pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns)
formatted_output = formatter(pa_table_to_format, query_type=query_type)
if output_all_columns:
if isinstance(formatted_output, MutableMapping):
pa_table_with_remaining_columns = pa_table.drop(
col for col in pa_table.column_names if col in format_columns
)
remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type)
formatted_output.update(remaining_columns_dict)
else:
raise TypeError(
f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}"
)
return formatted_output
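# Minimal end-to-end sketch (table content and formatter choice are illustrative):
#   >>> import pyarrow as pa
#   >>> from datasets.table import InMemoryTable
#   >>> table = InMemoryTable(pa.table({"a": [1, 2, 3]}))
#   >>> subtable = query_table(table, key=slice(0, 2))
#   >>> format_table(subtable, key=slice(0, 2), formatter=PythonFormatter())
#   {'a': [1, 2]}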
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/formatting/__init__.py | # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
formatter_cls: type,
format_type: Optional[str],
aliases: Optional[List[str]] = None,
):
"""
Register a Formatter object using a name and optional aliases.
This function must be used on a Formatter class.
"""
aliases = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
)
_FORMAT_TYPES[format_type] = formatter_cls
for alias in set(aliases + [format_type]):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
)
_FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
"""
Register an unavailable Formatter object using a name and optional aliases.
This function must be used on an Exception object that is raised when trying to get the unavailable formatter.
"""
aliases = aliases if aliases is not None else []
for alias in set(aliases + [format_type]):
_FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
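# Illustrative sketch (hypothetical formatter class): third-party code could register an
# extra formatter and its aliases the same way the built-in ones are registered below.
#   >>> class MyFormatter(Formatter):
#   ...     ...
#   >>> _register_formatter(MyFormatter, "my_format", aliases=["mine"])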
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
_torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
_tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
_jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
"""If the given format type is a known alias, then return its main type name. Otherwise return the type with no change."""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
"""
Factory function to get a Formatter given its type name and keyword arguments.
    A formatter is an object that extracts and formats data from pyarrow tables.
    It defines the formatting for rows, columns and batches.
If the formatter for a given type name doesn't exist or is not available, an error is raised.
"""
format_type = get_format_type_from_alias(format_type)
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
)
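# Illustrative usage sketch (availability of the optional backends is assumed):
#   >>> get_formatter(None)      # the default python formatter
#   >>> get_formatter("np")      # alias resolved to the numpy formatter
#   >>> get_formatter("torch")   # raises the registered error if PyTorch is missing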
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/formatting/np_formatter.py | # Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections.abc import Mapping
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
def __init__(self, features=None, **np_array_kwargs):
super().__init__(features=features)
self.np_array_kwargs = np_array_kwargs
def _consolidate(self, column):
if isinstance(column, list):
if column and all(
isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return np.stack(column)
else:
# don't use np.array(column, dtype=object)
# since it fails in certain cases
# see https://stackoverflow.com/q/51005699
out = np.empty(len(column), dtype=object)
out[:] = column
return out
return column
def _tensorize(self, value):
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value
elif isinstance(value, np.number):
return value
default_dtype = {}
if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": np.int64}
elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": np.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
return np.asarray(value, **self.np_array_kwargs)
return np.asarray(value, **{**default_dtype, **self.np_array_kwargs})
def _recursive_tensorize(self, data_struct):
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object:
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
if isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> np.ndarray:
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
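# Illustrative sketch of the consolidation step (values are hypothetical): rows that are
# numpy arrays with a common shape and dtype get stacked into a single ndarray, anything
# else is kept as a 1-d object array of per-row values.
#   >>> import numpy as np
#   >>> NumpyFormatter()._consolidate([np.zeros(2), np.ones(2)]).shape
#   (2, 2)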
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/download/download_manager.py | # Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Download manager interface."""
import enum
import io
import os
import posixpath
import tarfile
import warnings
import zipfile
from datetime import datetime
from functools import partial
from itertools import chain
from typing import Callable, Dict, Generator, List, Optional, Tuple, Union
from .. import config
from ..utils import tqdm as hf_tqdm
from ..utils.deprecation_utils import DeprecatedEnum, deprecated
from ..utils.file_utils import (
cached_path,
get_from_cache,
hash_url_to_filename,
is_relative_path,
stack_multiprocessing_download_progress_bars,
url_or_path_join,
)
from ..utils.info_utils import get_size_checksum_dict
from ..utils.logging import get_logger
from ..utils.py_utils import NestedDataStructure, map_nested, size_str
from ..utils.track import TrackedIterable, tracked_str
from .download_config import DownloadConfig
logger = get_logger(__name__)
BASE_KNOWN_EXTENSIONS = [
"txt",
"csv",
"json",
"jsonl",
"tsv",
"conll",
"conllu",
"orig",
"parquet",
"pkl",
"pickle",
"rel",
"xml",
]
MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
bytes.fromhex("504B0304"): "zip",
bytes.fromhex("504B0506"): "zip", # empty archive
bytes.fromhex("504B0708"): "zip", # spanned archive
bytes.fromhex("425A68"): "bz2",
bytes.fromhex("1F8B"): "gzip",
bytes.fromhex("FD377A585A00"): "xz",
bytes.fromhex("04224D18"): "lz4",
bytes.fromhex("28B52FFD"): "zstd",
}
MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
b"Rar!": "rar",
}
MAGIC_NUMBER_MAX_LENGTH = max(
len(magic_number)
for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
)
class DownloadMode(enum.Enum):
"""`Enum` for how to treat pre-existing downloads and data.
The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
raw downloads and the prepared dataset if they exist.
    The generation modes:
| | Downloads | Dataset |
|-------------------------------------|-----------|---------|
| `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse |
| `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
| `FORCE_REDOWNLOAD` | Fresh | Fresh |
"""
REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
FORCE_REDOWNLOAD = "force_redownload"
class GenerateMode(DeprecatedEnum):
REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
FORCE_REDOWNLOAD = "force_redownload"
@property
def help_message(self):
return "Use 'DownloadMode' instead."
def _get_path_extension(path: str) -> str:
# Get extension: train.json.gz -> gz
extension = path.split(".")[-1]
# Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz
# Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt
for symb in "?-_":
extension = extension.split(symb)[0]
return extension
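# Illustrative examples of the extension heuristic (paths are hypothetical):
#   _get_path_extension("train.json.gz")            -> "gz"
#   _get_path_extension("data.txt-00000-of-00100")  -> "txt"
#   _get_path_extension("archive.zip?dl=1")         -> "zip"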
def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
"""read the magic number from a file-like object and return the compression protocol"""
# Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440)
try:
f.seek(0)
except (AttributeError, io.UnsupportedOperation):
return None
magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
f.seek(0)
for i in range(MAGIC_NUMBER_MAX_LENGTH):
compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
return compression
compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
def _get_extraction_protocol(path: str) -> Optional[str]:
path = str(path)
extension = _get_path_extension(path)
# TODO(mariosasko): The below check will be useful once we can preserve the original extension in the new cache layout (use the `filename` parameter of `hf_hub_download`)
if (
extension in BASE_KNOWN_EXTENSIONS
or extension in ["tgz", "tar"]
or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz"))
):
return None
with open(path, "rb") as f:
return _get_extraction_protocol_with_magic_number(f)
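# Illustrative sketch (paths are hypothetical): known plain-data and tar extensions yield
# no extraction protocol, anything else falls back to sniffing the magic number of the
# local file.
#   _get_extraction_protocol("data/train.csv")    -> None    (known extension)
#   _get_extraction_protocol("data/file.tar.gz")  -> None    (tar archives are iterated, not extracted here)
#   _get_extraction_protocol("data/file.json.gz") -> "gzip"  (detected from the magic number)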
class _IterableFromGenerator(TrackedIterable):
"""Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
def __init__(self, generator: Callable, *args, **kwargs):
super().__init__()
self.generator = generator
self.args = args
self.kwargs = kwargs
def __iter__(self):
for x in self.generator(*self.args, **self.kwargs):
self.last_item = x
yield x
self.last_item = None
class ArchiveIterable(_IterableFromGenerator):
"""An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
@staticmethod
def _iter_tar(f):
stream = tarfile.open(fileobj=f, mode="r|*")
for tarinfo in stream:
file_path = tarinfo.name
if not tarinfo.isreg():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = stream.extractfile(tarinfo)
yield file_path, file_obj
stream.members = []
del stream
@staticmethod
def _iter_zip(f):
zipf = zipfile.ZipFile(f)
for member in zipf.infolist():
file_path = member.filename
if member.is_dir():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = zipf.open(member)
yield file_path, file_obj
@classmethod
def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol_with_magic_number(f)
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def _iter_from_path(cls, urlpath: str) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol(urlpath)
with open(urlpath, "rb") as f:
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def from_buf(cls, fileobj) -> "ArchiveIterable":
return cls(cls._iter_from_fileobj, fileobj)
@classmethod
def from_path(cls, urlpath_or_buf) -> "ArchiveIterable":
return cls(cls._iter_from_path, urlpath_or_buf)
class FilesIterable(_IterableFromGenerator):
"""An iterable of paths from a list of directories or files"""
@classmethod
def _iter_from_paths(cls, urlpaths: Union[str, List[str]]) -> Generator[str, None, None]:
if not isinstance(urlpaths, list):
urlpaths = [urlpaths]
for urlpath in urlpaths:
if os.path.isfile(urlpath):
yield urlpath
else:
for dirpath, dirnames, filenames in os.walk(urlpath):
# in-place modification to prune the search
dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
if os.path.basename(dirpath).startswith((".", "__")):
# skipping hidden directories
continue
for filename in sorted(filenames):
if filename.startswith((".", "__")):
# skipping hidden files
continue
yield os.path.join(dirpath, filename)
@classmethod
def from_paths(cls, urlpaths) -> "FilesIterable":
return cls(cls._iter_from_paths, urlpaths)
class DownloadManager:
is_streaming = False
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
record_checksums=True,
):
"""Download manager constructor.
Args:
data_dir:
can be used to specify a manual directory to get the files from.
dataset_name (`str`):
                name of the dataset this instance will be used for. If
                provided, downloads are recorded as being used for this dataset.
download_config (`DownloadConfig`):
to specify the cache directory and other
download options
base_path (`str`):
base path that is used when relative paths are used to
download files. This can be a remote url.
record_checksums (`bool`, defaults to `True`):
Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder.
"""
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
# To record what is being used: {url: {num_bytes: int, checksum: str}}
self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {}
self.record_checksums = record_checksums
self.download_config = download_config or DownloadConfig()
self.downloaded_paths = {}
self.extracted_paths = {}
@property
def manual_dir(self):
return self._data_dir
@property
def downloaded_size(self):
"""Returns the total size of downloaded files."""
return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
@staticmethod
def ship_files_with_pipeline(downloaded_path_or_paths, pipeline):
"""Ship the files using Beam FileSystems to the pipeline temp dir.
Args:
downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`):
Nested structure containing the
downloaded path(s).
pipeline ([`utils.beam_utils.BeamPipeline`]):
Apache Beam Pipeline.
Returns:
`str` or `list[str]` or `dict[str, str]`
"""
from ..utils.beam_utils import upload_local_to_remote
remote_dir = pipeline._options.get_all_options().get("temp_location")
if remote_dir is None:
raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files")
def upload(local_file_path):
remote_file_path = posixpath.join(
remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path)
)
logger.info(
f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}."
)
upload_local_to_remote(local_file_path, remote_file_path)
return remote_file_path
uploaded_path_or_paths = map_nested(
lambda local_file_path: upload(local_file_path),
downloaded_path_or_paths,
)
return uploaded_path_or_paths
def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
"""Record size/checksum of downloaded files."""
delay = 5
for url, path in hf_tqdm(
list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
delay=delay,
desc="Computing checksums",
):
# call str to support PathLike objects
self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
path, record_checksum=self.record_checksums
)
@deprecated("Use `.download`/`.download_and_extract` with `fsspec` URLs instead.")
def download_custom(self, url_or_urls, custom_download):
"""
Download given urls(s) by calling `custom_download`.
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
custom_download (`Callable[src_url, dst_path]`):
                A callable that takes the source URL and the destination path, for example
                `tf.io.gfile.copy`, which lets you download from Google Storage.
Returns:
downloaded_path(s): `str`, The downloaded paths matching the given input
`url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket)
```
"""
cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
max_retries = self.download_config.max_retries
def url_to_downloaded_path(url):
return os.path.join(cache_dir, hash_url_to_filename(url))
downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls)
url_or_urls = NestedDataStructure(url_or_urls)
downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()):
try:
get_from_cache(
url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
)
cached = True
except FileNotFoundError:
cached = False
if not cached or self.download_config.force_download:
custom_download(url, path)
get_from_cache(
url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
)
self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
return downloaded_path_or_paths.data
def download(self, url_or_urls):
"""Download given URL(s).
By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior.
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download. Each URL is a `str`.
Returns:
`str` or `list` or `dict`:
The downloaded paths matching the given input `url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
download_config = self.download_config.copy()
download_config.extract_compressed_file = False
if download_config.download_desc is None:
download_config.download_desc = "Downloading data"
download_func = partial(self._download, download_config=download_config)
start_time = datetime.now()
with stack_multiprocessing_download_progress_bars():
downloaded_path_or_paths = map_nested(
download_func,
url_or_urls,
map_tuple=True,
num_proc=download_config.num_proc,
desc="Downloading data files",
)
duration = datetime.now() - start_time
logger.info(f"Downloading took {duration.total_seconds() // 60} min")
url_or_urls = NestedDataStructure(url_or_urls)
downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())))
start_time = datetime.now()
self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
duration = datetime.now() - start_time
logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")
return downloaded_path_or_paths.data
def _download(self, url_or_filename: str, download_config: DownloadConfig) -> str:
url_or_filename = str(url_or_filename)
if is_relative_path(url_or_filename):
# append the relative path to the base_path
url_or_filename = url_or_path_join(self._base_path, url_or_filename)
out = cached_path(url_or_filename, download_config=download_config)
out = tracked_str(out)
out.set_origin(url_or_filename)
return out
def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]):
"""Iterate over files within an archive.
Args:
path_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object.
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(path_or_buf, "read"):
return ArchiveIterable.from_buf(path_or_buf)
else:
return ArchiveIterable.from_path(path_or_buf)
def iter_files(self, paths: Union[str, List[str]]):
"""Iterate over file paths.
Args:
paths (`str` or `list` of `str`):
Root paths.
Yields:
`str`: File path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_paths(paths)
def extract(self, path_or_paths, num_proc="deprecated"):
"""Extract given path(s).
Args:
path_or_paths (path or `list` or `dict`):
Path of file to extract. Each path is a `str`.
num_proc (`int`):
Use multi-processing if `num_proc` > 1 and the length of
`path_or_paths` is larger than `num_proc`.
<Deprecated version="2.6.2">
Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.
</Deprecated>
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
if num_proc != "deprecated":
warnings.warn(
"'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.",
FutureWarning,
)
download_config = self.download_config.copy()
download_config.extract_compressed_file = True
extract_func = partial(self._download, download_config=download_config)
extracted_paths = map_nested(
extract_func,
path_or_paths,
num_proc=download_config.num_proc,
desc="Extracting data files",
)
path_or_paths = NestedDataStructure(path_or_paths)
extracted_paths = NestedDataStructure(extracted_paths)
self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten())))
return extracted_paths.data
def download_and_extract(self, url_or_urls):
"""Download and extract given `url_or_urls`.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
return self.extract(self.download(url_or_urls))
def get_recorded_sizes_checksums(self):
return self._recorded_sizes_checksums.copy()
def delete_extracted_files(self):
paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values())
for key, path in list(self.extracted_paths.items()):
if path in paths_to_delete and os.path.isfile(path):
os.remove(path)
del self.extracted_paths[key]
def manage_extracted_files(self):
if self.download_config.delete_extracted:
self.delete_extracted_files()
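# Minimal usage sketch inside a dataset script (URL and structure are illustrative):
#   >>> dl_manager = DownloadManager(dataset_name="my_dataset")
#   >>> paths = dl_manager.download_and_extract({"train": "https://example.com/train.zip"})
#   >>> for file_path in dl_manager.iter_files(paths["train"]):
#   ...     ...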
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/download/download_config.py | import copy
import warnings
from dataclasses import InitVar, dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
        proxies (`dict`, *optional*):
            Proxy configuration passed on to the HTTP requests.
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path point to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, default to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
ignore_url_params: bool = False
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
def __post_init__(self, use_auth_token):
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
self.token = use_auth_token
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
            elif self.storage_options["hf"].get("token") is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
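# Illustrative sketch (values are placeholders): a config that caches downloads in a
# custom directory, retries failed requests, and forwards the Hub token to fsspec.
#   >>> cfg = DownloadConfig(cache_dir="/tmp/my_cache", max_retries=3, token=True)
#   >>> "endpoint" in cfg.storage_options["hf"]  # populated in __post_init__
#   True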
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/download/streaming_download_manager.py | import glob
import io
import os
import posixpath
import re
import tarfile
import time
import xml.dom.minidom
import zipfile
from asyncio import TimeoutError
from io import BytesIO
from itertools import chain
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union
from xml.etree import ElementTree as ET
import fsspec
from aiohttp.client_exceptions import ClientError
from huggingface_hub.utils import EntryNotFoundError
from .. import config
from ..filesystems import COMPRESSION_FILESYSTEMS
from ..utils.file_utils import (
get_authentication_headers_for_url,
get_datasets_user_agent,
http_head,
is_local_path,
is_relative_path,
url_or_path_join,
)
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .download_config import DownloadConfig
logger = get_logger(__name__)
BASE_KNOWN_EXTENSIONS = [
"txt",
"csv",
"json",
"jsonl",
"tsv",
"conll",
"conllu",
"orig",
"parquet",
"pkl",
"pickle",
"rel",
"xml",
]
COMPRESSION_EXTENSION_TO_PROTOCOL = {
# single file compression
**{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS},
# archive compression
"zip": "zip",
}
SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}
SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/")
MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
bytes.fromhex("504B0304"): "zip",
bytes.fromhex("504B0506"): "zip", # empty archive
bytes.fromhex("504B0708"): "zip", # spanned archive
bytes.fromhex("425A68"): "bz2",
bytes.fromhex("1F8B"): "gzip",
bytes.fromhex("FD377A585A00"): "xz",
bytes.fromhex("04224D18"): "lz4",
bytes.fromhex("28B52FFD"): "zstd",
}
MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
b"Rar!": "rar",
}
MAGIC_NUMBER_MAX_LENGTH = max(
len(magic_number)
for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
)
class NonStreamableDatasetError(Exception):
pass
def xjoin(a, *p):
"""
This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xjoin function allows you to apply the join on the first path of the chain.
Example::
>>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
zip://folder1/file.txt::https://host.com/archive.zip
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.join(a, *p)
else:
a = posixpath.join(a, *p)
return "::".join([a] + b)
def xdirname(a):
"""
This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xdirname function allows you to apply the dirname on the first path of the chain.
Example::
>>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip")
zip://folder1::https://host.com/archive.zip
"""
a, *b = str(a).split("::")
if is_local_path(a):
a = os.path.dirname(Path(a).as_posix())
else:
a = posixpath.dirname(a)
# if we end up at the root of the protocol, we get for example a = 'http:'
# so we have to fix it by adding the '//' that was removed:
if a.endswith(":"):
a += "//"
return "::".join([a] + b)
def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None):
"""Extend `os.path.exists` function to support both local and remote files.
Args:
urlpath (`str`): URL path.
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`bool`
"""
main_hop, *rest_hops = _as_str(urlpath).split("::")
if is_local_path(main_hop):
return os.path.exists(main_hop)
else:
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
main_hop, *rest_hops = urlpath.split("::")
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
return fs.exists(main_hop)
def xbasename(a):
"""
This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xbasename function allows you to apply the basename on the first path of the chain.
Example::
>>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip")
file.txt
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.basename(Path(a).as_posix())
else:
return posixpath.basename(a)
def xsplit(a):
"""
This function extends os.path.split to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
    The xsplit function allows you to apply the split on the first path of the chain.
Example::
>>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip")
('zip://folder1::https://host.com/archive.zip', 'file.txt')
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.split(Path(a).as_posix())
else:
a, tail = posixpath.split(a)
return "::".join([a + "//" if a.endswith(":") else a] + b), tail
def xsplitext(a):
"""
This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls.
A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
This is used to access files inside a zip file over http for example.
Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
Then you can just chain the url this way:
zip://folder1/file.txt::https://host.com/archive.zip
The xsplitext function allows you to apply the splitext on the first path of the chain.
Example::
>>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip")
('zip://folder1/file::https://host.com/archive.zip', '.txt')
"""
a, *b = str(a).split("::")
if is_local_path(a):
return os.path.splitext(Path(a).as_posix())
else:
a, ext = posixpath.splitext(a)
return "::".join([a] + b), ext
def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool:
"""Extend `os.path.isfile` function to support remote files.
Args:
path (`str`): URL path.
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`bool`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.isfile(path)
else:
path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
main_hop, *rest_hops = path.split("::")
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
return fs.isfile(main_hop)
def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int:
"""Extend `os.path.getsize` function to support remote files.
Args:
path (`str`): URL path.
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
        `int`: the file size in bytes
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.getsize(path)
else:
path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
main_hop, *rest_hops = path.split("::")
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
try:
size = fs.size(main_hop)
except EntryNotFoundError:
raise FileNotFoundError(f"No such file: {path}")
if size is None:
# use xopen instead of fs.open to make data fetching more robust
with xopen(path, download_config=download_config) as f:
size = len(f.read())
return size
def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool:
"""Extend `os.path.isdir` function to support remote files.
Args:
path (`str`): URL path.
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`bool`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.isdir(path)
else:
path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
main_hop, *rest_hops = path.split("::")
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
inner_path = main_hop.split("://")[-1]
if not inner_path.strip("/"):
return True
return fs.isdir(inner_path)
def xrelpath(path, start=None):
"""Extend `os.path.relpath` function to support remote files.
Args:
path (`str`): URL path.
start (`str`): Start URL directory path.
Returns:
`str`
"""
main_hop, *rest_hops = str(path).split("::")
if is_local_path(main_hop):
return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop)
else:
return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop)
def _add_retries_to_file_obj_read_method(file_obj):
read = file_obj.read
max_retries = config.STREAMING_READ_MAX_RETRIES
def read_with_retries(*args, **kwargs):
disconnect_err = None
for retry in range(1, max_retries + 1):
try:
out = read(*args, **kwargs)
break
except (ClientError, TimeoutError) as err:
disconnect_err = err
logger.warning(
f"Got disconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]"
)
time.sleep(config.STREAMING_READ_RETRY_INTERVAL)
else:
raise ConnectionError("Server Disconnected") from disconnect_err
return out
file_obj.read = read_with_retries
def _get_path_extension(path: str) -> str:
# Get extension: https://foo.bar/train.json.gz -> gz
extension = path.split(".")[-1]
# Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz
# Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt
for symb in "?-_":
extension = extension.split(symb)[0]
return extension
def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
"""read the magic number from a file-like object and return the compression protocol"""
# Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440)
try:
f.seek(0)
except (AttributeError, io.UnsupportedOperation):
return None
magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
f.seek(0)
for i in range(MAGIC_NUMBER_MAX_LENGTH):
compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
return compression
compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
if compression is not None:
raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]:
# get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
urlpath = str(urlpath)
path = urlpath.split("::")[0]
extension = _get_path_extension(path)
if (
extension in BASE_KNOWN_EXTENSIONS
or extension in ["tgz", "tar"]
or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz"))
):
return None
elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL:
return COMPRESSION_EXTENSION_TO_PROTOCOL[extension]
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
try:
with fsspec.open(urlpath, **(storage_options or {})) as f:
return _get_extraction_protocol_with_magic_number(f)
except FileNotFoundError:
if urlpath.startswith(config.HF_ENDPOINT):
raise FileNotFoundError(
urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`."
) from None
else:
raise
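# Illustrative resolution (assuming the default compression tables): a path ending in ".json.gz" is
# expected to resolve to the "gzip" protocol, while ".tar"/".tgz" paths return None here (TAR archives
# are streamed with `iter_archive` rather than extracted).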
def _prepare_path_and_storage_options(
urlpath: str, download_config: Optional[DownloadConfig] = None
) -> Tuple[str, Dict[str, Dict[str, Any]]]:
prepared_urlpath = []
prepared_storage_options = {}
for hop in urlpath.split("::"):
hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config)
prepared_urlpath.append(hop)
prepared_storage_options.update(storage_options)
return "::".join(prepared_urlpath), storage_options
def _prepare_single_hop_path_and_storage_options(
urlpath: str, download_config: Optional[DownloadConfig] = None
) -> Tuple[str, Dict[str, Dict[str, Any]]]:
"""
Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head
In particular it resolves google drive URLs
It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths.
Storage options are formatted in the form {protocol: storage_options_for_protocol}
"""
token = None if download_config is None else download_config.token
if urlpath.startswith(config.HF_ENDPOINT) and "/resolve/" in urlpath:
urlpath = "hf://" + urlpath[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
protocol = urlpath.split("://")[0] if "://" in urlpath else "file"
if download_config is not None and protocol in download_config.storage_options:
storage_options = download_config.storage_options[protocol]
elif download_config is not None and protocol not in download_config.storage_options:
storage_options = {
option_name: option_value
for option_name, option_value in download_config.storage_options.items()
if option_name not in fsspec.available_protocols()
}
else:
storage_options = {}
if storage_options:
storage_options = {protocol: storage_options}
if protocol in ["http", "https"]:
storage_options[protocol] = {
"headers": {
**get_authentication_headers_for_url(urlpath, token=token),
"user-agent": get_datasets_user_agent(),
},
"client_kwargs": {"trust_env": True}, # Enable reading proxy env variables.
**(storage_options.get(protocol, {})),
}
if "drive.google.com" in urlpath:
response = http_head(urlpath)
cookies = None
for k, v in response.cookies.items():
if k.startswith("download_warning"):
urlpath += "&confirm=" + v
cookies = response.cookies
storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})}
# Fix Google Drive URL to avoid Virus scan warning
if "drive.google.com" in urlpath and "confirm=" not in urlpath:
urlpath += "&confirm=t"
if urlpath.startswith("https://raw.githubusercontent.com/"):
# Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389
storage_options[protocol]["headers"]["Accept-Encoding"] = "identity"
elif protocol == "hf":
storage_options[protocol] = {
"token": token,
"endpoint": config.HF_ENDPOINT,
**storage_options.get(protocol, {}),
}
return urlpath, storage_options
def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs):
"""Extend `open` function to support remote files using `fsspec`.
It also has a retry mechanism in case connection fails.
The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co
Args:
file (`str`): Path name of the file to be opened.
mode (`str`, *optional*, default "r"): Mode in which the file is opened.
*args: Arguments to be passed to `fsspec.open`.
download_config : mainly use token or storage_options to support different platforms and auth types.
**kwargs: Keyword arguments to be passed to `fsspec.open`.
Returns:
file object
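    Example:
    ```py
    >>> # Illustrative only: any streamable URL (or chained "::" URL) can be passed here
    >>> with xopen("https://example.com/data/train.csv", "r") as f:
    ...     first_line = f.readline()
    ```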
"""
# This works as well for `xopen(str(Path(...)))`
file_str = _as_str(file)
main_hop, *rest_hops = file_str.split("::")
if is_local_path(main_hop):
return open(main_hop, mode, *args, **kwargs)
# add headers and cookies for authentication on the HF Hub and for Google Drive
file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config)
kwargs = {**kwargs, **(storage_options or {})}
try:
file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()
except ValueError as e:
if str(e) == "Cannot seek streaming HTTP file":
raise NonStreamableDatasetError(
"Streaming is not possible for this dataset because data host server doesn't support HTTP range "
"requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)"
) from e
else:
raise
except FileNotFoundError:
if file.startswith(config.HF_ENDPOINT):
raise FileNotFoundError(
file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`."
) from None
else:
raise
_add_retries_to_file_obj_read_method(file_obj)
return file_obj
def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]:
"""Extend `os.listdir` function to support remote files.
Args:
path (`str`): URL path.
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`list` of `str`
"""
main_hop, *rest_hops = _as_str(path).split("::")
if is_local_path(main_hop):
return os.listdir(path)
else:
# globbing inside a zip in a private repo requires authentication
path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
main_hop, *rest_hops = path.split("::")
fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
inner_path = main_hop.split("://")[-1]
if inner_path.strip("/") and not fs.isdir(inner_path):
raise FileNotFoundError(f"Directory doesn't exist: {path}")
objects = fs.listdir(inner_path)
return [os.path.basename(obj["name"]) for obj in objects]
def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None):
"""Extend `glob.glob` function to support remote files.
Args:
urlpath (`str`): URL path with shell-style wildcard patterns.
recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more
directories or subdirectories.
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`list` of `str`
"""
main_hop, *rest_hops = _as_str(urlpath).split("::")
if is_local_path(main_hop):
return glob.glob(main_hop, recursive=recursive)
else:
# globbing inside a zip in a private repo requires authentication
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
main_hop, *rest_hops = urlpath.split("::")
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
# - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching
# so to be able to glob patterns like "[0-9]", we have to call `fs.glob`.
# - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories.
# - If there is "**" in the pattern, `fs.glob` must be called anyway.
inner_path = main_hop.split("://")[1]
globbed_paths = fs.glob(inner_path)
protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1]
return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths]
def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs):
"""Extend `os.walk` function to support remote files.
Args:
urlpath (`str`): URL root path.
download_config : mainly use token or storage_options to support different platforms and auth types.
**kwargs: Additional keyword arguments forwarded to the underlying filesystem.
Yields:
`tuple`: 3-tuple (dirpath, dirnames, filenames).
"""
main_hop, *rest_hops = _as_str(urlpath).split("::")
if is_local_path(main_hop):
yield from os.walk(main_hop, **kwargs)
else:
# walking inside a zip in a private repo requires authentication
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
main_hop, *rest_hops = urlpath.split("::")
fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
inner_path = main_hop.split("://")[-1]
if inner_path.strip("/") and not fs.isdir(inner_path):
return []
protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1]
for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs):
yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames
class xPath(type(Path())):
"""Extension of `pathlib.Path` to support both local paths and remote URLs."""
def __str__(self):
path_str = super().__str__()
main_hop, *rest_hops = path_str.split("::")
if is_local_path(main_hop):
return main_hop
path_as_posix = path_str.replace("\\", "/")
path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix)
path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol
return path_as_posix
def exists(self, download_config: Optional[DownloadConfig] = None):
"""Extend `pathlib.Path.exists` method to support both local and remote files.
Args:
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`bool`
"""
return xexists(str(self), download_config=download_config)
def glob(self, pattern, download_config: Optional[DownloadConfig] = None):
"""Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
pattern (`str`): Pattern that resulting paths must match.
download_config : mainly use token or storage_options to support different platforms and auth types.
Yields:
[`xPath`]
"""
posix_path = self.as_posix()
main_hop, *rest_hops = posix_path.split("::")
if is_local_path(main_hop):
yield from Path(main_hop).glob(pattern)
else:
# globbing inside a zip in a private repo requires authentication
if rest_hops:
urlpath = rest_hops[0]
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
storage_options = {urlpath.split("://")[0]: storage_options}
posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]])
else:
storage_options = None
fs, *_ = fsspec.get_fs_token_paths(xjoin(posix_path, pattern), storage_options=storage_options)
# - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching
# so to be able to glob patterns like "[0-9]", we have to call `fs.glob`.
# - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories.
# - If there is "**" in the pattern, `fs.glob` must be called anyway.
globbed_paths = fs.glob(xjoin(main_hop, pattern))
for globbed_path in globbed_paths:
yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops))
def rglob(self, pattern, **kwargs):
"""Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
pattern (`str`): Pattern that resulting paths must match.
Yields:
[`xPath`]
"""
return self.glob("**/" + pattern, **kwargs)
@property
def parent(self) -> "xPath":
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
[`xPath`]
"""
return type(self)(xdirname(self.as_posix()))
@property
def name(self) -> str:
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).name
@property
def stem(self) -> str:
"""Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).stem
@property
def suffix(self) -> str:
"""Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).suffix
def open(self, *args, **kwargs):
"""Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`.
Args:
            *args: Arguments passed to :func:`fsspec.open`.
**kwargs: Keyword arguments passed to :func:`fsspec.open`.
Returns:
`io.FileIO`: File-like object.
"""
return xopen(str(self), *args, **kwargs)
def joinpath(self, *p: Tuple[str, ...]) -> "xPath":
"""Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`.
Args:
*p (`tuple` of `str`): Other path components.
Returns:
[`xPath`]
"""
return type(self)(xjoin(self.as_posix(), *p))
def __truediv__(self, p: str) -> "xPath":
return self.joinpath(p)
def with_suffix(self, suffix):
main_hop, *rest_hops = str(self).split("::")
if is_local_path(main_hop):
return type(self)(str(super().with_suffix(suffix)))
return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops))
def _as_str(path: Union[str, Path, xPath]):
return str(path) if isinstance(path, xPath) else str(xPath(str(path)))
def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs):
import gzip
if hasattr(filepath_or_buffer, "read"):
return gzip.open(filepath_or_buffer, *args, **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs)
def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs):
import numpy as np
if hasattr(filepath_or_buffer, "read"):
return np.load(filepath_or_buffer, *args, **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs)
def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
import pandas as pd
if hasattr(filepath_or_buffer, "read"):
return pd.read_csv(filepath_or_buffer, **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
if kwargs.get("compression", "infer") == "infer":
kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config)
return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
import pandas as pd
if hasattr(filepath_or_buffer, "read"):
try:
return pd.read_excel(filepath_or_buffer, **kwargs)
except ValueError: # Cannot seek streaming HTTP file
return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
try:
return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
except ValueError: # Cannot seek streaming HTTP file
return pd.read_excel(
BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs
)
def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
import pyarrow.parquet as pq
if hasattr(filepath_or_buffer, "read"):
return pq.read_table(filepath_or_buffer, **kwargs)
else:
filepath_or_buffer = str(filepath_or_buffer)
return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs)
def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
import scipy.io as sio
if hasattr(filepath_or_buffer, "read"):
return sio.loadmat(filepath_or_buffer, **kwargs)
else:
return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None):
"""Extend `xml.etree.ElementTree.parse` function to support remote files.
Args:
source: File path or file object.
parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance.
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`xml.etree.ElementTree.Element`: Root element of the given source document.
"""
if hasattr(source, "read"):
return ET.parse(source, parser=parser)
else:
with xopen(source, "rb", download_config=download_config) as f:
return ET.parse(f, parser=parser)
def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs):
"""Extend `xml.dom.minidom.parse` function to support remote files.
Args:
filename_or_file (`str` or file): File path or file object.
download_config : mainly use token or storage_options to support different platforms and auth types.
**kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`.
Returns:
:obj:`xml.dom.minidom.Document`: Parsed document.
"""
if hasattr(filename_or_file, "read"):
return xml.dom.minidom.parse(filename_or_file, **kwargs)
else:
with xopen(filename_or_file, "rb", download_config=download_config) as f:
return xml.dom.minidom.parse(f, **kwargs)
class _IterableFromGenerator(Iterable):
"""Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
def __init__(self, generator: Callable, *args, **kwargs):
self.generator = generator
self.args = args
self.kwargs = kwargs
def __iter__(self):
yield from self.generator(*self.args, **self.kwargs)
class ArchiveIterable(_IterableFromGenerator):
"""An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
@staticmethod
def _iter_tar(f):
stream = tarfile.open(fileobj=f, mode="r|*")
for tarinfo in stream:
file_path = tarinfo.name
if not tarinfo.isreg():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = stream.extractfile(tarinfo)
yield file_path, file_obj
stream.members = []
del stream
@staticmethod
def _iter_zip(f):
zipf = zipfile.ZipFile(f)
for member in zipf.infolist():
file_path = member.filename
if member.is_dir():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = zipf.open(member)
yield file_path, file_obj
@classmethod
def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol_with_magic_number(f)
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def _iter_from_urlpath(
cls, urlpath: str, download_config: Optional[DownloadConfig] = None
) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol(urlpath, download_config=download_config)
with xopen(urlpath, "rb", download_config=download_config) as f:
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def from_buf(cls, fileobj) -> "ArchiveIterable":
return cls(cls._iter_from_fileobj, fileobj)
@classmethod
def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable":
return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config)
class FilesIterable(_IterableFromGenerator):
"""An iterable of paths from a list of directories or files"""
@classmethod
def _iter_from_urlpaths(
cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None
) -> Generator[str, None, None]:
if not isinstance(urlpaths, list):
urlpaths = [urlpaths]
for urlpath in urlpaths:
if xisfile(urlpath, download_config=download_config):
yield urlpath
elif xisdir(urlpath, download_config=download_config):
for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config):
# in-place modification to prune the search
dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
if xbasename(dirpath).startswith((".", "__")):
# skipping hidden directories
continue
for filename in sorted(filenames):
if filename.startswith((".", "__")):
# skipping hidden files
continue
yield xjoin(dirpath, filename)
else:
raise FileNotFoundError(urlpath)
@classmethod
def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable":
return cls(cls._iter_from_urlpaths, urlpaths, download_config)
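# Note (illustrative): FilesIterable.from_urlpaths("zip://::https://example.com/data.zip")
# lazily yields the non-hidden file paths found inside the (remote) archive when iterated.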
class StreamingDownloadManager:
"""
Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download nor extract
data, but they rather return the path or url that could be opened using the `xopen` function which extends the
built-in `open` function to stream data from remote files.
"""
is_streaming = True
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
):
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
self.download_config = download_config or DownloadConfig()
@property
def manual_dir(self):
return self._data_dir
def download(self, url_or_urls):
"""Normalize URL(s) of files to stream data from.
This is the lazy version of `DownloadManager.download` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
return url_or_urls
def _download(self, urlpath: str) -> str:
urlpath = str(urlpath)
if is_relative_path(urlpath):
# append the relative path to the base_path
urlpath = url_or_path_join(self._base_path, urlpath)
return urlpath
def extract(self, url_or_urls):
"""Add extraction protocol for given url(s) for streaming.
This is the lazy version of `DownloadManager.extract` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
return urlpaths
def _extract(self, urlpath: str) -> str:
urlpath = str(urlpath)
protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
# get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
path = urlpath.split("::")[0]
extension = _get_path_extension(path)
if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
raise NotImplementedError(
f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
f"Please use `dl_manager.iter_archive` instead.\n\n"
f"Example usage:\n\n"
f"\turl = dl_manager.download(url)\n"
f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
f"\tfor filename, file in tar_archive_iterator:\n"
f"\t\t..."
)
if protocol is None:
# no extraction
return urlpath
elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
# there is one single file which is the uncompressed file
inner_file = os.path.basename(urlpath.split("::")[0])
inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
return f"{protocol}://{inner_file}::{urlpath}"
else:
return f"{protocol}://::{urlpath}"
def download_and_extract(self, url_or_urls):
"""Prepare given `url_or_urls` for streaming (add extraction protocol).
This is the lazy version of `DownloadManager.download_and_extract` for streaming.
Is equivalent to:
```
urls = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
                URL(s) to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
"""
return self.extract(self.download(url_or_urls))
def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
"""Iterate over files within an archive.
Args:
urlpath_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object.
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(urlpath_or_buf, "read"):
return ArchiveIterable.from_buf(urlpath_or_buf)
else:
return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
"""Iterate over files.
Args:
urlpaths (`str` or `list` of `str`):
Root paths.
Yields:
str: File URL path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
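# Minimal usage sketch (illustrative; the URL is a placeholder):
#   dl_manager = StreamingDownloadManager()
#   urlpath = dl_manager.download_and_extract("https://example.com/data.json.gz")
#   with xopen(urlpath, "rb", download_config=dl_manager.download_config) as f:
#       ...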
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/download/__init__.py | __all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/download/mock_download_manager.py | # Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Mock download manager interface."""
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
dummy_file_name = "dummy_data"
datasets_scripts_dir = "datasets"
is_streaming = False
def __init__(
self,
dataset_name: str,
config: str,
version: Union[Version, str],
cache_dir: Optional[str] = None,
use_local_dummy_data: bool = False,
load_existing_dummy_data: bool = True,
download_callbacks: Optional[List[Callable]] = None,
):
self.downloaded_size = 0
self.dataset_name = dataset_name
self.cache_dir = cache_dir
self.use_local_dummy_data = use_local_dummy_data
self.config = config
# download_callbacks take a single url as input
self.download_callbacks: List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
self.load_existing_dummy_data = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
self.version_name = str(version)
# to be downloaded
self._dummy_file = None
self._bucket_url = None
@property
def dummy_file(self):
if self._dummy_file is None:
self._dummy_file = self.download_dummy_data()
return self._dummy_file
@property
def dummy_data_folder(self):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy", self.config.name, self.version_name)
# structure is dummy / version_name
return os.path.join("dummy", self.version_name)
@property
def dummy_zip_file(self):
return os.path.join(self.dummy_data_folder, "dummy_data.zip")
def download_dummy_data(self):
path_to_dummy_data_dir = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
local_path = cached_path(
path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
)
return os.path.join(local_path, self.dummy_file_name)
@property
def local_path_to_dummy_data(self):
return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
@property
def github_path_to_dummy_data(self):
if self._bucket_url is None:
self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
return self._bucket_url
@property
def manual_dir(self):
        # return full path if it's a dir
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
# this function has to be in the manager under this name so that testing works
def download_and_extract(self, data_url, *args):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
dummy_file = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
dummy_file = self.dummy_file_name
# special case when data_url is a dict
if isinstance(data_url, dict):
return self.create_dummy_data_dict(dummy_file, data_url)
elif isinstance(data_url, (list, tuple)):
return self.create_dummy_data_list(dummy_file, data_url)
else:
return self.create_dummy_data_single(dummy_file, data_url)
# this function has to be in the manager under this name so that testing works
def download(self, data_url, *args):
return self.download_and_extract(data_url)
# this function has to be in the manager under this name so that testing works
def download_custom(self, data_url, custom_download):
return self.download_and_extract(data_url)
# this function has to be in the manager under this name so that testing works
def extract(self, path, *args, **kwargs):
return path
# this function has to be in the manager under this name so that testing works
def get_recorded_sizes_checksums(self):
return {}
def create_dummy_data_dict(self, path_to_dummy_data, data_url):
dummy_data_dict = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(single_urls, list):
for single_url in single_urls:
download_callback(single_url)
else:
single_url = single_urls
download_callback(single_url)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(single_urls, list):
value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
else:
single_url = single_urls
value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
dummy_data_dict[key] = value
# make sure that values are unique
if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()
):
# append key to value to make its name unique
dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def create_dummy_data_list(self, path_to_dummy_data, data_url):
dummy_data_list = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
is_pubmed_records = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
)
if data_url and (is_tf_records or is_pubmed_records):
data_url = [data_url[0]] * len(data_url)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(single_url)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
dummy_data_list.append(value)
return dummy_data_list
def create_dummy_data_single(self, path_to_dummy_data, data_url):
for download_callback in self.download_callbacks:
download_callback(data_url)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
if os.path.exists(value) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
            # while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def delete_extracted_files(self):
pass
def manage_extracted_files(self):
pass
def iter_archive(self, path):
def _iter_archive_members(path):
# this preserves the order of the members inside the ZIP archive
dummy_parent_path = Path(self.dummy_file).parent
relative_path = path.relative_to(dummy_parent_path)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
members = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(member)
path = Path(path)
file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__")):
yield file_path.relative_to(path).as_posix(), file_path.open("rb")
def iter_files(self, paths):
if not isinstance(paths, list):
paths = [paths]
for path in paths:
if os.path.isfile(path):
yield path
else:
for dirpath, dirnames, filenames in os.walk(path):
if os.path.basename(dirpath).startswith((".", "__")):
continue
dirnames.sort()
for filename in sorted(filenames):
if filename.startswith((".", "__")):
continue
yield os.path.join(dirpath, filename)
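# Note: MockDownloadManager is used by the dataset testing utilities; instead of downloading real data,
# it maps URLs to paths inside the dummy_data folder (local or fetched), see download_and_extract above.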
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/commands/test.py | import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from shutil import copyfile, rmtree
from typing import Generator
import datasets.config
from datasets.builder import DatasetBuilder
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.info_utils import VerificationMode
from datasets.utils.logging import ERROR, get_logger
logger = get_logger(__name__)
def _test_command_factory(args):
return TestCommand(
args.dataset,
args.name,
args.cache_dir,
args.data_dir,
args.all_configs,
args.save_info or args.save_infos,
args.ignore_verifications,
args.force_redownload,
args.clear_cache,
)
class TestCommand(BaseDatasetsCLICommand):
__test__ = False # to tell pytest it's not a test class
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("test", help="Test dataset implementation.")
test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
test_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory where the datasets are stored.",
)
test_parser.add_argument(
"--data_dir",
type=str,
default=None,
help="Can be used to specify a manual directory to get the files from.",
)
test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
test_parser.add_argument(
"--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)"
)
test_parser.add_argument(
"--ignore_verifications",
action="store_true",
help="Run the test without checksums and splits checks.",
)
test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
test_parser.add_argument(
"--clear_cache",
action="store_true",
help="Remove downloaded files and cached datasets after each config test",
)
# aliases
test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info")
test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
test_parser.set_defaults(func=_test_command_factory)
def __init__(
self,
dataset: str,
name: str,
cache_dir: str,
data_dir: str,
all_configs: bool,
save_infos: bool,
ignore_verifications: bool,
force_redownload: bool,
clear_cache: bool,
):
self._dataset = dataset
self._name = name
self._cache_dir = cache_dir
self._data_dir = data_dir
self._all_configs = all_configs
self._save_infos = save_infos
self._ignore_verifications = ignore_verifications
self._force_redownload = force_redownload
self._clear_cache = clear_cache
if clear_cache and not cache_dir:
print(
"When --clear_cache is used, specifying a cache directory is mandatory.\n"
"The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n"
"Please provide a --cache_dir that will be used to test the dataset script."
)
exit(1)
if save_infos:
self._ignore_verifications = True
def run(self):
logging.getLogger("filelock").setLevel(ERROR)
if self._name is not None and self._all_configs:
print("Both parameters `config` and `all_configs` can't be used at once.")
exit(1)
path, config_name = self._dataset, self._name
module = dataset_module_factory(path)
builder_cls = import_main_class(module.module_path)
n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1
def get_builders() -> Generator[DatasetBuilder, None, None]:
if self._all_configs and builder_cls.BUILDER_CONFIGS:
for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
if "config_name" in module.builder_kwargs:
yield builder_cls(
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
else:
yield builder_cls(
config_name=config.name,
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
else:
if "config_name" in module.builder_kwargs:
yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs)
else:
yield builder_cls(
config_name=config_name,
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
for j, builder in enumerate(get_builders()):
print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
builder._record_infos = os.path.exists(
os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
) # record checksums only if we need to update a (deprecated) dataset_infos.json
builder.download_and_prepare(
download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
if not self._force_redownload
else DownloadMode.FORCE_REDOWNLOAD,
verification_mode=VerificationMode.NO_CHECKS
if self._ignore_verifications
else VerificationMode.ALL_CHECKS,
try_from_hf_gcs=False,
)
builder.as_dataset()
if self._save_infos:
builder._save_infos()
# If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
# The dataset_infos are saved in the YAML part of the README.md
# Let's move it to the original directory of the dataset script, to allow the user to
# upload them on S3 at the same time afterwards.
if self._save_infos:
dataset_readme_path = os.path.join(
builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME
)
name = Path(path).name + ".py"
combined_path = os.path.join(path, name)
if os.path.isfile(path):
dataset_dir = os.path.dirname(path)
elif os.path.isfile(combined_path):
dataset_dir = path
elif os.path.isdir(path): # for local directories containing only data files
dataset_dir = path
else: # in case of a remote dataset
dataset_dir = None
print(f"Dataset card saved at {dataset_readme_path}")
# Move dataset_info back to the user
if dataset_dir is not None:
user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME)
copyfile(dataset_readme_path, user_dataset_readme_path)
print(f"Dataset card saved at {user_dataset_readme_path}")
# If clear_cache=True, the download folder and the dataset builder cache directory are deleted
if self._clear_cache:
if os.path.isdir(builder._cache_dir):
logger.warning(f"Clearing cache at {builder._cache_dir}")
rmtree(builder._cache_dir)
download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR)
if os.path.isdir(download_dir):
logger.warning(f"Clearing cache at {download_dir}")
rmtree(download_dir)
print("Test successful.")
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/commands/dummy_data.py | import fnmatch
import json
import os
import shutil
import tempfile
import xml.etree.ElementTree as ET
from argparse import ArgumentParser
from pathlib import Path
from typing import Optional
from datasets import config
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.download.mock_download_manager import MockDownloadManager
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.deprecation_utils import deprecated
from datasets.utils.logging import get_logger, set_verbosity_warning
from datasets.utils.py_utils import map_nested
logger = get_logger(__name__)
DEFAULT_ENCODING = "utf-8"
def dummy_data_command_factory(args):
return DummyDataCommand(
args.path_to_dataset,
args.auto_generate,
args.n_lines,
args.json_field,
args.xml_tag,
args.match_text_files,
args.keep_uncompressed,
args.cache_dir,
args.encoding,
)
class DummyDataGeneratorDownloadManager(DownloadManager):
def __init__(self, mock_download_manager, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mock_download_manager = mock_download_manager
self.downloaded_dummy_paths = []
self.expected_dummy_paths = []
def download(self, url_or_urls):
output = super().download(url_or_urls)
dummy_output = self.mock_download_manager.download(url_or_urls)
map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
return output
def download_and_extract(self, url_or_urls):
output = super().extract(super().download(url_or_urls))
dummy_output = self.mock_download_manager.download(url_or_urls)
map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
return output
def auto_generate_dummy_data_folder(
self,
n_lines: int = 5,
json_field: Optional[str] = None,
xml_tag: Optional[str] = None,
match_text_files: Optional[str] = None,
encoding: Optional[str] = None,
) -> bool:
os.makedirs(
os.path.join(
self.mock_download_manager.datasets_scripts_dir,
self.mock_download_manager.dataset_name,
self.mock_download_manager.dummy_data_folder,
"dummy_data",
),
exist_ok=True,
)
total = 0
self.mock_download_manager.load_existing_dummy_data = False
for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths):
dst_path = os.path.join(
self.mock_download_manager.datasets_scripts_dir,
self.mock_download_manager.dataset_name,
self.mock_download_manager.dummy_data_folder,
relative_dst_path,
)
total += self._create_dummy_data(
src_path,
dst_path,
n_lines=n_lines,
json_field=json_field,
xml_tag=xml_tag,
match_text_files=match_text_files,
encoding=encoding,
)
if total == 0:
logger.error(
"Dummy data generation failed: no dummy files were created. "
"Make sure the data files format is supported by the auto-generation."
)
return total > 0
def _create_dummy_data(
self,
src_path: str,
dst_path: str,
n_lines: int,
json_field: Optional[str] = None,
xml_tag: Optional[str] = None,
match_text_files: Optional[str] = None,
encoding: Optional[str] = None,
) -> int:
encoding = encoding or DEFAULT_ENCODING
if os.path.isfile(src_path):
logger.debug(f"Trying to generate dummy data file {dst_path}")
dst_path_extensions = Path(dst_path).suffixes
line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"]
is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions)
if match_text_files is not None:
file_name = os.path.basename(dst_path)
for pattern in match_text_files.split(","):
is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern)
# Line by line text file (txt, csv etc.)
if is_line_by_line_text_file:
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(src_path, encoding=encoding) as src_file:
with open(dst_path, "w", encoding=encoding) as dst_file:
first_lines = []
for i, line in enumerate(src_file):
if i >= n_lines:
break
first_lines.append(line)
dst_file.write("".join(first_lines).strip())
return 1
# json file
elif ".json" in dst_path_extensions:
with open(src_path, encoding=encoding) as src_file:
json_data = json.load(src_file)
if json_field is not None:
json_data = json_data[json_field]
if isinstance(json_data, dict):
if not all(isinstance(v, list) for v in json_data.values()):
raise ValueError(
f"Couldn't parse columns {list(json_data.keys())}. "
"Maybe specify which json field must be used "
"to read the data with --json_field <my_field>."
)
first_json_data = {k: v[:n_lines] for k, v in json_data.items()}
else:
first_json_data = json_data[:n_lines]
if json_field is not None:
first_json_data = {json_field: first_json_data}
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(dst_path, "w", encoding=encoding) as dst_file:
json.dump(first_json_data, dst_file)
return 1
# xml file
elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]):
if xml_tag is None:
logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag")
else:
self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding)
return 1
logger.warning(
f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data."
)
return 0
# directory, iterate through all files
elif os.path.isdir(src_path):
total = 0
for path, _, files in os.walk(src_path):
for name in files:
if not name.startswith("."): # ignore files like .DS_Store etc.
src_file_path = os.path.join(path, name)
dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path))
total += self._create_dummy_data(
src_file_path,
dst_file_path,
n_lines=n_lines,
json_field=json_field,
xml_tag=xml_tag,
match_text_files=match_text_files,
encoding=encoding,
)
return total
@staticmethod
def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING):
Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
with open(src_path, encoding=encoding) as src_file:
n_line = 0
parents = []
for event, elem in ET.iterparse(src_file, events=("start", "end")):
if event == "start":
parents.append(elem)
else:
_ = parents.pop()
if elem.tag == xml_tag:
if n_line < n_lines:
n_line += 1
else:
if parents:
parents[-1].remove(elem)
ET.ElementTree(element=elem).write(dst_path, encoding=encoding)
def compress_autogenerated_dummy_data(self, path_to_dataset):
root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder)
base_name = os.path.join(root_dir, "dummy_data")
base_dir = "dummy_data"
logger.info(f"Compressing dummy data folder to '{base_name}.zip'")
shutil.make_archive(base_name, "zip", root_dir, base_dir)
shutil.rmtree(base_name)
@deprecated(
"The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI."
)
class DummyDataCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("dummy_data", help="Generate dummy data.")
test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data")
test_parser.add_argument(
"--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data"
)
test_parser.add_argument(
"--json_field",
type=str,
default=None,
help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)",
)
test_parser.add_argument(
"--xml_tag",
type=str,
default=None,
help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.",
)
test_parser.add_argument(
"--match_text_files",
type=str,
default=None,
help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label",
)
test_parser.add_argument(
"--keep_uncompressed",
action="store_true",
help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging for to do manual adjustements before compressing.",
)
test_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory to download and cache files when auto-generating dummy data",
)
test_parser.add_argument(
"--encoding",
type=str,
default=None,
help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}",
)
test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)")
test_parser.set_defaults(func=dummy_data_command_factory)
def __init__(
self,
path_to_dataset: str,
auto_generate: bool,
n_lines: int,
json_field: Optional[str],
xml_tag: Optional[str],
match_text_files: Optional[str],
keep_uncompressed: bool,
cache_dir: Optional[str],
encoding: Optional[str],
):
self._path_to_dataset = path_to_dataset
if os.path.isdir(path_to_dataset):
self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1]
else:
self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2]
cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE)
self._auto_generate = auto_generate
self._n_lines = n_lines
self._json_field = json_field
self._xml_tag = xml_tag
self._match_text_files = match_text_files
self._keep_uncompressed = keep_uncompressed
self._cache_dir = cache_dir
self._encoding = encoding
def run(self):
set_verbosity_warning()
dataset_module = dataset_module_factory(self._path_to_dataset)
builder_cls = import_main_class(dataset_module.module_path)
# use `None` as config if no configs
builder_configs = builder_cls.BUILDER_CONFIGS or [None]
auto_generate_results = []
with tempfile.TemporaryDirectory() as tmp_dir:
for builder_config in builder_configs:
config_name = builder_config.name if builder_config else None
dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir)
version = builder_config.version if builder_config else dataset_builder.config.version
mock_dl_manager = MockDownloadManager(
dataset_name=self._dataset_name,
config=builder_config,
version=version,
use_local_dummy_data=True,
load_existing_dummy_data=False,
)
if self._auto_generate:
auto_generate_results.append(
self._autogenerate_dummy_data(
dataset_builder=dataset_builder,
mock_dl_manager=mock_dl_manager,
keep_uncompressed=self._keep_uncompressed,
)
)
else:
self._print_dummy_data_instructions(
dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager
)
if self._auto_generate and not self._keep_uncompressed:
if all(auto_generate_results):
print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'")
else:
print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'")
def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]:
dl_cache_dir = (
os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR)
if self._cache_dir
else config.DOWNLOADED_DATASETS_PATH
)
download_config = DownloadConfig(cache_dir=dl_cache_dir)
dl_manager = DummyDataGeneratorDownloadManager(
dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config
)
dataset_builder._split_generators(dl_manager)
mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data
dl_manager.auto_generate_dummy_data_folder(
n_lines=self._n_lines,
json_field=self._json_field,
xml_tag=self._xml_tag,
match_text_files=self._match_text_files,
encoding=self._encoding,
)
if not keep_uncompressed:
path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name)
dl_manager.compress_autogenerated_dummy_data(path_do_dataset)
# now test that the dummy_data.zip file actually works
mock_dl_manager.load_existing_dummy_data = True # use real dummy data
n_examples_per_split = {}
os.makedirs(dataset_builder._cache_dir, exist_ok=True)
try:
split_generators = dataset_builder._split_generators(mock_dl_manager)
for split_generator in split_generators:
dataset_builder._prepare_split(split_generator, check_duplicate_keys=False)
n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples
except OSError as e:
logger.error(
f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n"
+ str(e)
)
return False
else:
if all(n_examples > 0 for n_examples in n_examples_per_split.values()):
logger.warning(
f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''."
)
return True
else:
empty_splits = [
split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0
]
logger.warning(
f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''."
)
return False
else:
generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
logger.info(
f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. "
"Please compress this directory into a zip file to use it for dummy data tests."
)
def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager):
dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
os.makedirs(dummy_data_folder, exist_ok=True)
try:
generator_splits = dataset_builder._split_generators(mock_dl_manager)
except FileNotFoundError as e:
print(
f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
)
files_to_create = set()
split_names = []
dummy_file_name = mock_dl_manager.dummy_file_name
for split in generator_splits:
logger.info(f"Collecting dummy data file paths to create for {split.name}")
split_names.append(split.name)
gen_kwargs = split.gen_kwargs
generator = dataset_builder._generate_examples(**gen_kwargs)
try:
dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
config_string = (
f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else ""
)
dummy_data_guidance_print += (
"- In order to create the dummy data for "
+ config_string
+ f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
)
# trigger generate function
for key, record in generator:
pass
dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"
except FileNotFoundError as e:
files_to_create.add(e.filename)
split_names = ", ".join(split_names)
if len(files_to_create) > 0:
# no glob.glob(...) in `_generate_examples(...)`
if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
files_string = dummy_file_name
else:
files_string = ", ".join(files_to_create)
dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"
dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"
dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"
if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"
dummy_data_guidance_print += (
f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
)
dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
else:
dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"
dummy_data_guidance_print += (
f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
)
dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
dummy_data_guidance_print += (
f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
)
dummy_data_guidance_print += 83 * "=" + "\n"
print(dummy_data_guidance_print)
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/commands/__init__.py | from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseDatasetsCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/commands/env.py | import platform
from argparse import ArgumentParser
import fsspec
import huggingface_hub
import pandas
import pyarrow
from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env", help="Print relevant system environment info.")
download_parser.set_defaults(func=info_command_factory)
def run(self):
info = {
"`datasets` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"`huggingface_hub` version": huggingface_hub.__version__,
"PyArrow version": pyarrow.__version__,
"Pandas version": pandas.__version__,
"`fsspec` version": fsspec.__version__,
}
print("\nCopy-and-paste the text below in your GitHub issue.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/commands/run_beam.py | import os
from argparse import ArgumentParser
from pathlib import Path
from shutil import copyfile
from typing import List
from datasets import config
from datasets.builder import DatasetBuilder
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.info_utils import VerificationMode
def run_beam_command_factory(args, **kwargs):
return RunBeamCommand(
args.dataset,
args.name,
args.cache_dir,
args.beam_pipeline_options,
args.data_dir,
args.all_configs,
args.save_info or args.save_infos,
args.ignore_verifications,
args.force_redownload,
**kwargs,
)
class RunBeamCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline")
run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name")
run_beam_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory where the datasets are stored",
)
run_beam_parser.add_argument(
"--beam_pipeline_options",
type=str,
default="",
help="Beam pipeline options, separated by commas. Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`",
)
run_beam_parser.add_argument(
"--data_dir",
type=str,
default=None,
help="Can be used to specify a manual directory to get the files from",
)
run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file")
run_beam_parser.add_argument(
"--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
)
run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
# aliases
run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info")
run_beam_parser.set_defaults(func=run_beam_command_factory)
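# A hedged example invocation (dataset name, config name and options are placeholders;
# the exact Beam options depend on the runner you use):
#
# datasets-cli run_beam <dataset_script_or_name> --name <config_name> \
# --beam_pipeline_options="runner=DirectRunner"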
def __init__(
self,
dataset: str,
name: str,
cache_dir: str,
beam_pipeline_options: str,
data_dir: str,
all_configs: bool,
save_infos: bool,
ignore_verifications: bool,
force_redownload: bool,
**config_kwargs,
):
self._dataset = dataset
self._name = name
self._cache_dir = cache_dir
self._beam_pipeline_options = beam_pipeline_options
self._data_dir = data_dir
self._all_configs = all_configs
self._save_infos = save_infos
self._ignore_verifications = ignore_verifications
self._force_redownload = force_redownload
self._config_kwargs = config_kwargs
def run(self):
import apache_beam as beam
if self._name is not None and self._all_configs:
print("Both parameters `name` and `all_configs` can't be used at once.")
exit(1)
path, config_name = self._dataset, self._name
dataset_module = dataset_module_factory(path)
builder_cls = import_main_class(dataset_module.module_path)
builders: List[DatasetBuilder] = []
if self._beam_pipeline_options:
beam_options = beam.options.pipeline_options.PipelineOptions(
flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt]
)
else:
beam_options = None
if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
for builder_config in builder_cls.BUILDER_CONFIGS:
builders.append(
builder_cls(
config_name=builder_config.name,
data_dir=self._data_dir,
hash=dataset_module.hash,
beam_options=beam_options,
cache_dir=self._cache_dir,
base_path=dataset_module.builder_kwargs.get("base_path"),
)
)
else:
builders.append(
builder_cls(
config_name=config_name,
data_dir=self._data_dir,
beam_options=beam_options,
cache_dir=self._cache_dir,
base_path=dataset_module.builder_kwargs.get("base_path"),
**self._config_kwargs,
)
)
for builder in builders:
builder.download_and_prepare(
download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
if not self._force_redownload
else DownloadMode.FORCE_REDOWNLOAD,
download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH),
verification_mode=VerificationMode.NO_CHECKS
if self._ignore_verifications
else VerificationMode.ALL_CHECKS,
try_from_hf_gcs=False,
)
if self._save_infos:
builder._save_infos()
print("Apache beam run successful.")
# If save_infos=True, the dataset infos file is created next to the loaded module file.
# Let's move it to the original directory of the dataset script, to allow the user to
# upload it to S3 at the same time afterwards.
if self._save_infos:
dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)
name = Path(path).name + ".py"
combined_path = os.path.join(path, name)
if os.path.isfile(path):
dataset_dir = os.path.dirname(path)
elif os.path.isfile(combined_path):
dataset_dir = path
else: # in case of a remote dataset
print(f"Dataset Infos file saved at {dataset_infos_path}")
exit(1)
# Move the dataset infos file back to the user's dataset directory
user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME)
copyfile(dataset_infos_path, user_dataset_infos_path)
print(f"Dataset Infos file saved at {user_dataset_infos_path}")
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/commands/datasets_cli.py | #!/usr/bin/env python
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/commands/convert.py | import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
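# A minimal, hedged illustration of how the (pattern, replacement) pairs above are
# applied sequentially with `re.sub` further down in `ConvertCommand.run` (the input
# line is a made-up tfds snippet):
#
# >>> line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()}),"
# >>> for pattern, replacement in TO_CONVERT:
# ...     line = re.sub(pattern, replacement, line)
# >>> line
# "features=datasets.Features({'text': datasets.Value('string')}),"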
def convert_command_factory(args: Namespace):
"""
Factory function used to convert a TensorFlow Datasets dataset script into a HuggingFace Datasets dataset script.
Returns: ConvertCommand
"""
return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the datasets-cli
Args:
parser: Root parser to register command-specific arguments
"""
train_parser = parser.add_parser(
"convert",
help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
)
train_parser.add_argument(
"--tfds_path",
type=str,
required=True,
help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
)
train_parser.add_argument(
"--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
)
train_parser.set_defaults(func=convert_command_factory)
def __init__(self, tfds_path: str, datasets_directory: str, *args):
self._logger = get_logger("datasets-cli/converting")
self._tfds_path = tfds_path
self._datasets_directory = datasets_directory
def run(self):
if os.path.isdir(self._tfds_path):
abs_tfds_path = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
abs_tfds_path = os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
abs_datasets_path = os.path.abspath(self._datasets_directory)
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
utils_files = []
with_manual_update = []
imports_to_builder_map = {}
if os.path.isdir(self._tfds_path):
file_names = os.listdir(abs_tfds_path)
else:
file_names = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}")
input_file = os.path.join(abs_tfds_path, f_name)
output_file = os.path.join(abs_datasets_path, f_name)
if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(input_file, encoding="utf-8") as f:
lines = f.readlines()
out_lines = []
is_builder = False
needs_manual_update = False
tfds_imports = []
for line in lines:
out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
out_line = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
out_line = ""
continue
elif "from absl import logging" in out_line:
out_line = "from datasets import logging\n"
elif "getLogger" in out_line:
out_line = out_line.replace("getLogger", "get_logger")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
needs_manual_update = True
to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
out_lines.append(out_line)
out_lines.append(HIGHLIGHT_MESSAGE_POST)
continue
else:
for pattern, replacement in TO_CONVERT:
out_line = re.sub(pattern, replacement, out_line)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
out_line = "from . import " + match.group(1)
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}")
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
is_builder = True
out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
dir_name = f_name.replace(".py", "")
output_dir = os.path.join(abs_datasets_path, dir_name)
output_file = os.path.join(output_dir, f_name)
os.makedirs(output_dir, exist_ok=True)
self._logger.info(f"Adding directory {output_dir}")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(output_file)
if needs_manual_update:
with_manual_update.append(output_file)
with open(output_file, "w", encoding="utf-8") as f:
f.writelines(out_lines)
self._logger.info(f"Converted in {output_file}")
for utils_file in utils_files:
try:
f_name = os.path.basename(utils_file)
dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
self._logger.info(f"Moving {dest_folder} to {utils_file}")
shutil.copy(utils_file, dest_folder)
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
)
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/filesystems/compression.py | import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
"""Read contents of compressed file as a filesystem with one file inside."""
root_marker = ""
protocol: str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
compression: str = None # compression type in fsspec. ex: "gzip"
extension: str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__(
self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
):
"""
The compressed file system can be instantiated from any compressed file.
It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive.
The single file inside the filesystem is named after the compressed file,
without the compression extension at the end of the filename.
Args:
fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
mode (:obj:``str``): Currently, only 'rb' accepted
target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
"""
super().__init__(self, **kwargs)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
self.file = fsspec.open(
fo,
mode="rb",
protocol=target_protocol,
compression=self.compression,
client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
},
**(target_options or {}),
)
self.compressed_name = os.path.basename(self.file.path.split("::")[0])
self.uncompressed_name = (
self.compressed_name[: self.compressed_name.rindex(".")]
if "." in self.compressed_name
else self.compressed_name
)
self.dir_cache = None
@classmethod
def _strip_protocol(cls, path):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
if self.dir_cache is None:
f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
self.dir_cache = {f["name"]: f}
def cat(self, path: str):
return self.file.open().read()
def _open(
self,
path: str,
mode: str = "rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
"""Read contents of BZ2 file as a filesystem with one file inside."""
protocol = "bz2"
compression = "bz2"
extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
"""Read contents of GZIP file as a filesystem with one file inside."""
protocol = "gzip"
compression = "gzip"
extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
"""Read contents of LZ4 file as a filesystem with one file inside."""
protocol = "lz4"
compression = "lz4"
extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
"""Read contents of .xz (LZMA) file as a filesystem with one file inside."""
protocol = "xz"
compression = "xz"
extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
"""
Read contents of zstd file as a filesystem with one file inside.
Note that reading in binary mode with fsspec isn't supported yet:
https://github.com/indygreg/python-zstandard/issues/136
"""
protocol = "zstd"
compression = "zstd"
extension = ".zst"
def __init__(
self,
fo: str,
mode: str = "rb",
target_protocol: Optional[str] = None,
target_options: Optional[dict] = None,
block_size: int = DEFAULT_BLOCK_SIZE,
**kwargs,
):
super().__init__(
fo=fo,
mode=mode,
target_protocol=target_protocol,
target_options=target_options,
block_size=block_size,
**kwargs,
)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_enter = self.file.__enter__
class WrappedFile:
def __init__(self, file_):
self._file = file_
def __enter__(self):
self._file.__enter__()
return self
def __exit__(self, *args, **kwargs):
self._file.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self._file)
def __next__(self):
return next(self._file)
def __getattr__(self, attr):
return getattr(self._file, attr)
def fixed_enter(*args, **kwargs):
return WrappedFile(_enter(*args, **kwargs))
self.file.__enter__ = fixed_enter
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/filesystems/__init__.py | import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from ..utils.deprecation_utils import deprecated
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
@deprecated(
"This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
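# Hedged doctest-style examples (the bucket and paths are made up):
#
# >>> extract_path_from_uri("s3://my-bucket/dataset/train")
# 'my-bucket/dataset/train'
# >>> extract_path_from_uri("dataset/train")
# 'dataset/train'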
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/filesystems/s3filesystem.py | import s3fs
from ..utils.deprecation_utils import deprecated
@deprecated("Use s3fs.S3FileSystem instead.")
class S3FileSystem(s3fs.S3FileSystem):
"""
`datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html).
Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`.
Args:
anon (`bool`, default to `False`):
Whether to use anonymous connection (public buckets only). If `False`, uses the key/secret given,
or boto's credential resolver (client_kwargs, environment variables, config files, EC2 IAM server, in that order).
key (`str`):
If not anonymous, use this access key ID, if specified.
secret (`str`):
If not anonymous, use this secret access key, if specified.
token (`str`):
If not anonymous, use this security token, if specified.
use_ssl (`bool`, defaults to `True`):
Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is
also set in `client_kwargs`, the value set in `client_kwargs` will take priority.
s3_additional_kwargs (`dict`):
Parameters that are used when calling S3 API methods. Typically used for things
like ServerSideEncryption.
client_kwargs (`dict`):
Parameters for the botocore client.
requester_pays (`bool`, defaults to `False`):
Whether `RequesterPays` buckets are supported.
default_block_size (`int`):
If given, the default block size value used for `open()`, if no specific value is given at all time.
The built-in default is 5MB.
default_fill_cache (`bool`, defaults to `True`):
Whether to use cache filling with open by default. Refer to `S3File.open`.
default_cache_type (`str`, defaults to `bytes`):
If given, the default `cache_type` value used for `open()`. Set to `none` if no
caching is desired. See fsspec's documentation for other available `cache_type` values.
version_aware (`bool`, defaults to `False`):
Whether to support bucket versioning. If enabled, this will require the user to have
the necessary IAM permissions for dealing with versioned objects.
cache_regions (`bool`, defaults to `False`):
Whether to cache bucket regions. Whenever a new bucket is used, it will
first find out which region it belongs to and then use the client for that region.
asynchronous (`bool`, defaults to `False`):
Whether this instance is to be used from inside coroutines.
config_kwargs (`dict`):
Parameters passed to `botocore.client.Config`.
**kwargs:
Other parameters for core session.
session (`aiobotocore.session.AioSession`):
Session to be used for all connections. This session will be used in place of creating
a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`.
skip_instance_cache (`bool`):
Control reuse of instances. Passed on to `fsspec`.
use_listings_cache (`bool`):
Control reuse of directory listings. Passed on to `fsspec`.
listings_expiry_time (`int` or `float`):
Control reuse of directory listings. Passed on to `fsspec`.
max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`.
Examples:
Listing files from public S3 bucket.
```py
>>> import datasets
>>> s3 = datasets.filesystems.S3FileSystem(anon=True) # doctest: +SKIP
>>> s3.ls('public-datasets/imdb/train') # doctest: +SKIP
['dataset_info.json.json','dataset.arrow','state.json']
```
Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`.
```py
>>> import datasets
>>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
>>> s3.ls('my-private-datasets/imdb/train') # doctest: +SKIP
['dataset_info.json.json','dataset.arrow','state.json']
```
Using `S3FileSystem` with `botocore.session.Session` and custom `aws_profile`.
```py
>>> import botocore
>>> from datasets.filesystems import S3FileSystem
>>> s3_session = botocore.session.Session(profile_name='my_profile_name')
>>> s3 = S3FileSystem(session=s3_session) # doctest: +SKIP
```
Loading dataset from S3 using `S3FileSystem` and [`load_from_disk`].
```py
>>> from datasets import load_from_disk
>>> from datasets.filesystems import S3FileSystem
>>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
>>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
>>> print(len(dataset))
25000
```
Saving dataset to S3 using `S3FileSystem` and [`Dataset.save_to_disk`].
```py
>>> from datasets import load_dataset
>>> from datasets.filesystems import S3FileSystem
>>> dataset = load_dataset("imdb")
>>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
>>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
```
"""
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/image_classification.py | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"image": Image()})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
image_column: str = "image"
label_column: str = "labels"
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
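# A short, hedged sketch of how align_with_features specializes the label schema
# (the feature names and class names below are made up):
#
# >>> from datasets import ClassLabel, Features, Image
# >>> features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# >>> ImageClassification().align_with_features(features).label_schema["labels"].names
# ['cat', 'dog']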
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/language_modeling.py | from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({})
text_column: str = "text"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text"}
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/automatic_speech_recognition.py | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"audio": Audio()})
label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
audio_column: str = "audio"
transcription_column: str = "transcription"
def align_with_features(self, features):
if self.audio_column not in features:
raise ValueError(f"Column {self.audio_column} is not present in features.")
if not isinstance(features[self.audio_column], Audio):
raise ValueError(f"Column {self.audio_column} is not an Audio type.")
task_template = copy.deepcopy(self)
input_schema = self.input_schema.copy()
input_schema["audio"] = features[self.audio_column]
task_template.__dict__["input_schema"] = input_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/audio_classification.py | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"audio": Audio()})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
audio_column: str = "audio"
label_column: str = "labels"
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/__init__.py | from typing import Optional
from ..utils.logging import get_logger
from .audio_classification import AudioClassification
from .automatic_speech_recognition import AutomaticSpeechRecognition
from .base import TaskTemplate
from .image_classification import ImageClassification
from .language_modeling import LanguageModeling
from .question_answering import QuestionAnsweringExtractive
from .summarization import Summarization
from .text_classification import TextClassification
__all__ = [
"AutomaticSpeechRecognition",
"AudioClassification",
"ImageClassification",
"LanguageModeling",
"QuestionAnsweringExtractive",
"Summarization",
"TaskTemplate",
"TextClassification",
]
logger = get_logger(__name__)
NAME2TEMPLATE = {
AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
AudioClassification.task: AudioClassification,
ImageClassification.task: ImageClassification,
LanguageModeling.task: LanguageModeling,
QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
Summarization.task: Summarization,
TextClassification.task: TextClassification,
}
def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
"""Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary."""
task_name = task_template_dict.get("task")
if task_name is None:
logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
return None
template = NAME2TEMPLATE.get(task_name)
return template.from_dict(task_template_dict)
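# A hedged doctest-style example (the column names are made up; keys that are not
# dataclass fields of the matched template are ignored by `from_dict`):
#
# >>> task_template_from_dict({"task": "summarization", "text_column": "article", "summary_column": "highlights"})
# Summarization(task='summarization', text_column='article', summary_column='highlights')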
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/question_answering.py | from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
label_schema: ClassVar[Features] = Features(
{
"answers": Sequence(
{
"text": Value("string"),
"answer_start": Value("int32"),
}
)
}
)
question_column: str = "question"
context_column: str = "context"
answers_column: str = "answers"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/text_classification.py | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
text_column: str = "text"
label_column: str = "labels"
def align_with_features(self, features):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features.")
if not isinstance(features[self.label_column], ClassLabel):
raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
task_template = copy.deepcopy(self)
label_schema = self.label_schema.copy()
label_schema["labels"] = features[self.label_column]
task_template.__dict__["label_schema"] = label_schema
return task_template
@property
def column_mapping(self) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/base.py | import abc
import copy
import dataclasses
from dataclasses import dataclass
from typing import ClassVar, Dict, Type, TypeVar
from ..features import Features
T = TypeVar("T", bound="TaskTemplate")
@dataclass(frozen=True)
class TaskTemplate(abc.ABC):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str
input_schema: ClassVar[Features]
label_schema: ClassVar[Features]
def align_with_features(self: T, features: Features) -> T:
"""
Align features with the task template.
"""
# No-op
return copy.deepcopy(self)
@property
def features(self) -> Features:
return Features(**self.input_schema, **self.label_schema)
@property
@abc.abstractmethod
def column_mapping(self) -> Dict[str, str]:
raise NotImplementedError
@classmethod
def from_dict(cls: Type[T], template_dict: dict) -> T:
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in template_dict.items() if k in field_names})
| 0 |
hf_public_repos/datasets/src/datasets | hf_public_repos/datasets/src/datasets/tasks/summarization.py | from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
input_schema: ClassVar[Features] = Features({"text": Value("string")})
label_schema: ClassVar[Features] = Features({"summary": Value("string")})
text_column: str = "text"
summary_column: str = "summary"
@property
def column_mapping(self) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 0 |
hf_public_repos/datasets/.dvc | hf_public_repos/datasets/.dvc/plots/smooth.json | {
"$schema": "https://vega.github.io/schema/vega-lite/v4.json",
"data": {
"values": "<DVC_METRIC_DATA>"
},
"title": "<DVC_METRIC_TITLE>",
"mark": {
"type": "line"
},
"encoding": {
"x": {
"field": "<DVC_METRIC_X>",
"type": "quantitative",
"title": "<DVC_METRIC_X_LABEL>"
},
"y": {
"field": "<DVC_METRIC_Y>",
"type": "quantitative",
"title": "<DVC_METRIC_Y_LABEL>",
"scale": {
"zero": false
}
},
"color": {
"field": "rev",
"type": "nominal"
}
},
"transform": [
{
"loess": "<DVC_METRIC_Y>",
"on": "<DVC_METRIC_X>",
"groupby": [
"rev"
],
"bandwidth": 0.3
}
]
}
| 0 |
hf_public_repos/datasets/.dvc | hf_public_repos/datasets/.dvc/plots/confusion.json | {
"$schema": "https://vega.github.io/schema/vega-lite/v4.json",
"data": {
"values": "<DVC_METRIC_DATA>"
},
"title": "<DVC_METRIC_TITLE>",
"mark": "rect",
"encoding": {
"x": {
"field": "<DVC_METRIC_X>",
"type": "nominal",
"sort": "ascending",
"title": "<DVC_METRIC_X_LABEL>"
},
"y": {
"field": "<DVC_METRIC_Y>",
"type": "nominal",
"sort": "ascending",
"title": "<DVC_METRIC_Y_LABEL>"
},
"color": {
"aggregate": "count",
"type": "quantitative"
},
"facet": {
"field": "rev",
"type": "nominal"
}
}
}
| 0 |
hf_public_repos/datasets/.dvc | hf_public_repos/datasets/.dvc/plots/scatter.json | {
"$schema": "https://vega.github.io/schema/vega-lite/v4.json",
"data": {
"values": "<DVC_METRIC_DATA>"
},
"title": "<DVC_METRIC_TITLE>",
"mark": "point",
"encoding": {
"x": {
"field": "<DVC_METRIC_X>",
"type": "quantitative",
"title": "<DVC_METRIC_X_LABEL>"
},
"y": {
"field": "<DVC_METRIC_Y>",
"type": "quantitative",
"title": "<DVC_METRIC_Y_LABEL>",
"scale": {
"zero": false
}
},
"color": {
"field": "rev",
"type": "nominal"
}
}
}
| 0 |
hf_public_repos/datasets/.dvc | hf_public_repos/datasets/.dvc/plots/default.json | {
"$schema": "https://vega.github.io/schema/vega-lite/v4.json",
"data": {
"values": "<DVC_METRIC_DATA>"
},
"title": "<DVC_METRIC_TITLE>",
"mark": {
"type": "line"
},
"encoding": {
"x": {
"field": "<DVC_METRIC_X>",
"type": "quantitative",
"title": "<DVC_METRIC_X_LABEL>"
},
"y": {
"field": "<DVC_METRIC_Y>",
"type": "quantitative",
"title": "<DVC_METRIC_Y_LABEL>",
"scale": {
"zero": false
}
},
"color": {
"field": "rev",
"type": "nominal"
}
}
}
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/setup.cfg | [dist_conda]
conda_name_differences = 'torch:pytorch'
channels = pytorch
noarch = True
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/benchmark.py | #!/usr/bin/env python3
""" Model Benchmark Script
An inference and train step benchmark script for timm models.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import csv
import json
import logging
import time
from collections import OrderedDict
from contextlib import suppress
from functools import partial
import torch
import torch.nn as nn
import torch.nn.parallel
from timm.data import resolve_data_config
from timm.layers import set_fast_norm
from timm.models import create_model, is_model, list_models
from timm.optim import create_optimizer_v2
from timm.utils import setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs,\
reparameterize_model
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
from deepspeed.profiling.flops_profiler import get_model_profile
has_deepspeed_profiling = True
except ImportError as e:
has_deepspeed_profiling = False
try:
from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis
has_fvcore_profiling = True
except ImportError as e:
FlopCountAnalysis = None
has_fvcore_profiling = False
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch Benchmark')
# benchmark specific args
parser.add_argument('--model-list', metavar='NAME', default='',
help='txt file based list of model names to benchmark')
parser.add_argument('--bench', default='both', type=str,
help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'")
parser.add_argument('--detail', action='store_true', default=False,
help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False')
parser.add_argument('--no-retry', action='store_true', default=False,
help='Do not decay batch size and retry on error.')
parser.add_argument('--results-file', default='', type=str,
help='Output csv file for validation results (summary)')
parser.add_argument('--results-format', default='csv', type=str,
help='Format for results file one of (csv, json) (default: csv).')
parser.add_argument('--num-warm-iter', default=10, type=int,
help='Number of warmup iterations (default: 10)')
parser.add_argument('--num-bench-iter', default=40, type=int,
help='Number of benchmark iterations (default: 40)')
parser.add_argument('--device', default='cuda', type=str,
help="device to run benchmark on")
# common inference / train args
parser.add_argument('--model', '-m', metavar='NAME', default='resnet50',
help='model architecture (default: resnet50)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='Run inference at train size, not test-input-size if it exists.')
parser.add_argument('--num-classes', type=int, default=None,
help='Number of classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--grad-checkpointing', action='store_true', default=False,
help='Enable gradient checkpointing through model blocks/stages')
parser.add_argument('--amp', action='store_true', default=False,
help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16). Overrides --precision arg if args.amp True.')
parser.add_argument('--precision', default='float32', type=str,
help='Numeric precision. One of (amp, amp_bfloat16, float32, float16, bfloat16)')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--fast-norm', default=False, action='store_true',
help='enable experimental fast-norm')
parser.add_argument('--reparam', default=False, action='store_true',
help='Reparameterize model')
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
# codegen (model compilation) options
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd optimization.")
# train optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
help='weight decay (default: 0.0001)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# model regularization / loss params that impact model or loss fn
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
def timestamp(sync=False):
return time.perf_counter()
def cuda_timestamp(sync=False, device=None):
if sync:
torch.cuda.synchronize(device=device)
return time.perf_counter()
def count_params(model: nn.Module):
return sum([m.numel() for m in model.parameters()])
def resolve_precision(precision: str):
assert precision in ('amp', 'amp_bfloat16', 'float16', 'bfloat16', 'float32')
amp_dtype = None # amp disabled
model_dtype = torch.float32
data_dtype = torch.float32
if precision == 'amp':
amp_dtype = torch.float16
elif precision == 'amp_bfloat16':
amp_dtype = torch.bfloat16
elif precision == 'float16':
model_dtype = torch.float16
data_dtype = torch.float16
elif precision == 'bfloat16':
model_dtype = torch.bfloat16
data_dtype = torch.bfloat16
return amp_dtype, model_dtype, data_dtype
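# Hedged illustration of the (amp_dtype, model_dtype, data_dtype) triples returned above:
#
# >>> resolve_precision('amp')
# (torch.float16, torch.float32, torch.float32)
# >>> resolve_precision('bfloat16')
# (None, torch.bfloat16, torch.bfloat16)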
def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False):
_, macs, _ = get_model_profile(
model=model,
input_shape=(batch_size,) + input_size, # input shape/resolution
print_profile=detailed, # prints the model graph with the measured profile attached to each module
detailed=detailed, # print the detailed profile
warm_up=10, # the number of warm-ups before measuring the time of each module
as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k)
output_file=None, # path to the output file. If None, the profiler prints to stdout.
ignore_modules=None) # the list of modules to ignore in the profiling
return macs, 0 # no activation count in DS
def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False):
if force_cpu:
model = model.to('cpu')
device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype)
fca = FlopCountAnalysis(model, example_input)
aca = ActivationCountAnalysis(model, example_input)
if detailed:
fcs = flop_count_str(fca)
print(fcs)
return fca.total(), aca.total()
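# A minimal usage sketch (assumes fvcore is installed; the model name is just an
# example and any timm model would do):
#
# >>> model = create_model('resnet50')
# >>> macs, acts = profile_fvcore(model, input_size=(3, 224, 224), batch_size=1)
# >>> macs / 1e9 # total GMACs for a single 3x224x224 image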
class BenchmarkRunner:
def __init__(
self,
model_name,
detail=False,
device='cuda',
torchscript=False,
torchcompile=None,
aot_autograd=False,
reparam=False,
precision='float32',
fuser='',
num_warm_iter=10,
num_bench_iter=50,
use_train_size=False,
**kwargs
):
self.model_name = model_name
self.detail = detail
self.device = device
self.amp_dtype, self.model_dtype, self.data_dtype = resolve_precision(precision)
self.channels_last = kwargs.pop('channels_last', False)
if self.amp_dtype is not None:
self.amp_autocast = partial(torch.cuda.amp.autocast, dtype=self.amp_dtype)
else:
self.amp_autocast = suppress
if fuser:
set_jit_fuser(fuser)
self.model = create_model(
model_name,
num_classes=kwargs.pop('num_classes', None),
in_chans=3,
global_pool=kwargs.pop('gp', 'fast'),
scriptable=torchscript,
drop_rate=kwargs.pop('drop', 0.),
drop_path_rate=kwargs.pop('drop_path', None),
drop_block_rate=kwargs.pop('drop_block', None),
**kwargs.pop('model_kwargs', {}),
)
if reparam:
self.model = reparameterize_model(self.model)
self.model.to(
device=self.device,
dtype=self.model_dtype,
memory_format=torch.channels_last if self.channels_last else None,
)
self.num_classes = self.model.num_classes
self.param_count = count_params(self.model)
_logger.info('Model %s created, param count: %d' % (model_name, self.param_count))
data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size)
self.input_size = data_config['input_size']
self.batch_size = kwargs.pop('batch_size', 256)
self.compiled = False
if torchscript:
self.model = torch.jit.script(self.model)
self.compiled = True
elif torchcompile:
assert has_compile, 'A version of torch w/ torch.compile() is required, possibly a nightly.'
torch._dynamo.reset()
self.model = torch.compile(self.model, backend=torchcompile)
self.compiled = True
elif aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
self.model = memory_efficient_fusion(self.model)
self.compiled = True
self.example_inputs = None
self.num_warm_iter = num_warm_iter
self.num_bench_iter = num_bench_iter
self.log_freq = num_bench_iter // 5
if 'cuda' in self.device:
self.time_fn = partial(cuda_timestamp, device=self.device)
else:
self.time_fn = timestamp
def _init_input(self):
self.example_inputs = torch.randn(
(self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype)
if self.channels_last:
self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last)
class InferenceBenchmarkRunner(BenchmarkRunner):
def __init__(
self,
model_name,
device='cuda',
torchscript=False,
**kwargs
):
super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
self.model.eval()
def run(self):
def _step():
t_step_start = self.time_fn()
with self.amp_autocast():
output = self.model(self.example_inputs)
t_step_end = self.time_fn(True)
return t_step_end - t_step_start
_logger.info(
f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
with torch.no_grad():
self._init_input()
for _ in range(self.num_warm_iter):
_step()
total_step = 0.
num_samples = 0
t_run_start = self.time_fn()
for i in range(self.num_bench_iter):
delta_fwd = _step()
total_step += delta_fwd
num_samples += self.batch_size
num_steps = i + 1
if num_steps % self.log_freq == 0:
_logger.info(
f"Infer [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_step / num_steps:0.3f} ms/step.")
t_run_end = self.time_fn(True)
t_run_elapsed = t_run_end - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
retries = 0 if self.compiled else 2 # skip profiling if model is torchscript or compiled
while retries:
retries -= 1
try:
if has_deepspeed_profiling:
macs, _ = profile_deepspeed(self.model, self.input_size)
results['gmacs'] = round(macs / 1e9, 2)
elif has_fvcore_profiling:
macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries)
results['gmacs'] = round(macs / 1e9, 2)
results['macts'] = round(activations / 1e6, 2)
except RuntimeError as e:
pass
_logger.info(
f"Inference benchmark of {self.model_name} done. "
f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step")
return results
class TrainBenchmarkRunner(BenchmarkRunner):
def __init__(
self,
model_name,
device='cuda',
torchscript=False,
**kwargs
):
super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
self.model.train()
self.loss = nn.CrossEntropyLoss().to(self.device)
self.target_shape = tuple()
self.optimizer = create_optimizer_v2(
self.model,
opt=kwargs.pop('opt', 'sgd'),
lr=kwargs.pop('lr', 1e-4))
if kwargs.pop('grad_checkpointing', False):
self.model.set_grad_checkpointing()
def _gen_target(self, batch_size):
return torch.empty(
(batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes)
def run(self):
def _step(detail=False):
self.optimizer.zero_grad() # can this be ignored?
t_start = self.time_fn()
t_fwd_end = t_start
t_bwd_end = t_start
with self.amp_autocast():
output = self.model(self.example_inputs)
if isinstance(output, tuple):
output = output[0]
if detail:
t_fwd_end = self.time_fn(True)
target = self._gen_target(output.shape[0])
self.loss(output, target).backward()
if detail:
t_bwd_end = self.time_fn(True)
self.optimizer.step()
t_end = self.time_fn(True)
if detail:
delta_fwd = t_fwd_end - t_start
delta_bwd = t_bwd_end - t_fwd_end
delta_opt = t_end - t_bwd_end
return delta_fwd, delta_bwd, delta_opt
else:
delta_step = t_end - t_start
return delta_step
_logger.info(
f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
self._init_input()
for _ in range(self.num_warm_iter):
_step()
t_run_start = self.time_fn()
if self.detail:
total_fwd = 0.
total_bwd = 0.
total_opt = 0.
num_samples = 0
for i in range(self.num_bench_iter):
delta_fwd, delta_bwd, delta_opt = _step(True)
num_samples += self.batch_size
total_fwd += delta_fwd
total_bwd += delta_bwd
total_opt += delta_opt
num_steps = (i + 1)
if num_steps % self.log_freq == 0:
total_step = total_fwd + total_bwd + total_opt
_logger.info(
f"Train [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd,"
f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd,"
f" {1000 * total_opt / num_steps:0.3f} ms/step opt."
)
total_step = total_fwd + total_bwd + total_opt
t_run_elapsed = self.time_fn() - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3),
bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3),
opt_time=round(1000 * total_opt / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
else:
total_step = 0.
num_samples = 0
for i in range(self.num_bench_iter):
delta_step = _step(False)
num_samples += self.batch_size
total_step += delta_step
num_steps = (i + 1)
if num_steps % self.log_freq == 0:
_logger.info(
f"Train [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_step / num_steps:0.3f} ms/step.")
t_run_elapsed = self.time_fn() - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
_logger.info(
f"Train benchmark of {self.model_name} done. "
f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample")
return results
class ProfileRunner(BenchmarkRunner):
def __init__(self, model_name, device='cuda', profiler='', **kwargs):
super().__init__(model_name=model_name, device=device, **kwargs)
if not profiler:
if has_deepspeed_profiling:
profiler = 'deepspeed'
elif has_fvcore_profiling:
profiler = 'fvcore'
assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work."
self.profiler = profiler
self.model.eval()
def run(self):
_logger.info(
f'Running profiler on {self.model_name} w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
macs = 0
activations = 0
if self.profiler == 'deepspeed':
macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
elif self.profiler == 'fvcore':
macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
results = dict(
gmacs=round(macs / 1e9, 2),
macts=round(activations / 1e6, 2),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
_logger.info(
f"Profile of {self.model_name} done. "
f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.")
return results
def _try_run(
model_name,
bench_fn,
bench_kwargs,
initial_batch_size,
no_batch_size_retry=False
):
batch_size = initial_batch_size
results = dict()
error_str = 'Unknown'
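    # Retry loop: run the benchmark and, on a recoverable RuntimeError (e.g. CUDA OOM),
    # decay the batch size and try again until it reaches zero or retries are disabled.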
while batch_size:
try:
torch.cuda.empty_cache()
bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs)
results = bench.run()
return results
except RuntimeError as e:
error_str = str(e)
_logger.error(f'"{error_str}" while running benchmark.')
if not check_batch_size_retry(error_str):
_logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.')
break
if no_batch_size_retry:
break
batch_size = decay_batch_step(batch_size)
_logger.warning(f'Reducing batch size to {batch_size} for retry.')
results['error'] = error_str
return results
def benchmark(args):
if args.amp:
_logger.warning("Overriding precision to 'amp' since --amp flag set.")
args.precision = 'amp' if args.amp_dtype == 'float16' else '_'.join(['amp', args.amp_dtype])
_logger.info(f'Benchmarking in {args.precision} precision. '
f'{"NHWC" if args.channels_last else "NCHW"} layout. '
f'torchscript {"enabled" if args.torchscript else "disabled"}')
bench_kwargs = vars(args).copy()
bench_kwargs.pop('amp')
model = bench_kwargs.pop('model')
batch_size = bench_kwargs.pop('batch_size')
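    # Select runner class(es) and result key prefixes based on the --bench mode
    # ('infer' by default, 'train', 'both', or a 'profile*' variant).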
bench_fns = (InferenceBenchmarkRunner,)
prefixes = ('infer',)
if args.bench == 'both':
bench_fns = (
InferenceBenchmarkRunner,
TrainBenchmarkRunner
)
prefixes = ('infer', 'train')
elif args.bench == 'train':
bench_fns = TrainBenchmarkRunner,
prefixes = 'train',
elif args.bench.startswith('profile'):
# specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore
if 'deepspeed' in args.bench:
assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter"
bench_kwargs['profiler'] = 'deepspeed'
elif 'fvcore' in args.bench:
assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter"
bench_kwargs['profiler'] = 'fvcore'
bench_fns = ProfileRunner,
batch_size = 1
model_results = OrderedDict(model=model)
for prefix, bench_fn in zip(prefixes, bench_fns):
run_results = _try_run(
model,
bench_fn,
bench_kwargs=bench_kwargs,
initial_batch_size=batch_size,
no_batch_size_retry=args.no_retry,
)
if prefix and 'error' not in run_results:
run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()}
model_results.update(run_results)
if 'error' in run_results:
break
if 'error' not in model_results:
param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0))
model_results.setdefault('param_count', param_count)
model_results.pop('train_param_count', 0)
return model_results
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if args.fast_norm:
set_fast_norm()
if args.model_list:
args.model = ''
with open(args.model_list) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names]
elif args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(pretrained=True, exclude_filters=['*in21k'])
model_cfgs = [(n, None) for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model)
model_cfgs = [(n, None) for n in model_names]
if len(model_cfgs):
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
for m, _ in model_cfgs:
if not m:
continue
args.model = m
r = benchmark(args)
if r:
results.append(r)
time.sleep(10)
except KeyboardInterrupt as e:
pass
sort_key = 'infer_samples_per_sec'
if 'train' in args.bench:
sort_key = 'train_samples_per_sec'
elif 'profile' in args.bench:
sort_key = 'infer_gmacs'
results = filter(lambda x: sort_key in x, results)
results = sorted(results, key=lambda x: x[sort_key], reverse=True)
else:
results = benchmark(args)
if args.results_file:
write_results(args.results_file, results, format=args.results_format)
# output results in JSON to stdout w/ delimiter for runner script
print(f'--result\n{json.dumps(results, indent=4)}')
def write_results(results_file, results, format='csv'):
with open(results_file, mode='w') as cf:
if format == 'json':
json.dump(results, cf, indent=4)
else:
if not isinstance(results, (list, tuple)):
results = [results]
if not results:
return
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/inference.py | #!/usr/bin/env python3
"""PyTorch Inference Script
An example inference script that outputs top-k class ids for images in a folder into a csv.
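Example usage (a sketch; substitute your own image folder, model, and options):
    python inference.py /folder/to/images --model convnext_small.in12k --label-type detail --topk 5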
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import json
import logging
import os
import time
from contextlib import suppress
from functools import partial
import numpy as np
import pandas as pd
import torch
from timm.data import create_dataset, create_loader, resolve_data_config, ImageNetInfo, infer_imagenet_subset
from timm.layers import apply_test_time_pool
from timm.models import create_model
from timm.utils import AverageMeter, setup_default_logging, set_jit_fuser, ParseKwargs
try:
from apex import amp
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_FMT_EXT = {
'json': '.json',
'json-record': '.json',
'json-split': '.json',
'parquet': '.parquet',
'csv': '.csv',
}
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('inference')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--model', '-m', metavar='MODEL', default='resnet50',
help='model architecture (default: resnet50)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str,
metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
parser.add_argument('--amp', action='store_true', default=False,
help='use Native AMP for mixed precision training')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd support.")
parser.add_argument('--results-dir', type=str, default=None,
help='folder for output results')
parser.add_argument('--results-file', type=str, default=None,
help='results filename (relative to results-dir)')
parser.add_argument('--results-format', type=str, nargs='+', default=['csv'],
                    help='results format (one of "csv", "json", "json-record", "json-split", "parquet")')
parser.add_argument('--results-separate-col', action='store_true', default=False,
help='separate output columns per result index.')
parser.add_argument('--topk', default=1, type=int,
metavar='N', help='Top-k to output to CSV')
parser.add_argument('--fullname', action='store_true', default=False,
help='use full sample name in output (not just basename).')
parser.add_argument('--filename-col', type=str, default='filename',
help='name for filename / sample name column')
parser.add_argument('--index-col', type=str, default='index',
help='name for output indices column(s)')
parser.add_argument('--label-col', type=str, default='label',
                    help='name for output label column(s)')
parser.add_argument('--output-col', type=str, default=None,
help='name for logit/probs output column(s)')
parser.add_argument('--output-type', type=str, default='prob',
                    help='output type column ("prob" for probabilities, "logit" for raw logits)')
parser.add_argument('--label-type', type=str, default='description',
                    help='type of label to output, one of "none", "name", "description", "detail"')
parser.add_argument('--include-index', action='store_true', default=False,
help='include the class index in results')
parser.add_argument('--exclude-output', action='store_true', default=False,
help='exclude logits/probs from results, just indices. topk must be set !=0.')
def main():
setup_default_logging()
args = parser.parse_args()
# might as well try to do something useful...
args.pretrained = args.pretrained or not args.checkpoint
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
device = torch.device(args.device)
# resolve AMP arguments based on PyTorch / Apex availability
amp_autocast = suppress
if args.amp:
assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
assert args.amp_dtype in ('float16', 'bfloat16')
amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
_logger.info('Running inference in mixed precision with native PyTorch AMP.')
else:
_logger.info('Running inference in float32. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
# create model
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
model = create_model(
args.model,
num_classes=args.num_classes,
in_chans=in_chans,
pretrained=args.pretrained,
checkpoint_path=args.checkpoint,
**args.model_kwargs,
)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
_logger.info(
f'Model {args.model} created, param count: {sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config)
model = model.to(device)
model.eval()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
model = torch.jit.script(model)
elif args.torchcompile:
        assert has_compile, 'A version of torch w/ torch.compile() is required for --torchcompile, possibly a nightly.'
torch._dynamo.reset()
model = torch.compile(model, backend=args.torchcompile)
elif args.aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
model = memory_efficient_fusion(model)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
root_dir = args.data or args.data_dir
dataset = create_dataset(
root=root_dir,
name=args.dataset,
split=args.split,
class_map=args.class_map,
)
if test_time_pool:
data_config['crop_pct'] = 1.0
workers = 1 if 'tfds' in args.dataset or 'wds' in args.dataset else args.workers
loader = create_loader(
dataset,
batch_size=args.batch_size,
use_prefetcher=True,
num_workers=workers,
**data_config,
)
to_label = None
if args.label_type in ('name', 'description', 'detail'):
imagenet_subset = infer_imagenet_subset(model)
if imagenet_subset is not None:
dataset_info = ImageNetInfo(imagenet_subset)
if args.label_type == 'name':
to_label = lambda x: dataset_info.index_to_label_name(x)
elif args.label_type == 'detail':
to_label = lambda x: dataset_info.index_to_description(x, detailed=True)
else:
to_label = lambda x: dataset_info.index_to_description(x)
to_label = np.vectorize(to_label)
else:
_logger.error("Cannot deduce ImageNet subset from model, no labelling will be performed.")
top_k = min(args.topk, args.num_classes)
batch_time = AverageMeter()
end = time.time()
all_indices = []
all_labels = []
all_outputs = []
use_probs = args.output_type == 'prob'
with torch.no_grad():
for batch_idx, (input, _) in enumerate(loader):
with amp_autocast():
output = model(input)
if use_probs:
output = output.softmax(-1)
if top_k:
output, indices = output.topk(top_k)
np_indices = indices.cpu().numpy()
if args.include_index:
all_indices.append(np_indices)
if to_label is not None:
np_labels = to_label(np_indices)
all_labels.append(np_labels)
all_outputs.append(output.cpu().numpy())
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
batch_idx, len(loader), batch_time=batch_time))
all_indices = np.concatenate(all_indices, axis=0) if all_indices else None
all_labels = np.concatenate(all_labels, axis=0) if all_labels else None
all_outputs = np.concatenate(all_outputs, axis=0).astype(np.float32)
filenames = loader.dataset.filenames(basename=not args.fullname)
output_col = args.output_col or ('prob' if use_probs else 'logit')
data_dict = {args.filename_col: filenames}
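    # Either expand top-k outputs into one column per rank (index_0, label_0, prob_0, ...)
    # or store each sample's top-k results as a list in a single column.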
if args.results_separate_col and all_outputs.shape[-1] > 1:
if all_indices is not None:
for i in range(all_indices.shape[-1]):
data_dict[f'{args.index_col}_{i}'] = all_indices[:, i]
if all_labels is not None:
for i in range(all_labels.shape[-1]):
data_dict[f'{args.label_col}_{i}'] = all_labels[:, i]
for i in range(all_outputs.shape[-1]):
data_dict[f'{output_col}_{i}'] = all_outputs[:, i]
else:
if all_indices is not None:
if all_indices.shape[-1] == 1:
all_indices = all_indices.squeeze(-1)
data_dict[args.index_col] = list(all_indices)
if all_labels is not None:
if all_labels.shape[-1] == 1:
all_labels = all_labels.squeeze(-1)
data_dict[args.label_col] = list(all_labels)
if all_outputs.shape[-1] == 1:
all_outputs = all_outputs.squeeze(-1)
data_dict[output_col] = list(all_outputs)
df = pd.DataFrame(data=data_dict)
results_filename = args.results_file
if results_filename:
filename_no_ext, ext = os.path.splitext(results_filename)
if ext and ext in _FMT_EXT.values():
# if filename provided with one of expected ext,
# remove it as it will be added back
results_filename = filename_no_ext
else:
# base default filename on model name + img-size
img_size = data_config["input_size"][1]
results_filename = f'{args.model}-{img_size}'
if args.results_dir:
results_filename = os.path.join(args.results_dir, results_filename)
for fmt in args.results_format:
save_results(df, results_filename, fmt)
print(f'--result')
print(df.set_index(args.filename_col).to_json(orient='index', indent=4))
def save_results(df, results_filename, results_format='csv', filename_col='filename'):
results_filename += _FMT_EXT[results_format]
if results_format == 'parquet':
df.set_index(filename_col).to_parquet(results_filename)
elif results_format == 'json':
df.set_index(filename_col).to_json(results_filename, indent=4, orient='index')
    elif results_format == 'json-record':
df.to_json(results_filename, lines=True, orient='records')
elif results_format == 'json-split':
df.to_json(results_filename, indent=4, orient='split', index=False)
else:
df.to_csv(results_filename, index=False)
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/bulk_runner.py | #!/usr/bin/env python3
""" Bulk Model Script Runner
Run validation or benchmark script in separate process for each model
Benchmark all 'vit*' models:
python bulk_runner.py --model-list 'vit*' --results-file vit_bench.csv benchmark.py --amp -b 512
Validate all models:
python bulk_runner.py --model-list all --results-file val.csv --pretrained validate.py /imagenet/validation/ --amp -b 512 --retry
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import os
import sys
import csv
import json
import subprocess
import time
from typing import Callable, List, Tuple, Union
from timm.models import is_model, list_models, get_pretrained_cfg
parser = argparse.ArgumentParser(description='Per-model process launcher')
# model and results args
parser.add_argument(
'--model-list', metavar='NAME', default='',
help='txt file based list of model names to benchmark')
parser.add_argument(
'--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument(
'--sort-key', default='', type=str, metavar='COL',
help='Specify sort key for results csv')
parser.add_argument(
"--pretrained", action='store_true',
help="only run models with pretrained weights")
parser.add_argument(
"--delay",
type=float,
default=0,
help="Interval, in seconds, to delay between model invocations.",
)
parser.add_argument(
"--start_method", type=str, default="spawn", choices=["spawn", "fork", "forkserver"],
help="Multiprocessing start method to use when creating workers.",
)
parser.add_argument(
"--no_python",
help="Skip prepending the script with 'python' - just execute it directly. Useful "
"when the script is not a Python script.",
)
parser.add_argument(
"-m",
"--module",
help="Change each process to interpret the launch script as a Python module, executing "
"with the same behavior as 'python -m'.",
)
# positional
parser.add_argument(
"script", type=str,
help="Full path to the program/script to be launched for each model config.",
)
parser.add_argument("script_args", nargs=argparse.REMAINDER)
def cmd_from_args(args) -> Tuple[Union[Callable, str], List[str]]:
# If ``args`` not passed, defaults to ``sys.argv[:1]``
with_python = not args.no_python
cmd: Union[Callable, str]
cmd_args = []
if with_python:
cmd = os.getenv("PYTHON_EXEC", sys.executable)
cmd_args.append("-u")
if args.module:
cmd_args.append("-m")
cmd_args.append(args.script)
else:
if args.module:
raise ValueError(
"Don't use both the '--no_python' flag"
" and the '--module' flag at the same time."
)
cmd = args.script
cmd_args.extend(args.script_args)
return cmd, cmd_args
def main():
args = parser.parse_args()
cmd, cmd_args = cmd_from_args(args)
model_cfgs = []
if args.model_list == 'all':
model_names = list_models(
pretrained=args.pretrained, # only include models w/ pretrained checkpoints if set
)
model_cfgs = [(n, None) for n in model_names]
elif args.model_list == 'all_in1k':
model_names = list_models(pretrained=True)
model_cfgs = []
for n in model_names:
pt_cfg = get_pretrained_cfg(n)
if getattr(pt_cfg, 'num_classes', 0) == 1000:
print(n, pt_cfg.num_classes)
model_cfgs.append((n, None))
elif args.model_list == 'all_res':
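        # Build unique (model, input resolution) pairs covering both train and test input sizes.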
model_names = list_models()
model_names += list_models(pretrained=True)
model_cfgs = set()
for n in model_names:
pt_cfg = get_pretrained_cfg(n)
if pt_cfg is None:
print(f'Model {n} is missing pretrained cfg, skipping.')
continue
n = n.split('.')[0]
model_cfgs.add((n, pt_cfg.input_size[-1]))
if pt_cfg.test_input_size is not None:
model_cfgs.add((n, pt_cfg.test_input_size[-1]))
model_cfgs = [(n, {'img-size': r}) for n, r in sorted(model_cfgs)]
elif not is_model(args.model_list):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model_list)
model_cfgs = [(n, None) for n in model_names]
if not model_cfgs and os.path.exists(args.model_list):
with open(args.model_list) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names]
if len(model_cfgs):
results_file = args.results_file or './results.csv'
results = []
errors = []
model_strings = '\n'.join([f'{x[0]}, {x[1]}' for x in model_cfgs])
print(f"Running script on these models:\n {model_strings}")
if not args.sort_key:
if 'benchmark' in args.script:
if any(['train' in a for a in args.script_args]):
sort_key = 'train_samples_per_sec'
else:
sort_key = 'infer_samples_per_sec'
else:
sort_key = 'top1'
else:
sort_key = args.sort_key
print(f'Script: {args.script}, Args: {args.script_args}, Sort key: {sort_key}')
try:
for m, ax in model_cfgs:
if not m:
continue
args_str = (cmd, *[str(e) for e in cmd_args], '--model', m)
if ax is not None:
extra_args = [(f'--{k}', str(v)) for k, v in ax.items()]
extra_args = [i for t in extra_args for i in t]
args_str += tuple(extra_args)
try:
o = subprocess.check_output(args=args_str).decode('utf-8').split('--result')[-1]
r = json.loads(o)
results.append(r)
except Exception as e:
# FIXME batch_size retry loop is currently done in either validation.py or benchmark.py
# for further robustness (but more overhead), we may want to manage that by looping here...
errors.append(dict(model=m, error=str(e)))
if args.delay:
time.sleep(args.delay)
except KeyboardInterrupt as e:
pass
errors.extend(list(filter(lambda x: 'error' in x, results)))
if errors:
print(f'{len(errors)} models had errors during run.')
for e in errors:
if 'model' in e:
print(f"\t {e['model']} ({e.get('error', 'Unknown')})")
else:
print(e)
results = list(filter(lambda x: 'error' not in x, results))
no_sortkey = list(filter(lambda x: sort_key not in x, results))
if no_sortkey:
print(f'{len(no_sortkey)} results missing sort key, skipping sort.')
else:
results = sorted(results, key=lambda x: x[sort_key], reverse=True)
if len(results):
print(f'{len(results)} models run successfully. Saving results to {results_file}.')
write_results(results_file, results)
def write_results(results_file, results):
with open(results_file, mode='w') as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/onnx_export.py | """ ONNX export script
Export PyTorch models as ONNX graphs.
This export script originally started as an adaptation of code snippets found at
https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
The default parameters work with PyTorch 1.6 and ONNX 1.7 and produce an optimal ONNX graph
for hosting in the ONNX runtime (see onnx_validate.py). To export an ONNX model compatible
with caffe2 (see caffe2_benchmark.py and caffe2_validate.py), the --keep-init and --aten-fallback
flags are currently required.
Older versions of PyTorch/ONNX (tested PyTorch 1.4, ONNX 1.5) do not need extra flags for
caffe2 compatibility, but they produce a model that isn't as fast running on ONNX runtime.
Most new release of PyTorch and ONNX cause some sort of breakage in the export / usage of ONNX models.
Please do your research and search ONNX and PyTorch issue tracker before asking me. Thanks.
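Example usage (a sketch; output path, model, and opset are placeholders to adapt):
    python onnx_export.py ./mobilenetv3_large_100.onnx --model mobilenetv3_large_100 --opset 12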
Copyright 2020 Ross Wightman
"""
import argparse
import timm
from timm.utils.model import reparameterize_model
from timm.utils.onnx import onnx_export
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('output', metavar='ONNX_FILE',
help='output model filename')
parser.add_argument('--model', '-m', metavar='MODEL', default='mobilenetv3_large_100',
help='model architecture (default: mobilenetv3_large_100)')
parser.add_argument('--opset', type=int, default=None,
                    help='ONNX opset to use (default: None, defer to onnx_export helper default)')
parser.add_argument('--keep-init', action='store_true', default=False,
help='Keep initializers as input. Needed for Caffe2 compatible export in newer PyTorch/ONNX.')
parser.add_argument('--aten-fallback', action='store_true', default=False,
help='Fallback to ATEN ops. Helps fix AdaptiveAvgPool issue with Caffe2 in newer PyTorch/ONNX.')
parser.add_argument('--dynamic-size', action='store_true', default=False,
                    help='Export model with dynamic width/height. Not recommended for "tf" models with SAME padding.')
parser.add_argument('--check-forward', action='store_true', default=False,
help='Do a full check of torch vs onnx forward after export.')
parser.add_argument('-b', '--batch-size', default=1, type=int,
metavar='N', help='mini-batch size (default: 1)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--num-classes', type=int, default=1000,
help='Number classes in dataset')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to checkpoint (default: none)')
parser.add_argument('--reparam', default=False, action='store_true',
help='Reparameterize model')
parser.add_argument('--training', default=False, action='store_true',
help='Export in training mode (default is eval)')
parser.add_argument('--verbose', default=False, action='store_true',
help='Extra stdout output')
def main():
args = parser.parse_args()
args.pretrained = True
if args.checkpoint:
args.pretrained = False
print("==> Creating PyTorch {} model".format(args.model))
# NOTE exportable=True flag disables autofn/jit scripted activations and uses Conv2dSameExport layers
# for models using SAME padding
model = timm.create_model(
args.model,
num_classes=args.num_classes,
in_chans=3,
pretrained=args.pretrained,
checkpoint_path=args.checkpoint,
exportable=True,
)
if args.reparam:
model = reparameterize_model(model)
onnx_export(
model,
args.output,
opset=args.opset,
dynamic_size=args.dynamic_size,
aten_fallback=args.aten_fallback,
keep_initializers=args.keep_init,
check_forward=args.check_forward,
training=args.training,
verbose=args.verbose,
input_size=(3, args.img_size, args.img_size),
batch_size=args.batch_size,
)
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/mkdocs.yml | site_name: 'Pytorch Image Models'
site_description: 'Pretrained Image Recognition Models'
repo_name: 'rwightman/pytorch-image-models'
repo_url: 'https://github.com/rwightman/pytorch-image-models'
nav:
- index.md
- models.md
- ... | models/*.md
- results.md
- scripts.md
- training_hparam_examples.md
- feature_extraction.md
- changes.md
- archived_changes.md
theme:
name: 'material'
feature:
tabs: false
extra_javascript:
- 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML'
- https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js
- javascripts/tables.js
markdown_extensions:
- codehilite:
linenums: true
- admonition
- pymdownx.arithmatex
- pymdownx.betterem:
smart_enable: all
- pymdownx.caret
- pymdownx.critic
- pymdownx.details
- pymdownx.emoji:
emoji_generator: !!python/name:pymdownx.emoji.to_svg
- pymdownx.inlinehilite
- pymdownx.magiclink
- pymdownx.mark
- pymdownx.smartsymbols
- pymdownx.superfences
- pymdownx.tasklist:
custom_checkbox: true
- pymdownx.tilde
- mdx_truly_sane_lists
plugins:
- search
- awesome-pages
- redirects:
redirect_maps:
'index.md': 'https://huggingface.co/docs/timm/index'
'models.md': 'https://huggingface.co/docs/timm/models'
'results.md': 'https://huggingface.co/docs/timm/results'
'scripts.md': 'https://huggingface.co/docs/timm/training_script'
'training_hparam_examples.md': 'https://huggingface.co/docs/timm/training_script#training-examples'
'feature_extraction.md': 'https://huggingface.co/docs/timm/feature_extraction'
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/README.md | # PyTorch Image Models
- [What's New](#whats-new)
- [Introduction](#introduction)
- [Models](#models)
- [Features](#features)
- [Results](#results)
- [Getting Started (Documentation)](#getting-started-documentation)
- [Train, Validation, Inference Scripts](#train-validation-inference-scripts)
- [Awesome PyTorch Resources](#awesome-pytorch-resources)
- [Licenses](#licenses)
- [Citing](#citing)
## What's New
❗Updates after Oct 10, 2022 are available in version >= 0.9❗
* Many changes since the last 0.6.x stable releases. They were previewed in 0.8.x dev releases but not everyone transitioned.
* `timm.models.layers` moved to `timm.layers`:
* `from timm.models.layers import name` will still work via deprecation mapping (but please transition to `timm.layers`).
* `import timm.models.layers.module` or `from timm.models.layers.module import name` needs to be changed now.
* Builder, helper, non-model modules in `timm.models` have a `_` prefix added, ie `timm.models.helpers` -> `timm.models._helpers`, there are temporary deprecation mapping files but those will be removed.
* All models now support `architecture.pretrained_tag` naming (ex `resnet50.rsb_a1`).
* The pretrained_tag is the specific weight variant (different head) for the architecture.
* Using only `architecture` defaults to the first weights in the default_cfgs for that model architecture.
* In adding pretrained tags, many model names that existed to differentiate were renamed to use the tag (ex: `vit_base_patch16_224_in21k` -> `vit_base_patch16_224.augreg_in21k`). There are deprecation mappings for these.
* A number of models had their checkpoints remapped to match architecture changes needed to better support `features_only=True`; there are `checkpoint_filter_fn` methods in any model module that was remapped. These can be passed to `timm.models.load_checkpoint(..., filter_fn=timm.models.swin_transformer_v2.checkpoint_filter_fn)` to remap your existing checkpoint (see the sketch after this list).
* The Hugging Face Hub (https://huggingface.co/timm) is now the primary source for `timm` weights. Model cards include link to papers, original source, license.
* Previous 0.6.x can be cloned from [0.6.x](https://github.com/rwightman/pytorch-image-models/tree/0.6.x) branch or installed via pip with version.
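A minimal sketch of the checkpoint remapping mentioned above (the checkpoint path is hypothetical and `swinv2_tiny_window8_256` is just an example architecture):

```python
import timm
from timm.models import load_checkpoint
from timm.models.swin_transformer_v2 import checkpoint_filter_fn

model = timm.create_model('swinv2_tiny_window8_256')
# 'old_swinv2_checkpoint.pth' stands in for a checkpoint saved before the architecture remap
load_checkpoint(model, 'old_swinv2_checkpoint.pth', filter_fn=checkpoint_filter_fn)
```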
### Jan 8, 2024
Datasets & transform refactoring
* HuggingFace streaming (iterable) dataset support (`--dataset hfids:org/dataset`)
* Webdataset wrapper tweaks for improved split info fetching, can auto fetch splits from supported HF hub webdataset
* Tested HF `datasets` and webdataset wrapper streaming from HF hub with recent `timm` ImageNet uploads to https://huggingface.co/timm
* Make input & target column/field keys consistent across datasets and pass via args
* Full monochrome support when using e.g. `--input-size 1 224 224` or `--in-chans 1`, setting PIL image conversion appropriately in the dataset
* Improved several alternate crop & resize transforms (ResizeKeepRatio, RandomCropOrPad, etc) for use in PixParse document AI project
* Add SimCLR style color jitter prob along with grayscale and gaussian blur options to augmentations and args
* Allow training without a validation set (`--val-split ''`) in train script
* Add `--bce-sum` (sum over class dim) and `--bce-pos-weight` (positive weighting) args for training as they're common BCE loss tweaks I was often hard coding
### Nov 23, 2023
* Added EfficientViT-Large models, thanks [SeeFun](https://github.com/seefun)
* Fix Python 3.7 compat, will be dropping support for it soon
* Other misc fixes
* Release 0.9.12
### Nov 20, 2023
* Added significant flexibility for Hugging Face Hub based timm models via `model_args` config entry. `model_args` will be passed as kwargs through to models on creation.
* See example at https://huggingface.co/gaunernst/vit_base_patch16_1024_128.audiomae_as2m_ft_as20k/blob/main/config.json
* Usage: https://github.com/huggingface/pytorch-image-models/discussions/2035
* Updated imagenet eval and test set csv files with latest models
* `vision_transformer.py` typing and doc cleanup by [Laureηt](https://github.com/Laurent2916)
* 0.9.11 release
### Nov 3, 2023
* [DFN (Data Filtering Networks)](https://huggingface.co/papers/2309.17425) and [MetaCLIP](https://huggingface.co/papers/2309.16671) ViT weights added
* DINOv2 'register' ViT model weights added (https://huggingface.co/papers/2309.16588, https://huggingface.co/papers/2304.07193)
* Add `quickgelu` ViT variants for OpenAI, DFN, MetaCLIP weights that use it (less efficient)
* Improved typing added to ResNet, MobileNet-v3 thanks to [Aryan](https://github.com/a-r-r-o-w)
* ImageNet-12k fine-tuned (from LAION-2B CLIP) `convnext_xxlarge`
* 0.9.9 release
### Oct 20, 2023
* [SigLIP](https://huggingface.co/papers/2303.15343) image tower weights supported in `vision_transformer.py`.
* Great potential for fine-tune and downstream feature use.
* Experimental 'register' support in vit models as per [Vision Transformers Need Registers](https://huggingface.co/papers/2309.16588)
* Updated RepViT with new weight release. Thanks [wangao](https://github.com/jameslahm)
* Add patch resizing support (on pretrained weight load) to Swin models
* 0.9.8 release pending
### Sep 1, 2023
* TinyViT added by [SeeFun](https://github.com/seefun)
* Fix EfficientViT (MIT) to use torch.autocast so it works back to PT 1.10
* 0.9.7 release
### Aug 28, 2023
* Add dynamic img size support to models in `vision_transformer.py`, `vision_transformer_hybrid.py`, `deit.py`, and `eva.py` w/o breaking backward compat (see the usage sketch after this list).
* Add `dynamic_img_size=True` to args at model creation time to allow changing the grid size (interpolate abs and/or ROPE pos embed each forward pass).
* Add `dynamic_img_pad=True` to allow image sizes that aren't divisible by patch size (pad bottom right to patch size each forward pass).
* Enabling either dynamic mode will break FX tracing unless PatchEmbed module added as leaf.
* Existing method of resizing position embedding by passing different `img_size` (interpolate pretrained embed weights once) on creation still works.
* Existing method of changing `patch_size` (resize pretrained patch_embed weights once) on creation still works.
* Example validation cmd `python validate.py /imagenet --model vit_base_patch16_224 --amp --amp-dtype bfloat16 --img-size 255 --crop-pct 1.0 --model-kwargs dynamic_img_size=True dynamic_img_pad=True`
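A minimal Python sketch of the dynamic modes above (model name and input size are illustrative only):

```python
import torch
import timm

# both kwargs are forwarded to the vision_transformer.py model constructor
model = timm.create_model(
    'vit_base_patch16_224',
    pretrained=True,
    dynamic_img_size=True,  # interpolate abs and/or ROPE pos embed each forward pass
    dynamic_img_pad=True,   # pad bottom/right when H or W isn't divisible by the patch size
).eval()

with torch.no_grad():
    out = model(torch.randn(1, 3, 255, 255))  # 255 isn't divisible by 16, padding handles it
```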
### Aug 25, 2023
* Many new models since last release
* FastViT - https://arxiv.org/abs/2303.14189
* MobileOne - https://arxiv.org/abs/2206.04040
* InceptionNeXt - https://arxiv.org/abs/2303.16900
* RepGhostNet - https://arxiv.org/abs/2211.06088 (thanks https://github.com/ChengpengChen)
* GhostNetV2 - https://arxiv.org/abs/2211.12905 (thanks https://github.com/yehuitang)
* EfficientViT (MSRA) - https://arxiv.org/abs/2305.07027 (thanks https://github.com/seefun)
* EfficientViT (MIT) - https://arxiv.org/abs/2205.14756 (thanks https://github.com/seefun)
* Add `--reparam` arg to `benchmark.py`, `onnx_export.py`, and `validate.py` to trigger layer reparameterization / fusion for models with any one of `reparameterize()`, `switch_to_deploy()` or `fuse()`
* Including FastViT, MobileOne, RepGhostNet, EfficientViT (MSRA), RepViT, RepVGG, and LeViT
* Preparing 0.9.6 'back to school' release
### Aug 11, 2023
* Swin, MaxViT, CoAtNet, and BEiT models support resizing of image/window size on creation with adaptation of pretrained weights
* Example validation cmd to test w/ non-square resize `python validate.py /imagenet --model swin_base_patch4_window7_224.ms_in22k_ft_in1k --amp --amp-dtype bfloat16 --input-size 3 256 320 --model-kwargs window_size=8,10 img_size=256,320`
### Aug 3, 2023
* Add GluonCV weights for HRNet w18_small and w18_small_v2. Converted by [SeeFun](https://github.com/seefun)
* Fix `selecsls*` model naming regression
* Patch and position embedding for ViT/EVA work with bfloat16/float16 weights on load (or with activations for on-the-fly resize)
* v0.9.5 release prep
### July 27, 2023
* Added timm trained `seresnextaa201d_32x8d.sw_in12k_ft_in1k_384` weights (and `.sw_in12k` pretrain) with 87.3% top-1 on ImageNet-1k, best ImageNet ResNet family model I'm aware of.
* RepViT model and weights (https://arxiv.org/abs/2307.09283) added by [wangao](https://github.com/jameslahm)
* I-JEPA ViT feature weights (no classifier) added by [SeeFun](https://github.com/seefun)
* SAM-ViT (segment anything) feature weights (no classifier) added by [SeeFun](https://github.com/seefun)
* Add support for alternative feat extraction methods and -ve indices to EfficientNet
* Add NAdamW optimizer
* Misc fixes
### May 11, 2023
* `timm` 0.9 released, transition from 0.8.xdev releases
### May 10, 2023
* Hugging Face Hub downloading is now default, 1132 models on https://huggingface.co/timm, 1163 weights in `timm`
* DINOv2 vit feature backbone weights added thanks to [Leng Yue](https://github.com/leng-yue)
* FB MAE vit feature backbone weights added
* OpenCLIP DataComp-XL L/14 feat backbone weights added
* MetaFormer (poolformer-v2, caformer, convformer, updated poolformer (v1)) w/ weights added by [Fredo Guan](https://github.com/fffffgggg54)
* Experimental `get_intermediate_layers` function on vit/deit models for grabbing hidden states (inspired by DINO impl). This is WIP and may change significantly... feedback welcome.
* Model creation throws error if `pretrained=True` and no weights exist (instead of continuing with random initialization)
* Fix regression with inception / nasnet TF sourced weights with 1001 classes in original classifiers
* bitsandbytes (https://github.com/TimDettmers/bitsandbytes) optimizers added to factory, use `bnb` prefix, ie `bnbadam8bit`
* Misc cleanup and fixes
* Final testing before switching to a 0.9 and bringing `timm` out of pre-release state
### April 27, 2023
* 97% of `timm` models uploaded to HF Hub and almost all updated to support multi-weight pretrained configs
* Minor cleanup and refactoring of another batch of models as multi-weight added. More fused_attn (F.sdpa) and features_only support, and torchscript fixes.
### April 21, 2023
* Gradient accumulation support added to train script and tested (`--grad-accum-steps`), thanks [Taeksang Kim](https://github.com/voidbag)
* More weights on HF Hub (cspnet, cait, volo, xcit, tresnet, hardcorenas, densenet, dpn, vovnet, xception_aligned)
* Added `--head-init-scale` and `--head-init-bias` to train.py to scale classifier head and set fixed bias for fine-tune
* Remove all InplaceABN (`inplace_abn`) use, replaced use in tresnet with standard BatchNorm (modified weights accordingly).
### April 12, 2023
* Add ONNX export script, validate script, helpers that I've had kicking around for a long time. Tweak 'same' padding for better export w/ recent ONNX + pytorch.
* Refactor dropout args for vit and vit-like models, separate drop_rate into `drop_rate` (classifier dropout), `proj_drop_rate` (block mlp / out projections), `pos_drop_rate` (position embedding drop), `attn_drop_rate` (attention dropout). Also add patch dropout (FLIP) to vit and eva models.
* fused F.scaled_dot_product_attention support to more vit models, add env var (TIMM_FUSED_ATTN) to control, and config interface to enable/disable
* Add EVA-CLIP backbones w/ image tower weights, all the way up to 4B param 'enormous' model, and 336x336 OpenAI ViT mode that was missed.
### April 5, 2023
* ALL ResNet models pushed to Hugging Face Hub with multi-weight support
* All past `timm` trained weights added with recipe based tags to differentiate
* All ResNet strikes back A1/A2/A3 (seed 0) and R50 example B/C1/C2/D weights available
* Add torchvision v2 recipe weights to existing torchvision originals
* See comparison table in https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k_288#model-comparison
* New ImageNet-12k + ImageNet-1k fine-tunes available for a few anti-aliased ResNet models
* `resnetaa50d.sw_in12k_ft_in1k` - 81.7 @ 224, 82.6 @ 288
* `resnetaa101d.sw_in12k_ft_in1k` - 83.5 @ 224, 84.1 @ 288
* `seresnextaa101d_32x8d.sw_in12k_ft_in1k` - 86.0 @ 224, 86.5 @ 288
* `seresnextaa101d_32x8d.sw_in12k_ft_in1k_288` - 86.5 @ 288, 86.7 @ 320
### March 31, 2023
* Add first ConvNext-XXLarge CLIP -> IN-1k fine-tune and IN-12k intermediate fine-tunes for convnext-base/large CLIP models.
| model |top1 |top5 |img_size|param_count|gmacs |macts |
|----------------------------------------------------------------------------------------------------------------------|------|------|--------|-----------|------|------|
| [convnext_xxlarge.clip_laion2b_soup_ft_in1k](https://huggingface.co/timm/convnext_xxlarge.clip_laion2b_soup_ft_in1k) |88.612|98.704|256 |846.47 |198.09|124.45|
| convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384 |88.312|98.578|384 |200.13 |101.11|126.74|
| convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320 |87.968|98.47 |320 |200.13 |70.21 |88.02 |
| convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384 |87.138|98.212|384 |88.59 |45.21 |84.49 |
| convnext_base.clip_laion2b_augreg_ft_in12k_in1k |86.344|97.97 |256 |88.59 |20.09 |37.55 |
* Add EVA-02 MIM pretrained and fine-tuned weights, push to HF hub and update model cards for all EVA models. First model over 90% top-1 (99% top-5)! Check out the original code & weights at https://github.com/baaivision/EVA for more details on their work blending MIM, CLIP w/ many model, dataset, and train recipe tweaks.
| model |top1 |top5 |param_count|img_size|
|----------------------------------------------------|------|------|-----------|--------|
| [eva02_large_patch14_448.mim_m38m_ft_in22k_in1k](https://huggingface.co/timm/eva02_large_patch14_448.mim_m38m_ft_in1k) |90.054|99.042|305.08 |448 |
| eva02_large_patch14_448.mim_in22k_ft_in22k_in1k |89.946|99.01 |305.08 |448 |
| eva_giant_patch14_560.m30m_ft_in22k_in1k |89.792|98.992|1014.45 |560 |
| eva02_large_patch14_448.mim_in22k_ft_in1k |89.626|98.954|305.08 |448 |
| eva02_large_patch14_448.mim_m38m_ft_in1k |89.57 |98.918|305.08 |448 |
| eva_giant_patch14_336.m30m_ft_in22k_in1k |89.56 |98.956|1013.01 |336 |
| eva_giant_patch14_336.clip_ft_in1k |89.466|98.82 |1013.01 |336 |
| eva_large_patch14_336.in22k_ft_in22k_in1k |89.214|98.854|304.53 |336 |
| eva_giant_patch14_224.clip_ft_in1k |88.882|98.678|1012.56 |224 |
| eva02_base_patch14_448.mim_in22k_ft_in22k_in1k |88.692|98.722|87.12 |448 |
| eva_large_patch14_336.in22k_ft_in1k |88.652|98.722|304.53 |336 |
| eva_large_patch14_196.in22k_ft_in22k_in1k |88.592|98.656|304.14 |196 |
| eva02_base_patch14_448.mim_in22k_ft_in1k |88.23 |98.564|87.12 |448 |
| eva_large_patch14_196.in22k_ft_in1k |87.934|98.504|304.14 |196 |
| eva02_small_patch14_336.mim_in22k_ft_in1k |85.74 |97.614|22.13 |336 |
| eva02_tiny_patch14_336.mim_in22k_ft_in1k |80.658|95.524|5.76 |336 |
* Multi-weight and HF hub for DeiT and MLP-Mixer based models
### March 22, 2023
* More weights pushed to HF hub along with multi-weight support, including: `regnet.py`, `rexnet.py`, `byobnet.py`, `resnetv2.py`, `swin_transformer.py`, `swin_transformer_v2.py`, `swin_transformer_v2_cr.py`
* Swin Transformer models support feature extraction (NCHW feat maps for `swinv2_cr_*`, and NHWC for all others) and spatial embedding outputs.
* FocalNet (from https://github.com/microsoft/FocalNet) models and weights added with significant refactoring, feature extraction, no fixed resolution / sizing constraint
* RegNet weights increased with HF hub push, SWAG, SEER, and torchvision v2 weights. SEER is pretty poor wrt to performance for model size, but possibly useful.
* More ImageNet-12k pretrained and 1k fine-tuned `timm` weights:
* `rexnetr_200.sw_in12k_ft_in1k` - 82.6 @ 224, 83.2 @ 288
* `rexnetr_300.sw_in12k_ft_in1k` - 84.0 @ 224, 84.5 @ 288
* `regnety_120.sw_in12k_ft_in1k` - 85.0 @ 224, 85.4 @ 288
* `regnety_160.lion_in12k_ft_in1k` - 85.6 @ 224, 86.0 @ 288
* `regnety_160.sw_in12k_ft_in1k` - 85.6 @ 224, 86.0 @ 288 (compare to SWAG PT + 1k FT this is same BUT much lower res, blows SEER FT away)
* Model name deprecation + remapping functionality added (a milestone for bringing 0.8.x out of pre-release). Mappings being added...
* Minor bug fixes and improvements.
### Feb 26, 2023
* Add ConvNeXt-XXLarge CLIP pretrained image tower weights for fine-tune & features (fine-tuning TBD) -- see [model card](https://huggingface.co/laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup)
* Update `convnext_xxlarge` default LayerNorm eps to 1e-5 (for CLIP weights, improved stability)
* 0.8.15dev0
### Feb 20, 2023
* Add 320x320 `convnext_large_mlp.clip_laion2b_ft_320` and `convnext_large_mlp.clip_laion2b_ft_soup_320` CLIP image tower weights for features & fine-tune
* 0.8.13dev0 pypi release for latest changes w/ move to huggingface org
### Feb 16, 2023
* `safetensor` checkpoint support added
* Add ideas from 'Scaling Vision Transformers to 22 B. Params' (https://arxiv.org/abs/2302.05442) -- qk norm, RmsNorm, parallel block
* Add F.scaled_dot_product_attention support (PyTorch 2.0 only) to `vit_*`, `vit_relpos*`, `coatnet` / `maxxvit` (to start)
* Lion optimizer (w/ multi-tensor option) added (https://arxiv.org/abs/2302.06675)
* gradient checkpointing works with `features_only=True`
### Feb 7, 2023
* New inference benchmark numbers added in [results](results/) folder.
* Add convnext LAION CLIP trained weights and initial set of in1k fine-tunes
* `convnext_base.clip_laion2b_augreg_ft_in1k` - 86.2% @ 256x256
* `convnext_base.clip_laiona_augreg_ft_in1k_384` - 86.5% @ 384x384
* `convnext_large_mlp.clip_laion2b_augreg_ft_in1k` - 87.3% @ 256x256
* `convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384` - 87.9% @ 384x384
* Add DaViT models. Supports `features_only=True`. Adapted from https://github.com/dingmyu/davit by [Fredo](https://github.com/fffffgggg54).
* Use a common NormMlpClassifierHead across MaxViT, ConvNeXt, DaViT
* Add EfficientFormer-V2 model, update EfficientFormer, and refactor LeViT (closely related architectures). Weights on HF hub.
* New EfficientFormer-V2 arch, significant refactor from original at (https://github.com/snap-research/EfficientFormer). Supports `features_only=True`.
* Minor updates to EfficientFormer.
* Refactor LeViT models to stages, add `features_only=True` support to new `conv` variants, weight remap required.
* Move ImageNet meta-data (synsets, indices) from `/results` to [`timm/data/_info`](timm/data/_info/).
* Add ImageNetInfo / DatasetInfo classes to provide labelling for various ImageNet classifier layouts in `timm`
* Update `inference.py` to use, try: `python inference.py /folder/to/images --model convnext_small.in12k --label-type detail --topk 5`
* Ready for 0.8.10 pypi pre-release (final testing).
### Jan 20, 2023
* Add two convnext 12k -> 1k fine-tunes at 384x384
* `convnext_tiny.in12k_ft_in1k_384` - 85.1 @ 384
* `convnext_small.in12k_ft_in1k_384` - 86.2 @ 384
* Push all MaxxViT weights to HF hub, and add new ImageNet-12k -> 1k fine-tunes for `rw` base MaxViT and CoAtNet 1/2 models
|model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)|
|------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:|
|[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22|
|[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76|
|[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99|
|[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15|
|[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84|
|[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90|
|[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95|
|[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74|
|[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43|
|[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64|
|[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77|
|[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99|
|[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22|
|[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15|
|[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78|
|[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90|
|[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84|
|[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77|
|[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59|
|[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65|
|[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42|
|[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35|
|[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13|
|[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01|
|[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38|
|[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78|
|[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30|
|[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17|
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92|
|[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60|
|[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11|
|[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78|
|[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47|
|[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05|
|[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05|
|[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92|
|[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28|
|[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04|
|[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73|
|[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34|
|[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80|
|[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41|
|[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86|
### Jan 11, 2023
* Update ConvNeXt ImageNet-12k pretrain series w/ two new fine-tuned weights (and pre FT `.in12k` tags)
* `convnext_nano.in12k_ft_in1k` - 82.3 @ 224, 82.9 @ 288 (previously released)
* `convnext_tiny.in12k_ft_in1k` - 84.2 @ 224, 84.5 @ 288
* `convnext_small.in12k_ft_in1k` - 85.2 @ 224, 85.3 @ 288
### Jan 6, 2023
* Finally got around to adding `--model-kwargs` and `--opt-kwargs` to scripts to pass through rare args directly to model classes from cmd line
* `train.py /imagenet --model resnet50 --amp --model-kwargs output_stride=16 act_layer=silu`
* `train.py /imagenet --model vit_base_patch16_clip_224 --img-size 240 --amp --model-kwargs img_size=240 patch_size=12`
* Cleaned up some popular models to better support arg passthrough / merging with model configs, more to go (a Python-side sketch of the passthrough follows below)
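A rough Python-side sketch of what that passthrough amounts to (a minimal example assuming a recent `timm` install; `output_stride` is simply forwarded by `create_model` to the model constructor, which is what `--model-kwargs output_stride=16` does from the command line):
```python
import timm
import torch

# Extra keyword args to create_model are forwarded to the model entrypoint,
# mirroring `--model-kwargs output_stride=16` on the train script command line.
model = timm.create_model('resnet50', pretrained=False, output_stride=16)

# With stride 16 the final feature map is 14x14 for a 224x224 input (instead of 7x7).
feats = model.forward_features(torch.randn(1, 3, 224, 224))
print(feats.shape)  # torch.Size([1, 2048, 14, 14])
```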
### Jan 5, 2023
* ConvNeXt-V2 models and weights added to existing `convnext.py`
* Paper: [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](http://arxiv.org/abs/2301.00808)
* Reference impl: https://github.com/facebookresearch/ConvNeXt-V2 (NOTE: weights currently CC-BY-NC)
### Dec 23, 2022 🎄☃
* Add FlexiViT models and weights from https://github.com/google-research/big_vision (check out paper at https://arxiv.org/abs/2212.08013)
* NOTE currently resizing is static on model creation, on-the-fly dynamic / train patch size sampling is a WIP
* Many more models updated to multi-weight and downloadable via HF hub now (convnext, efficientnet, mobilenet, vision_transformer*, beit)
* More pretrained model tags and adjustments; some model names changed (deprecation translations are in progress -- consider the main branch a DEV branch right now, use 0.6.x for stable use)
* More ImageNet-12k (subset of 22k) pretrain models popping up:
* `efficientnet_b5.in12k_ft_in1k` - 85.9 @ 448x448
* `vit_medium_patch16_gap_384.in12k_ft_in1k` - 85.5 @ 384x384
* `vit_medium_patch16_gap_256.in12k_ft_in1k` - 84.5 @ 256x256
* `convnext_nano.in12k_ft_in1k` - 82.9 @ 288x288
### Dec 8, 2022
* Add 'EVA l' to `vision_transformer.py`, MAE style ViT-L/14 MIM pretrain w/ EVA-CLIP targets, FT on ImageNet-1k (w/ ImageNet-22k intermediate for some)
* original source: https://github.com/baaivision/EVA
| model | top1 | param_count | gmac | macts | hub |
|:------------------------------------------|-----:|------------:|------:|------:|:----------------------------------------|
| eva_large_patch14_336.in22k_ft_in22k_in1k | 89.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_336.in22k_ft_in1k | 88.7 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_196.in22k_ft_in22k_in1k | 88.6 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
| eva_large_patch14_196.in22k_ft_in1k | 87.9 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
### Dec 6, 2022
* Add 'EVA g', BEiT style ViT-g/14 model weights w/ both MIM pretrain and CLIP pretrain to `beit.py`.
* original source: https://github.com/baaivision/EVA
* paper: https://arxiv.org/abs/2211.07636
| model | top1 | param_count | gmac | macts | hub |
|:-----------------------------------------|-------:|--------------:|-------:|--------:|:----------------------------------------|
| eva_giant_patch14_560.m30m_ft_in22k_in1k | 89.8 | 1014.4 | 1906.8 | 2577.2 | [link](https://huggingface.co/BAAI/EVA) |
| eva_giant_patch14_336.m30m_ft_in22k_in1k | 89.6 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) |
| eva_giant_patch14_336.clip_ft_in1k | 89.4 | 1013 | 620.6 | 550.7 | [link](https://huggingface.co/BAAI/EVA) |
| eva_giant_patch14_224.clip_ft_in1k | 89.1 | 1012.6 | 267.2 | 192.6 | [link](https://huggingface.co/BAAI/EVA) |
### Dec 5, 2022
* Pre-release (`0.8.0dev0`) of multi-weight support (`model_arch.pretrained_tag`). Install with `pip install --pre timm` (a short usage sketch follows the tables below)
* vision_transformer, maxvit, convnext are the first three model impl w/ support
* model names are changing with this (previous _21k, etc. fn will merge), still sorting out deprecation handling
* bugs are likely, but I need feedback so please try it out
* if stability is needed, please use 0.6.x pypi releases or clone from [0.6.x branch](https://github.com/rwightman/pytorch-image-models/tree/0.6.x)
* Support for PyTorch 2.0 compile is added in train/validate/inference/benchmark, use `--torchcompile` argument
* Inference script allows more control over output: select top-k for class index + probability output as JSON, CSV, or parquet
* Add a full set of fine-tuned CLIP image tower weights from both LAION-2B and original OpenAI CLIP models
| model | top1 | param_count | gmac | macts | hub |
|:-------------------------------------------------|-------:|--------------:|-------:|--------:|:-------------------------------------------------------------------------------------|
| vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k | 88.6 | 632.5 | 391 | 407.5 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k) |
| vit_large_patch14_clip_336.openai_ft_in12k_in1k | 88.3 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.openai_ft_in12k_in1k) |
| vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k | 88.2 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k) |
| vit_large_patch14_clip_336.laion2b_ft_in12k_in1k | 88.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in12k_in1k) |
| vit_large_patch14_clip_224.openai_ft_in12k_in1k | 88.2 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in12k_in1k) |
| vit_large_patch14_clip_224.laion2b_ft_in12k_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in12k_in1k) |
| vit_large_patch14_clip_224.openai_ft_in1k | 87.9 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.openai_ft_in1k) |
| vit_large_patch14_clip_336.laion2b_ft_in1k | 87.9 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/timm/vit_large_patch14_clip_336.laion2b_ft_in1k) |
| vit_huge_patch14_clip_224.laion2b_ft_in1k | 87.6 | 632 | 167.4 | 139.4 | [link](https://huggingface.co/timm/vit_huge_patch14_clip_224.laion2b_ft_in1k) |
| vit_large_patch14_clip_224.laion2b_ft_in1k | 87.3 | 304.2 | 81.1 | 88.8 | [link](https://huggingface.co/timm/vit_large_patch14_clip_224.laion2b_ft_in1k) |
| vit_base_patch16_clip_384.laion2b_ft_in12k_in1k | 87.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in12k_in1k) |
| vit_base_patch16_clip_384.openai_ft_in12k_in1k | 87 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in12k_in1k) |
| vit_base_patch16_clip_384.laion2b_ft_in1k | 86.6 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in1k) |
| vit_base_patch16_clip_384.openai_ft_in1k | 86.2 | 86.9 | 55.5 | 101.6 | [link](https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in1k) |
| vit_base_patch16_clip_224.laion2b_ft_in12k_in1k | 86.2 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in12k_in1k) |
| vit_base_patch16_clip_224.openai_ft_in12k_in1k | 85.9 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k) |
| vit_base_patch32_clip_448.laion2b_ft_in12k_in1k | 85.8 | 88.3 | 17.9 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k) |
| vit_base_patch16_clip_224.laion2b_ft_in1k | 85.5 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in1k) |
| vit_base_patch32_clip_384.laion2b_ft_in12k_in1k | 85.4 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.laion2b_ft_in12k_in1k) |
| vit_base_patch16_clip_224.openai_ft_in1k | 85.3 | 86.6 | 17.6 | 23.9 | [link](https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in1k) |
| vit_base_patch32_clip_384.openai_ft_in12k_in1k | 85.2 | 88.3 | 13.1 | 16.5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_384.openai_ft_in12k_in1k) |
| vit_base_patch32_clip_224.laion2b_ft_in12k_in1k | 83.3 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in12k_in1k) |
| vit_base_patch32_clip_224.laion2b_ft_in1k | 82.6 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in1k) |
| vit_base_patch32_clip_224.openai_ft_in1k | 81.9 | 88.2 | 4.4 | 5 | [link](https://huggingface.co/timm/vit_base_patch32_clip_224.openai_ft_in1k) |
* Port of MaxViT Tensorflow Weights from official impl at https://github.com/google-research/maxvit
* There were larger than expected accuracy drops for the upscaled 384/512 in21k fine-tune weights (possibly a missing detail), but the in21k FT did seem sensitive to small preprocessing differences
| model | top1 | param_count | gmac | macts | hub |
|:-----------------------------------|-------:|--------------:|-------:|--------:|:-----------------------------------------------------------------------|
| maxvit_xlarge_tf_512.in21k_ft_in1k | 88.5 | 475.8 | 534.1 | 1413.2 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |
| maxvit_xlarge_tf_384.in21k_ft_in1k | 88.3 | 475.3 | 292.8 | 668.8 | [link](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |
| maxvit_base_tf_512.in21k_ft_in1k | 88.2 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |
| maxvit_large_tf_512.in21k_ft_in1k | 88 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |
| maxvit_large_tf_384.in21k_ft_in1k | 88 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |
| maxvit_base_tf_384.in21k_ft_in1k | 87.9 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |
| maxvit_base_tf_512.in1k | 86.6 | 119.9 | 138 | 704 | [link](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |
| maxvit_large_tf_512.in1k | 86.5 | 212.3 | 244.8 | 942.2 | [link](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |
| maxvit_base_tf_384.in1k | 86.3 | 119.6 | 73.8 | 332.9 | [link](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |
| maxvit_large_tf_384.in1k | 86.2 | 212 | 132.6 | 445.8 | [link](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |
| maxvit_small_tf_512.in1k | 86.1 | 69.1 | 67.3 | 383.8 | [link](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |
| maxvit_tiny_tf_512.in1k | 85.7 | 31 | 33.5 | 257.6 | [link](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |
| maxvit_small_tf_384.in1k | 85.5 | 69 | 35.9 | 183.6 | [link](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |
| maxvit_tiny_tf_384.in1k | 85.1 | 31 | 17.5 | 123.4 | [link](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |
| maxvit_large_tf_224.in1k | 84.9 | 211.8 | 43.7 | 127.4 | [link](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |
| maxvit_base_tf_224.in1k | 84.9 | 119.5 | 24 | 95 | [link](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |
| maxvit_small_tf_224.in1k | 84.4 | 68.9 | 11.7 | 53.2 | [link](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |
| maxvit_tiny_tf_224.in1k | 83.4 | 30.9 | 5.6 | 35.8 | [link](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |
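A minimal usage sketch of the new `model_arch.pretrained_tag` naming (assuming the `0.8.x` pre-release above is installed and weights can be downloaded; the tag used here is one row from the MaxViT table):
```python
import timm

# The pretrained tag is selected as part of the model name ('arch.tag').
model = timm.create_model('maxvit_tiny_tf_224.in1k', pretrained=True)
model.eval()

# Architectures can still be listed by wildcard, independent of their pretrained tags.
print(timm.list_models('maxvit_tiny_tf_*'))
```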
### Oct 15, 2022
* Train and validation script enhancements
* Non-GPU (ie CPU) device support
* SLURM compatibility for train script
* HF datasets support (via ReaderHfds)
* TFDS/WDS dataloading improvements (sample padding/wrap for distributed use fixed wrt sample count estimate)
* in_chans !=3 support for scripts / loader
* Adan optimizer
* Can enable per-step LR scheduling via args
* Dataset 'parsers' renamed to 'readers', more descriptive of purpose
* AMP args changed: APEX via `--amp-impl apex`, bfloat16 supported via `--amp-dtype bfloat16`
* main branch switched to 0.7.x version, 0.6.x forked for stable release of weight-only additions
* master -> main branch rename
### Oct 10, 2022
* More weights in `maxxvit` series, incl first ConvNeXt block based `coatnext` and `maxxvit` experiments:
* `coatnext_nano_rw_224` - 82.0 @ 224 (G) -- (uses ConvNeXt conv block, no BatchNorm)
* `maxxvit_rmlp_nano_rw_256` - 83.0 @ 256, 83.7 @ 320 (G) (uses ConvNeXt conv block, no BN)
* `maxvit_rmlp_small_rw_224` - 84.5 @ 224, 85.1 @ 320 (G)
* `maxxvit_rmlp_small_rw_256` - 84.6 @ 256, 84.9 @ 288 (G) -- could be trained better, hparams need tuning (uses ConvNeXt block, no BN)
* `coatnet_rmlp_2_rw_224` - 84.6 @ 224, 85 @ 320 (T)
* NOTE: official MaxVit weights (in1k) have been released at https://github.com/google-research/maxvit -- some extra work is needed to port and adapt them since my impl was created independently of theirs and has a few small differences, plus the whole TF same-padding fun.
### Sept 23, 2022
* LAION-2B CLIP image towers supported as pretrained backbones for fine-tune or features (no classifier) -- a minimal feature-extraction sketch follows this list
* vit_base_patch32_224_clip_laion2b
* vit_large_patch14_224_clip_laion2b
* vit_huge_patch14_224_clip_laion2b
* vit_giant_patch14_224_clip_laion2b
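A minimal sketch of using one of these towers as a feature backbone (model names as listed above for this release; `num_classes=0` is the usual `timm` convention for dropping the classifier, and pretrained weights are downloaded on first use):
```python
import timm
import torch

# num_classes=0 removes the classification head so the model returns pooled features.
backbone = timm.create_model('vit_base_patch32_224_clip_laion2b', pretrained=True, num_classes=0)
backbone.eval()

with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))
print(feats.shape)  # (1, embed_dim) pooled image embedding
```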
### Sept 7, 2022
* Hugging Face [`timm` docs](https://huggingface.co/docs/hub/timm) home now exists, look for more here in the future
* Add BEiT-v2 weights for base and large 224x224 models from https://github.com/microsoft/unilm/tree/master/beit2
* Add more weights in `maxxvit` series incl a `pico` (7.5M params, 1.9 GMACs), two `tiny` variants:
* `maxvit_rmlp_pico_rw_256` - 80.5 @ 256, 81.3 @ 320 (T)
* `maxvit_tiny_rw_224` - 83.5 @ 224 (G)
* `maxvit_rmlp_tiny_rw_256` - 84.2 @ 256, 84.8 @ 320 (T)
## Introduction
Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results.
The work of many others is present here. I've tried to make sure all source material is acknowledged via links to github, arxiv papers, etc in the README, documentation, and code docstrings. Please let me know if I missed anything.
## Models
All model architecture families include variants with pretrained weights. There are specific model variants without any weights; that is NOT a bug. Help training new or better weights is always appreciated.
* Aggregating Nested Transformers - https://arxiv.org/abs/2105.12723
* BEiT - https://arxiv.org/abs/2106.08254
* Big Transfer ResNetV2 (BiT) - https://arxiv.org/abs/1912.11370
* Bottleneck Transformers - https://arxiv.org/abs/2101.11605
* CaiT (Class-Attention in Image Transformers) - https://arxiv.org/abs/2103.17239
* CoaT (Co-Scale Conv-Attentional Image Transformers) - https://arxiv.org/abs/2104.06399
* CoAtNet (Convolution and Attention) - https://arxiv.org/abs/2106.04803
* ConvNeXt - https://arxiv.org/abs/2201.03545
* ConvNeXt-V2 - http://arxiv.org/abs/2301.00808
* ConViT (Soft Convolutional Inductive Biases Vision Transformers) - https://arxiv.org/abs/2103.10697
* CspNet (Cross-Stage Partial Networks) - https://arxiv.org/abs/1911.11929
* DeiT - https://arxiv.org/abs/2012.12877
* DeiT-III - https://arxiv.org/pdf/2204.07118.pdf
* DenseNet - https://arxiv.org/abs/1608.06993
* DLA - https://arxiv.org/abs/1707.06484
* DPN (Dual-Path Network) - https://arxiv.org/abs/1707.01629
* EdgeNeXt - https://arxiv.org/abs/2206.10589
* EfficientFormer - https://arxiv.org/abs/2206.01191
* EfficientNet (MBConvNet Family)
* EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252
* EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665
* EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946
* EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
* EfficientNet V2 - https://arxiv.org/abs/2104.00298
* FBNet-C - https://arxiv.org/abs/1812.03443
* MixNet - https://arxiv.org/abs/1907.09595
* MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626
* MobileNet-V2 - https://arxiv.org/abs/1801.04381
* Single-Path NAS - https://arxiv.org/abs/1904.02877
* TinyNet - https://arxiv.org/abs/2010.14819
* EfficientViT (MIT) - https://arxiv.org/abs/2205.14756
* EfficientViT (MSRA) - https://arxiv.org/abs/2305.07027
* EVA - https://arxiv.org/abs/2211.07636
* EVA-02 - https://arxiv.org/abs/2303.11331
* FastViT - https://arxiv.org/abs/2303.14189
* FlexiViT - https://arxiv.org/abs/2212.08013
* FocalNet (Focal Modulation Networks) - https://arxiv.org/abs/2203.11926
* GCViT (Global Context Vision Transformer) - https://arxiv.org/abs/2206.09959
* GhostNet - https://arxiv.org/abs/1911.11907
* GhostNet-V2 - https://arxiv.org/abs/2211.12905
* gMLP - https://arxiv.org/abs/2105.08050
* GPU-Efficient Networks - https://arxiv.org/abs/2006.14090
* Halo Nets - https://arxiv.org/abs/2103.12731
* HRNet - https://arxiv.org/abs/1908.07919
* InceptionNeXt - https://arxiv.org/abs/2303.16900
* Inception-V3 - https://arxiv.org/abs/1512.00567
* Inception-ResNet-V2 and Inception-V4 - https://arxiv.org/abs/1602.07261
* Lambda Networks - https://arxiv.org/abs/2102.08602
* LeViT (Vision Transformer in ConvNet's Clothing) - https://arxiv.org/abs/2104.01136
* MaxViT (Multi-Axis Vision Transformer) - https://arxiv.org/abs/2204.01697
* MetaFormer (PoolFormer-v2, ConvFormer, CAFormer) - https://arxiv.org/abs/2210.13452
* MLP-Mixer - https://arxiv.org/abs/2105.01601
* MobileNet-V3 (MBConvNet w/ Efficient Head) - https://arxiv.org/abs/1905.02244
* FBNet-V3 - https://arxiv.org/abs/2006.02049
* HardCoRe-NAS - https://arxiv.org/abs/2102.11646
* LCNet - https://arxiv.org/abs/2109.15099
* MobileOne - https://arxiv.org/abs/2206.04040
* MobileViT - https://arxiv.org/abs/2110.02178
* MobileViT-V2 - https://arxiv.org/abs/2206.02680
* MViT-V2 (Improved Multiscale Vision Transformer) - https://arxiv.org/abs/2112.01526
* NASNet-A - https://arxiv.org/abs/1707.07012
* NesT - https://arxiv.org/abs/2105.12723
* NFNet-F - https://arxiv.org/abs/2102.06171
* NF-RegNet / NF-ResNet - https://arxiv.org/abs/2101.08692
* PNasNet - https://arxiv.org/abs/1712.00559
* PoolFormer (MetaFormer) - https://arxiv.org/abs/2111.11418
* Pooling-based Vision Transformer (PiT) - https://arxiv.org/abs/2103.16302
* PVT-V2 (Improved Pyramid Vision Transformer) - https://arxiv.org/abs/2106.13797
* RegNet - https://arxiv.org/abs/2003.13678
* RegNetZ - https://arxiv.org/abs/2103.06877
* RepVGG - https://arxiv.org/abs/2101.03697
* RepGhostNet - https://arxiv.org/abs/2211.06088
* RepViT - https://arxiv.org/abs/2307.09283
* ResMLP - https://arxiv.org/abs/2105.03404
* ResNet/ResNeXt
* ResNet (v1b/v1.5) - https://arxiv.org/abs/1512.03385
* ResNeXt - https://arxiv.org/abs/1611.05431
* 'Bag of Tricks' / Gluon C, D, E, S variations - https://arxiv.org/abs/1812.01187
* Weakly-supervised (WSL) Instagram pretrained / ImageNet tuned ResNeXt101 - https://arxiv.org/abs/1805.00932
* Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet/ResNeXts - https://arxiv.org/abs/1905.00546
* ECA-Net (ECAResNet) - https://arxiv.org/abs/1910.03151v4
* Squeeze-and-Excitation Networks (SEResNet) - https://arxiv.org/abs/1709.01507
* ResNet-RS - https://arxiv.org/abs/2103.07579
* Res2Net - https://arxiv.org/abs/1904.01169
* ResNeSt - https://arxiv.org/abs/2004.08955
* ReXNet - https://arxiv.org/abs/2007.00992
* SelecSLS - https://arxiv.org/abs/1907.00837
* Selective Kernel Networks - https://arxiv.org/abs/1903.06586
* Sequencer2D - https://arxiv.org/abs/2205.01972
* Swin S3 (AutoFormerV2) - https://arxiv.org/abs/2111.14725
* Swin Transformer - https://arxiv.org/abs/2103.14030
* Swin Transformer V2 - https://arxiv.org/abs/2111.09883
* Transformer-iN-Transformer (TNT) - https://arxiv.org/abs/2103.00112
* TResNet - https://arxiv.org/abs/2003.13630
* Twins (Spatial Attention in Vision Transformers) - https://arxiv.org/pdf/2104.13840.pdf
* Visformer - https://arxiv.org/abs/2104.12533
* Vision Transformer - https://arxiv.org/abs/2010.11929
* VOLO (Vision Outlooker) - https://arxiv.org/abs/2106.13112
* VovNet V2 and V1 - https://arxiv.org/abs/1911.06667
* Xception - https://arxiv.org/abs/1610.02357
* Xception (Modified Aligned, Gluon) - https://arxiv.org/abs/1802.02611
* Xception (Modified Aligned, TF) - https://arxiv.org/abs/1802.02611
* XCiT (Cross-Covariance Image Transformers) - https://arxiv.org/abs/2106.09681
## Features
Several (less common) features that I often utilize in my projects are included. Many of their additions are the reason why I maintain my own set of models, instead of using others' via PIP:
* All models have a common default configuration interface and API for
* accessing/changing the classifier - `get_classifier` and `reset_classifier`
* doing a forward pass on just the features - `forward_features` (see [documentation](https://huggingface.co/docs/timm/feature_extraction))
* these make it easy to write consistent network wrappers that work with any of the models
* All models support multi-scale feature map extraction (feature pyramids) via create_model (see [documentation](https://huggingface.co/docs/timm/feature_extraction)) -- see the sketch after this list
* `create_model(name, features_only=True, out_indices=..., output_stride=...)`
* `out_indices` creation arg specifies which feature maps to return; these indices are 0 based and generally correspond to the `C(i + 1)` feature level.
* `output_stride` creation arg controls output stride of the network by using dilated convolutions. Most networks are stride 32 by default. Not all networks support this.
* feature map channel counts and reduction levels (strides) can be queried AFTER model creation via the `.feature_info` member
* All models have a consistent pretrained weight loader that adapts the last linear layer if necessary and can convert 3-channel input weights to 1-channel if desired
* High performance [reference training, validation, and inference scripts](https://huggingface.co/docs/timm/training_script) that work in several process/GPU modes:
* NVIDIA DDP w/ a single GPU per process, multiple processes with APEX present (AMP mixed-precision optional)
* PyTorch DistributedDataParallel w/ multi-gpu, single process (AMP disabled as it crashes when enabled)
* PyTorch w/ single GPU single process (AMP optional)
* A dynamic global pool implementation that allows selecting from average pooling, max pooling, average + max, or concat([average, max]) at model creation. All global pooling is adaptive average by default and compatible with pretrained weights.
* A 'Test Time Pool' wrapper that can wrap any of the included models and usually provides improved performance when doing inference with input images larger than the training size. Idea adapted from the original DPN implementation when I ported it (https://github.com/cypw/DPNs)
* Learning rate schedulers
* Ideas adopted from
* [AllenNLP schedulers](https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers)
* [FAIRseq lr_scheduler](https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler)
* SGDR: Stochastic Gradient Descent with Warm Restarts (https://arxiv.org/abs/1608.03983)
* Schedulers include `step`, `cosine` w/ restarts, `tanh` w/ restarts, `plateau`
* Optimizers:
* `rmsprop_tf` adapted from PyTorch RMSProp by myself. Reproduces much improved Tensorflow RMSProp behaviour.
* `radam` by [Liyuan Liu](https://github.com/LiyuanLucasLiu/RAdam) (https://arxiv.org/abs/1908.03265)
* `novograd` by [Masashi Kimura](https://github.com/convergence-lab/novograd) (https://arxiv.org/abs/1905.11286)
* `lookahead` adapted from impl by [Liam](https://github.com/alphadl/lookahead.pytorch) (https://arxiv.org/abs/1907.08610)
* `fused<name>` optimizers by name with [NVIDIA Apex](https://github.com/NVIDIA/apex/tree/master/apex/optimizers) installed
* `adamp` and `sgdp` by [Naver ClovAI](https://github.com/clovaai) (https://arxiv.org/abs/2006.08217)
* `adafactor` adapted from [FAIRSeq impl](https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py) (https://arxiv.org/abs/1804.04235)
* `adahessian` by [David Samuel](https://github.com/davda54/ada-hessian) (https://arxiv.org/abs/2006.00719)
* Random Erasing from [Zhun Zhong](https://github.com/zhunzhong07/Random-Erasing/blob/master/transforms.py) (https://arxiv.org/abs/1708.04896)
* Mixup (https://arxiv.org/abs/1710.09412)
* CutMix (https://arxiv.org/abs/1905.04899)
* AutoAugment (https://arxiv.org/abs/1805.09501) and RandAugment (https://arxiv.org/abs/1909.13719) ImageNet configurations modeled after impl for EfficientNet training (https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py)
* AugMix w/ JSD loss (https://arxiv.org/abs/1912.02781), JSD w/ clean + augmented mixing support works with AutoAugment and RandAugment as well
* SplitBatchNorm - allows splitting batch norm layers between clean and augmented (auxiliary batch norm) data
* DropPath aka "Stochastic Depth" (https://arxiv.org/abs/1603.09382)
* DropBlock (https://arxiv.org/abs/1810.12890)
* Blur Pooling (https://arxiv.org/abs/1904.11486)
* Space-to-Depth by [mrT23](https://github.com/mrT23/TResNet/blob/master/src/models/tresnet/layers/space_to_depth.py) (https://arxiv.org/abs/1801.04590) -- original paper?
* Adaptive Gradient Clipping (https://arxiv.org/abs/2102.06171, https://github.com/deepmind/deepmind-research/tree/master/nfnets)
* An extensive selection of channel and/or spatial attention modules:
* Bottleneck Transformer - https://arxiv.org/abs/2101.11605
* CBAM - https://arxiv.org/abs/1807.06521
* Effective Squeeze-Excitation (ESE) - https://arxiv.org/abs/1911.06667
* Efficient Channel Attention (ECA) - https://arxiv.org/abs/1910.03151
* Gather-Excite (GE) - https://arxiv.org/abs/1810.12348
* Global Context (GC) - https://arxiv.org/abs/1904.11492
* Halo - https://arxiv.org/abs/2103.12731
* Involution - https://arxiv.org/abs/2103.06255
* Lambda Layer - https://arxiv.org/abs/2102.08602
* Non-Local (NL) - https://arxiv.org/abs/1711.07971
* Squeeze-and-Excitation (SE) - https://arxiv.org/abs/1709.01507
* Selective Kernel (SK) - https://arxiv.org/abs/1903.06586
* Split (SPLAT) - https://arxiv.org/abs/2004.08955
* Shifted Window (SWIN) - https://arxiv.org/abs/2103.14030
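A short sketch pulling several of the above together (standard `timm` calls; shapes and channel counts in the comments are for `resnet50` at 224x224 and will differ for other architectures):
```python
import timm
import torch

x = torch.randn(2, 3, 224, 224)

# Classification model: forward_features() skips the head, reset_classifier() swaps it.
model = timm.create_model('resnet50', pretrained=False)
unpooled = model.forward_features(x)    # (2, 2048, 7, 7) feature map before pooling/head
model.reset_classifier(num_classes=10)  # re-head the same backbone for a 10-class task

# Feature pyramid: features_only returns a list of maps at the requested indices,
# and output_stride=16 switches the last stage to dilated convolutions.
fx = timm.create_model('resnet50', features_only=True, out_indices=(1, 2, 3, 4), output_stride=16)
feats = fx(x)
print(fx.feature_info.channels())   # [256, 512, 1024, 2048]
print([f.shape[-1] for f in feats])  # [56, 28, 14, 14] -- last two stages share 14 due to stride 16
```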
## Results
Model validation results can be found in the [results tables](results/README.md).
## Getting Started (Documentation)
The official documentation can be found at https://huggingface.co/docs/hub/timm. Documentation contributions are welcome.
[Getting Started with PyTorch Image Models (timm): A Practitioner’s Guide](https://towardsdatascience.com/getting-started-with-pytorch-image-models-timm-a-practitioners-guide-4e77b4bf9055) by [Chris Hughes](https://github.com/Chris-hughes10) is an extensive blog post covering many aspects of `timm` in detail.
[timmdocs](http://timm.fast.ai/) is an alternate set of documentation for `timm`. A big thanks to [Aman Arora](https://github.com/amaarora) for his efforts creating timmdocs.
[paperswithcode](https://paperswithcode.com/lib/timm) is a good resource for browsing the models within `timm`.
## Train, Validation, Inference Scripts
The root folder of the repository contains reference train, validation, and inference scripts that work with the included models and other features of this repository. They are adaptable for other datasets and use cases with a little hacking. See [documentation](https://huggingface.co/docs/timm/training_script).
## Awesome PyTorch Resources
One of the greatest assets of PyTorch is the community and their contributions. A few of my favourite resources that pair well with the models and components here are listed below.
### Object Detection, Instance and Semantic Segmentation
* Detectron2 - https://github.com/facebookresearch/detectron2
* Segmentation Models (Semantic) - https://github.com/qubvel/segmentation_models.pytorch
* EfficientDet (Obj Det, Semantic soon) - https://github.com/rwightman/efficientdet-pytorch
### Computer Vision / Image Augmentation
* Albumentations - https://github.com/albumentations-team/albumentations
* Kornia - https://github.com/kornia/kornia
### Knowledge Distillation
* RepDistiller - https://github.com/HobbitLong/RepDistiller
* torchdistill - https://github.com/yoshitomo-matsubara/torchdistill
### Metric Learning
* PyTorch Metric Learning - https://github.com/KevinMusgrave/pytorch-metric-learning
### Training / Frameworks
* fastai - https://github.com/fastai/fastai
## Licenses
### Code
The code here is licensed Apache 2.0. I've taken care to make sure any third party code included or adapted has compatible (permissive) licenses such as MIT, BSD, etc. I've made an effort to avoid any GPL / LGPL conflicts. That said, it is your responsibility to ensure you comply with licenses here and conditions of any dependent licenses. Where applicable, I've linked the sources/references for various components in docstrings. If you think I've missed anything please create an issue.
### Pretrained Weights
So far all of the pretrained weights available here are pretrained on ImageNet with a select few that have some additional pretraining (see extra note below). ImageNet was released for non-commercial research purposes only (https://image-net.org/download). It's not clear what the implications of that are for the use of pretrained weights from that dataset. Any models I have trained with ImageNet are done for research purposes and one should assume that the original dataset license applies to the weights. It's best to seek legal advice if you intend to use the pretrained weights in a commercial product.
#### Pretrained on more than ImageNet
Several weights included or references here were pretrained with proprietary datasets that I do not have access to. These include the Facebook WSL, SSL, SWSL ResNe(Xt) and the Google Noisy Student EfficientNet models. The Facebook models have an explicit non-commercial license (CC-BY-NC 4.0, https://github.com/facebookresearch/semi-supervised-ImageNet1K-models, https://github.com/facebookresearch/WSL-Images). The Google models do not appear to have any restriction beyond the Apache 2.0 license (and ImageNet concerns). In either case, you should contact Facebook or Google with any questions.
## Citing
### BibTeX
```bibtex
@misc{rw2019timm,
author = {Ross Wightman},
title = {PyTorch Image Models},
year = {2019},
publisher = {GitHub},
journal = {GitHub repository},
doi = {10.5281/zenodo.4414861},
howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
}
```
### Latest DOI
[![DOI](https://zenodo.org/badge/168799526.svg)](https://zenodo.org/badge/latestdoi/168799526)
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/pyproject.toml | [tool.pytest.ini_options]
markers = [
"base: marker for model tests using the basic setup",
"cfg: marker for model tests checking the config",
"torchscript: marker for model tests using torchscript",
"features: marker for model tests checking feature extraction",
"fxforward: marker for model tests using torch fx (only forward)",
"fxbackward: marker for model tests using torch fx (only backward)",
]
[tool.black]
line-length = 120
target-version = ['py37', 'py38', 'py39', 'py310', 'py311']
skip-string-normalization = true
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/clean_checkpoint.py | #!/usr/bin/env python3
""" Checkpoint Cleaning Script
Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc.
and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256
calculation for model zoo compatibility.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
import argparse
import os
import hashlib
import shutil
import tempfile
from timm.models import load_state_dict
try:
import safetensors.torch
_has_safetensors = True
except ImportError:
_has_safetensors = False
parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='output path')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
                    help='do not use ema version of weights even if present')
parser.add_argument('--no-hash', dest='no_hash', action='store_true',
help='no hash in output filename')
parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true',
help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint')
parser.add_argument('--safetensors', action='store_true',
help='Save weights using safetensors instead of the default torch way (pickle).')
def main():
args = parser.parse_args()
if os.path.exists(args.output):
print("Error: Output filename ({}) already exists.".format(args.output))
exit(1)
clean_checkpoint(
args.checkpoint,
args.output,
not args.no_use_ema,
args.no_hash,
args.clean_aux_bn,
safe_serialization=args.safetensors,
)
def clean_checkpoint(
checkpoint,
output,
use_ema=True,
no_hash=False,
clean_aux_bn=False,
safe_serialization: bool=False,
):
# Load an existing checkpoint to CPU, strip everything but the state_dict and re-save
if checkpoint and os.path.isfile(checkpoint):
print("=> Loading checkpoint '{}'".format(checkpoint))
state_dict = load_state_dict(checkpoint, use_ema=use_ema)
new_state_dict = {}
for k, v in state_dict.items():
if clean_aux_bn and 'aux_bn' in k:
# If all aux_bn keys are removed, the SplitBN layers will end up as normal and
# load with the unmodified model using BatchNorm2d.
continue
name = k[7:] if k.startswith('module.') else k
new_state_dict[name] = v
print("=> Loaded state_dict from '{}'".format(checkpoint))
ext = ''
if output:
checkpoint_root, checkpoint_base = os.path.split(output)
checkpoint_base, ext = os.path.splitext(checkpoint_base)
else:
checkpoint_root = ''
checkpoint_base = os.path.split(checkpoint)[1]
checkpoint_base = os.path.splitext(checkpoint_base)[0]
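        # Save to a temp file first so the SHA256 of the cleaned checkpoint can be embedded in the final filename.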
temp_filename = '__' + checkpoint_base
if safe_serialization:
assert _has_safetensors, "`pip install safetensors` to use .safetensors"
safetensors.torch.save_file(new_state_dict, temp_filename)
else:
torch.save(new_state_dict, temp_filename)
with open(temp_filename, 'rb') as f:
sha_hash = hashlib.sha256(f.read()).hexdigest()
if ext:
final_ext = ext
else:
final_ext = ('.safetensors' if safe_serialization else '.pth')
if no_hash:
final_filename = checkpoint_base + final_ext
else:
final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + final_ext
shutil.move(temp_filename, os.path.join(checkpoint_root, final_filename))
print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash))
return final_filename
else:
print("Error: Checkpoint ({}) doesn't exist".format(checkpoint))
return ''
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/requirements-dev.txt | pytest
pytest-timeout
pytest-xdist
pytest-forked
expecttest
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/CONTRIBUTING.md | *This guideline is very much a work-in-progress.*
Contributions to `timm` for code, documentation, tests are more than welcome!
There haven't been any formal guidelines to date so please bear with me, and feel free to add to this guide.
# Coding style
Code linting and auto-format (black) are not currently in place but open to consideration. In the meantime, the style to follow is (mostly) aligned with Google's guide: https://google.github.io/styleguide/pyguide.html.
A few specific differences from Google style (or black)
1. Line length is 120 chars. Going over is okay in some cases (e.g. I prefer not to break URLs across lines).
2. Hanging indents are always preferred; please avoid aligning arguments with closing brackets or braces.
Example, from Google guide, but this is a NO here:
```
# Aligned with opening delimiter.
foo = long_function_name(var_one, var_two,
var_three, var_four)
meal = (spam,
beans)
# Aligned with opening delimiter in a dictionary.
foo = {
'long_dictionary_key': value1 +
value2,
...
}
```
This is YES:
```
# 4-space hanging indent; nothing on first line,
# closing parenthesis on a new line.
foo = long_function_name(
var_one, var_two, var_three,
var_four
)
meal = (
spam,
beans,
)
# 4-space hanging indent in a dictionary.
foo = {
'long_dictionary_key':
long_dictionary_value,
...
}
```
When there is a discrepancy in a given source file (there are many origins for various bits of code and not all have been updated to what I consider the current goal), please follow the prevailing style in that file.
In general, if you add new code, formatting it with black using the following options should result in a style that is compatible with the rest of the code base:
```
black --skip-string-normalization --line-length 120 <path-to-file>
```
Avoid formatting code that is unrelated to your PR though.
PR with pure formatting / style fixes will be accepted but only in isolation from functional changes, best to ask before starting such a change.
# Documentation
As with code style, docstring style is based on the Google guide: https://google.github.io/styleguide/pyguide.html
The goal for the code is to eventually move to have all major functions and `__init__` methods use PEP484 type annotations.
When type annotations are used for a function, as per the Google pyguide, they should **NOT** be duplicated in the docstrings, please leave annotations as the one source of truth re typing.
There are a LOT of gaps in current documentation relative to the functionality in timm, please, document away!
# Installation
Create a Python virtual environment using Python 3.10. Inside the environment, install `torch` and `torchvision` using the instructions matching your system as listed on the [PyTorch website](https://pytorch.org/).
Then install the remaining dependencies:
```
python -m pip install -r requirements.txt
python -m pip install -r requirements-dev.txt # for testing
python -m pip install -e .
```
## Unit tests
Run the tests using:
```
pytest tests/
```
Since the whole test suite takes a lot of time to run locally (a few hours), you may want to select a subset of tests relating to the changes you made by using the `-k` option of [`pytest`](https://docs.pytest.org/en/7.1.x/example/markers.html#using-k-expr-to-select-tests-based-on-their-name). Moreover, running tests in parallel (in this example 4 processes) with the `-n` option may help:
```
pytest -k "substring-to-match" -n 4 tests/
```
## Building documentation
Please refer to [this document](https://github.com/huggingface/pytorch-image-models/tree/main/hfdocs).
# Questions
If you have any questions about contribution, where / how to contribute, please ask in the [Discussions](https://github.com/huggingface/pytorch-image-models/discussions/categories/contributing) (there is a `Contributing` topic).
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/requirements.txt | torch>=1.7
torchvision
pyyaml
huggingface_hub
safetensors>=0.2 | 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/distributed_train.sh | #!/bin/bash
NUM_PROC=$1
shift
torchrun --nproc_per_node=$NUM_PROC train.py "$@"
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/setup.py | """ Setup
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
exec(open('timm/version.py').read())
setup(
name='timm',
version=__version__,
description='PyTorch Image Models',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/huggingface/pytorch-image-models',
author='Ross Wightman',
author_email='ross@huggingface.co',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# Note that this is a string of words separated by whitespace, not a list.
keywords='pytorch pretrained models efficientnet mobilenetv3 mnasnet resnet vision transformer vit',
packages=find_packages(exclude=['convert', 'tests', 'results']),
include_package_data=True,
install_requires=['torch >= 1.7', 'torchvision', 'pyyaml', 'huggingface_hub', 'safetensors'],
python_requires='>=3.7',
)
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/train.py | #!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import json
import logging
import os
import time
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
from functools import partial
import torch
import torch.nn as nn
import torchvision.utils
import yaml
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm import utils
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.layers import convert_splitbn_model, convert_sync_batchnorm, set_fast_norm
from timm.loss import JsdCrossEntropy, SoftTargetCrossEntropy, BinaryCrossEntropy, LabelSmoothingCrossEntropy
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, model_parameters
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler_v2, scheduler_kwargs
from timm.utils import ApexScaler, NativeScaler
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset parameters
group = parser.add_argument_group('Dataset parameters')
# Keep this argument outside the dataset group because it is positional.
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (positional is *deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
group.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
group.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--train-num-samples', default=None, type=int,
metavar='N', help='Manually specify num samples in train split, for IterableDatasets.')
parser.add_argument('--val-num-samples', default=None, type=int,
metavar='N', help='Manually specify num samples in validation split, for IterableDatasets.')
group.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
group.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
group.add_argument('--input-img-mode', default=None, type=str,
help='Dataset image conversion mode for input images.')
group.add_argument('--input-key', default=None, type=str,
help='Dataset key for input images.')
group.add_argument('--target-key', default=None, type=str,
help='Dataset key for target labels.')
# Model parameters
group = parser.add_argument_group('Model parameters')
group.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
help='Name of model to train (default: "resnet50")')
group.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
group.add_argument('--pretrained-path', default=None, type=str,
help='Load this checkpoint as if they were the pretrained weights (with adaptation).')
group.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Load this checkpoint into model after initialization (default: none)')
group.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
group.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
group.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
group.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
group.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image size (default: None => model default)')
group.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
group.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N',
help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
group.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
group.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
group.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
group.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
group.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
help='Input batch size for training (default: 128)')
group.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
help='Validation batch size override (default: None)')
group.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
group.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
group.add_argument('--grad-accum-steps', type=int, default=1, metavar='N',
help='The number of steps to accumulate gradients (default: 1)')
group.add_argument('--grad-checkpointing', action='store_true', default=False,
help='Enable gradient checkpointing through model blocks/stages')
group.add_argument('--fast-norm', default=False, action='store_true',
help='enable experimental fast-norm')
group.add_argument('--model-kwargs', nargs='*', default={}, action=utils.ParseKwargs)
group.add_argument('--head-init-scale', default=None, type=float,
help='Head initialization scale')
group.add_argument('--head-init-bias', default=None, type=float,
help='Head initialization bias value')
# scripting / codegen
scripting_group = group.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
# Optimizer parameters
group = parser.add_argument_group('Optimizer parameters')
group.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "sgd")')
group.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
group.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
group.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
group.add_argument('--weight-decay', type=float, default=2e-5,
help='weight decay (default: 2e-5)')
group.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
group.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
group.add_argument('--layer-decay', type=float, default=None,
help='layer-wise learning rate decay (default: None)')
group.add_argument('--opt-kwargs', nargs='*', default={}, action=utils.ParseKwargs)
# Learning rate schedule parameters
group = parser.add_argument_group('Learning rate schedule parameters')
group.add_argument('--sched', type=str, default='cosine', metavar='SCHEDULER',
                   help='LR scheduler (default: "cosine")')
group.add_argument('--sched-on-updates', action='store_true', default=False,
help='Apply LR scheduler step on update instead of epoch end.')
group.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate, overrides lr-base if set (default: None)')
group.add_argument('--lr-base', type=float, default=0.1, metavar='LR',
help='base learning rate: lr = lr_base * global_batch_size / base_size')
group.add_argument('--lr-base-size', type=int, default=256, metavar='DIV',
help='base learning rate batch size (divisor, default: 256).')
group.add_argument('--lr-base-scale', type=str, default='', metavar='SCALE',
help='base learning rate vs batch_size scaling ("linear", "sqrt", based on opt if empty)')
group.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
group.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
group.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
group.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
group.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
help='amount to decay each learning rate cycle (default: 0.5)')
group.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit, cycles enabled if > 1')
group.add_argument('--lr-k-decay', type=float, default=1.0,
help='learning rate k-decay for cosine/poly (default: 1.0)')
group.add_argument('--warmup-lr', type=float, default=1e-5, metavar='LR',
help='warmup learning rate (default: 1e-5)')
group.add_argument('--min-lr', type=float, default=0, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (default: 0)')
group.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train (default: 300)')
group.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
group.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
group.add_argument('--decay-milestones', default=[90, 180, 270], type=int, nargs='+', metavar="MILESTONES",
help='list of decay epoch indices for multistep lr. must be increasing')
group.add_argument('--decay-epochs', type=float, default=90, metavar='N',
help='epoch interval to decay LR')
group.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
group.add_argument('--warmup-prefix', action='store_true', default=False,
                   help='Exclude warmup period from decay schedule.')
group.add_argument('--cooldown-epochs', type=int, default=0, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
group.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10)')
group.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
group = parser.add_argument_group('Augmentation and regularization parameters')
group.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
group.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
group.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
group.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
group.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
group.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
group.add_argument('--color-jitter-prob', type=float, default=None, metavar='PCT',
help='Probability of applying any color jitter.')
group.add_argument('--grayscale-prob', type=float, default=None, metavar='PCT',
help='Probability of applying random grayscale conversion.')
group.add_argument('--gaussian-blur-prob', type=float, default=None, metavar='PCT',
help='Probability of applying gaussian blur.')
group.add_argument('--aa', type=str, default=None, metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". (default: None)')
group.add_argument('--aug-repeats', type=float, default=0,
help='Number of augmentation repetitions (distributed training only) (default: 0)')
group.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
group.add_argument('--jsd-loss', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
group.add_argument('--bce-loss', action='store_true', default=False,
help='Enable BCE loss w/ Mixup/CutMix use.')
group.add_argument('--bce-sum', action='store_true', default=False,
help='Sum over classes when using BCE loss.')
group.add_argument('--bce-target-thresh', type=float, default=None,
help='Threshold for binarizing softened BCE targets (default: None, disabled).')
group.add_argument('--bce-pos-weight', type=float, default=None,
help='Positive weighting for BCE loss.')
group.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
group.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
group.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
group.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
group.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
group.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
group.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
group.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
group.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
group.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
group.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
group.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
group.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic; default: "random")')
group.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
group.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
group.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
group.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
group = parser.add_argument_group('Batch norm parameters', 'Only works with gen_efficientnet based models currently.')
group.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
group.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
group.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
group.add_argument('--dist-bn', type=str, default='reduce',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
group.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
group = parser.add_argument_group('Model exponential moving average parameters')
group.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
group.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
group.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
group = parser.add_argument_group('Miscellaneous parameters')
group.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
group.add_argument('--worker-seeding', type=str, default='all',
help='worker seed mode (default: all)')
group.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
group.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
group.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
help='number of checkpoints to keep (default: 10)')
group.add_argument('-j', '--workers', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
group.add_argument('--save-images', action='store_true', default=False,
help='save images of input batches every log interval for debugging')
group.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
group.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
group.add_argument('--amp-impl', default='native', type=str,
help='AMP impl to use, "native" or "apex" (default: native)')
group.add_argument('--no-ddp-bb', action='store_true', default=False,
help='Force broadcast buffers for native DDP to off.')
group.add_argument('--synchronize-step', action='store_true', default=False,
help='torch.cuda.synchronize() end of each step')
group.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
group.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
group.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
group.add_argument('--experiment', default='', type=str, metavar='NAME',
help='name of train experiment, name of sub-folder for output')
group.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
help='Best metric (default: "top1")')
group.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
group.add_argument("--local_rank", default=0, type=int)
group.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
group.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def main():
utils.setup_default_logging()
args, args_text = _parse_args()
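# enable TF32 matmuls and cuDNN benchmark autotuning when CUDA is available; benchmark mode
# helps most when input sizes stay fixed across iterations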
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
args.prefetcher = not args.no_prefetcher
args.grad_accum_steps = max(1, args.grad_accum_steps)
device = utils.init_distributed_device(args)
if args.distributed:
_logger.info(
'Training in distributed mode with multiple processes, 1 device per process. '
f'Process {args.rank}, total {args.world_size}, device {args.device}.')
else:
_logger.info(f'Training with a single process on 1 device ({args.device}).')
assert args.rank >= 0
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
amp_dtype = torch.float16
if args.amp:
if args.amp_impl == 'apex':
assert has_apex, 'AMP impl specified as APEX but APEX is not installed.'
use_amp = 'apex'
assert args.amp_dtype == 'float16'
else:
assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
use_amp = 'native'
assert args.amp_dtype in ('float16', 'bfloat16')
if args.amp_dtype == 'bfloat16':
amp_dtype = torch.bfloat16
utils.random_seed(args.seed, args.rank)
if args.fuser:
utils.set_jit_fuser(args.fuser)
if args.fast_norm:
set_fast_norm()
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
factory_kwargs = {}
if args.pretrained_path:
# merge with pretrained_cfg of model, 'file' has priority over 'url' and 'hf_hub'.
factory_kwargs['pretrained_cfg_overlay'] = dict(
file=args.pretrained_path,
num_classes=-1, # force head adaptation
)
model = create_model(
args.model,
pretrained=args.pretrained,
in_chans=in_chans,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint,
**factory_kwargs,
**args.model_kwargs,
)
if args.head_init_scale is not None:
with torch.no_grad():
model.get_classifier().weight.mul_(args.head_init_scale)
model.get_classifier().bias.mul_(args.head_init_scale)
if args.head_init_bias is not None:
nn.init.constant_(model.get_classifier().bias, args.head_init_bias)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly
if args.grad_checkpointing:
model.set_grad_checkpointing(enable=True)
if utils.is_primary(args):
_logger.info(
f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model, verbose=utils.is_primary(args))
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.to(device=device)
if args.channels_last:
model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
args.dist_bn = '' # disable dist_bn when sync BN active
assert not args.split_bn
if has_apex and use_amp == 'apex':
# Apex SyncBN used with Apex AMP
# WARNING this won't currently work with models using BatchNormAct2d
model = convert_syncbn_model(model)
else:
model = convert_sync_batchnorm(model)
if utils.is_primary(args):
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
if args.torchscript:
assert not args.torchcompile
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
model = torch.jit.script(model)
if not args.lr:
global_batch_size = args.batch_size * args.world_size * args.grad_accum_steps
batch_ratio = global_batch_size / args.lr_base_size
if not args.lr_base_scale:
on = args.opt.lower()
args.lr_base_scale = 'sqrt' if any([o in on for o in ('ada', 'lamb')]) else 'linear'
if args.lr_base_scale == 'sqrt':
batch_ratio = batch_ratio ** 0.5
args.lr = args.lr_base * batch_ratio
if utils.is_primary(args):
_logger.info(
f'Learning rate ({args.lr}) calculated from base learning rate ({args.lr_base}) '
f'and effective global batch size ({global_batch_size}) with {args.lr_base_scale} scaling.')
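# worked example of the scaling above: with lr_base=0.1 at lr_base_size=256 and a global batch
# size of 1024 (batch_ratio=4), linear scaling gives lr=0.4 and sqrt scaling gives lr=0.2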
optimizer = create_optimizer_v2(
model,
**optimizer_kwargs(cfg=args),
**args.opt_kwargs,
)
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
assert device.type == 'cuda'
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if utils.is_primary(args):
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
try:
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
except (AttributeError, TypeError):
# fallback to CUDA only AMP for PyTorch < 1.10
assert device.type == 'cuda'
amp_autocast = torch.cuda.amp.autocast
if device.type == 'cuda' and amp_dtype == torch.float16:
# loss scaler only used for float16 (half) dtype, bfloat16 does not need it
loss_scaler = NativeScaler()
if utils.is_primary(args):
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if utils.is_primary(args):
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model,
args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=utils.is_primary(args),
)
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before DDP wrapper
model_ema = utils.ModelEmaV2(
model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
if args.resume:
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp == 'apex':
# Apex DDP preferred unless native amp is activated
if utils.is_primary(args):
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if utils.is_primary(args):
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[device], broadcast_buffers=not args.no_ddp_bb)
# NOTE: EMA model does not need to be wrapped by DDP
if args.torchcompile:
# torch compile should be done after DDP
assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
model = torch.compile(model, backend=args.torchcompile)
# create the train and eval datasets
if args.data and not args.data_dir:
args.data_dir = args.data
if args.input_img_mode is None:
input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L'
else:
input_img_mode = args.input_img_mode
dataset_train = create_dataset(
args.dataset,
root=args.data_dir,
split=args.train_split,
is_training=True,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size,
seed=args.seed,
repeats=args.epoch_repeats,
input_img_mode=input_img_mode,
input_key=args.input_key,
target_key=args.target_key,
num_samples=args.train_num_samples,
)
if args.val_split:
dataset_eval = create_dataset(
args.dataset,
root=args.data_dir,
split=args.val_split,
is_training=False,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size,
input_img_mode=input_img_mode,
input_key=args.input_key,
target_key=args.target_key,
num_samples=args.val_num_samples,
)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.num_classes
)
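# with the GPU prefetcher, mixup/cutmix is folded into the collate fn so batches arrive already
# mixed; otherwise a Mixup transform is applied per batch inside train_one_epoch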
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
# create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
color_jitter_prob=args.color_jitter_prob,
grayscale_prob=args.grayscale_prob,
gaussian_blur_prob=args.gaussian_blur_prob,
auto_augment=args.aa,
num_aug_repeats=args.aug_repeats,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
device=device,
use_prefetcher=args.prefetcher,
use_multi_epochs_loader=args.use_multi_epochs_loader,
worker_seeding=args.worker_seeding,
)
loader_eval = None
if args.val_split:
eval_workers = args.workers
if args.distributed and ('tfds' in args.dataset or 'wds' in args.dataset):
# FIXME reduces validation padding issues when using TFDS, WDS w/ workers and distributed training
eval_workers = min(2, args.workers)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size or args.batch_size,
is_training=False,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=eval_workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
device=device,
use_prefetcher=args.prefetcher,
)
# setup loss function
if args.jsd_loss:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
elif mixup_active:
# smoothing is handled with mixup target transform which outputs sparse, soft targets
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(
target_threshold=args.bce_target_thresh,
sum_classes=args.bce_sum,
pos_weight=args.bce_pos_weight,
)
else:
train_loss_fn = SoftTargetCrossEntropy()
elif args.smoothing:
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(
smoothing=args.smoothing,
target_threshold=args.bce_target_thresh,
sum_classes=args.bce_sum,
pos_weight=args.bce_pos_weight,
)
else:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
train_loss_fn = nn.CrossEntropyLoss()
train_loss_fn = train_loss_fn.to(device=device)
validate_loss_fn = nn.CrossEntropyLoss().to(device=device)
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric if loader_eval is not None else 'loss'
decreasing_metric = eval_metric == 'loss'
best_metric = None
best_epoch = None
saver = None
output_dir = None
if utils.is_primary(args):
if args.experiment:
exp_name = args.experiment
else:
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config['input_size'][-1])
])
output_dir = utils.get_outdir(args.output if args.output else './output/train', exp_name)
saver = utils.CheckpointSaver(
model=model,
optimizer=optimizer,
args=args,
model_ema=model_ema,
amp_scaler=loss_scaler,
checkpoint_dir=output_dir,
recovery_dir=output_dir,
decreasing=decreasing_metric,
max_history=args.checkpoint_hist
)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
if utils.is_primary(args) and args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning(
"You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`")
# setup learning rate schedule and starting epoch
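# optimizer updates per epoch = ceil(num_train_batches / grad_accum_steps); passed to the
# scheduler so update-based (per-step) LR schedules line up with gradient accumulation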
updates_per_epoch = (len(loader_train) + args.grad_accum_steps - 1) // args.grad_accum_steps
lr_scheduler, num_epochs = create_scheduler_v2(
optimizer,
**scheduler_kwargs(args, decreasing_metric=decreasing_metric),
updates_per_epoch=updates_per_epoch,
)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
if args.sched_on_updates:
lr_scheduler.step_update(start_epoch * updates_per_epoch)
else:
lr_scheduler.step(start_epoch)
if utils.is_primary(args):
_logger.info(
f'Scheduled epochs: {num_epochs}. LR stepped per {"epoch" if lr_scheduler.t_in_epochs else "update"}.')
results = []
try:
for epoch in range(start_epoch, num_epochs):
if hasattr(dataset_train, 'set_epoch'):
dataset_train.set_epoch(epoch)
elif args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch,
model,
loader_train,
optimizer,
train_loss_fn,
args,
device=device,
lr_scheduler=lr_scheduler,
saver=saver,
output_dir=output_dir,
amp_autocast=amp_autocast,
loss_scaler=loss_scaler,
model_ema=model_ema,
mixup_fn=mixup_fn,
)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if utils.is_primary(args):
_logger.info("Distributing BatchNorm running means and vars")
utils.distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
if loader_eval is not None:
eval_metrics = validate(
model,
loader_eval,
validate_loss_fn,
args,
device=device,
amp_autocast=amp_autocast,
)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
utils.distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema.module,
loader_eval,
validate_loss_fn,
args,
device=device,
amp_autocast=amp_autocast,
log_suffix=' (EMA)',
)
eval_metrics = ema_eval_metrics
else:
eval_metrics = None
if output_dir is not None:
lrs = [param_group['lr'] for param_group in optimizer.param_groups]
utils.update_summary(
epoch,
train_metrics,
eval_metrics,
filename=os.path.join(output_dir, 'summary.csv'),
lr=sum(lrs) / len(lrs),
write_header=best_metric is None,
log_wandb=args.log_wandb and has_wandb,
)
if eval_metrics is not None:
latest_metric = eval_metrics[eval_metric]
else:
latest_metric = train_metrics[eval_metric]
if saver is not None:
# save proper checkpoint with eval metric
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=latest_metric)
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, latest_metric)
results.append({
'epoch': epoch,
'train': train_metrics,
'validation': eval_metrics,
})
except KeyboardInterrupt:
pass
results = {'all': results}
if best_metric is not None:
results['best'] = results['all'][best_epoch - start_epoch]
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
print(f'--result\n{json.dumps(results, indent=4)}')
def train_one_epoch(
epoch,
model,
loader,
optimizer,
loss_fn,
args,
device=torch.device('cuda'),
lr_scheduler=None,
saver=None,
output_dir=None,
amp_autocast=suppress,
loss_scaler=None,
model_ema=None,
mixup_fn=None,
):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
has_no_sync = hasattr(model, "no_sync")
update_time_m = utils.AverageMeter()
data_time_m = utils.AverageMeter()
losses_m = utils.AverageMeter()
model.train()
accum_steps = args.grad_accum_steps
last_accum_steps = len(loader) % accum_steps
updates_per_epoch = (len(loader) + accum_steps - 1) // accum_steps
num_updates = epoch * updates_per_epoch
last_batch_idx = len(loader) - 1
last_batch_idx_to_accum = len(loader) - last_accum_steps
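# bookkeeping example: len(loader)=10 with accum_steps=4 gives updates_per_epoch=3,
# last_accum_steps=2, last_batch_idx_to_accum=8, so the final update accumulates only
# batches 8-9 (when len(loader) divides evenly, last_accum_steps is 0 and never used)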
data_start_time = update_start_time = time.time()
optimizer.zero_grad()
update_sample_count = 0
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_batch_idx
need_update = last_batch or (batch_idx + 1) % accum_steps == 0
update_idx = batch_idx // accum_steps
if batch_idx >= last_batch_idx_to_accum:
accum_steps = last_accum_steps
if not args.prefetcher:
input, target = input.to(device), target.to(device)
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# multiply by accum steps to get equivalent for full update
data_time_m.update(accum_steps * (time.time() - data_start_time))
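# forward/backward are wrapped in closures so the DDP no_sync() path below can reuse them;
# dividing the loss by accum_steps makes the accumulated gradients match a full-batch update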
def _forward():
with amp_autocast():
output = model(input)
loss = loss_fn(output, target)
if accum_steps > 1:
loss /= accum_steps
return loss
def _backward(_loss):
if loss_scaler is not None:
loss_scaler(
_loss,
optimizer,
clip_grad=args.clip_grad,
clip_mode=args.clip_mode,
parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
create_graph=second_order,
need_update=need_update,
)
else:
_loss.backward(create_graph=second_order)
if need_update:
if args.clip_grad is not None:
utils.dispatch_clip_grad(
model_parameters(model, exclude_head='agc' in args.clip_mode),
value=args.clip_grad,
mode=args.clip_mode,
)
optimizer.step()
if has_no_sync and not need_update:
with model.no_sync():
loss = _forward()
_backward(loss)
else:
loss = _forward()
_backward(loss)
if not args.distributed:
losses_m.update(loss.item() * accum_steps, input.size(0))
update_sample_count += input.size(0)
if not need_update:
data_start_time = time.time()
continue
num_updates += 1
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
if args.synchronize_step and device.type == 'cuda':
torch.cuda.synchronize()
time_now = time.time()
update_time_m.update(time.time() - update_start_time)
update_start_time = time_now
if update_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = utils.reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item() * accum_steps, input.size(0))
update_sample_count *= args.world_size
if utils.is_primary(args):
_logger.info(
f'Train: {epoch} [{update_idx:>4d}/{updates_per_epoch} '
f'({100. * update_idx / (updates_per_epoch - 1):>3.0f}%)] '
f'Loss: {losses_m.val:#.3g} ({losses_m.avg:#.3g}) '
f'Time: {update_time_m.val:.3f}s, {update_sample_count / update_time_m.val:>7.2f}/s '
f'({update_time_m.avg:.3f}s, {update_sample_count / update_time_m.avg:>7.2f}/s) '
f'LR: {lr:.3e} '
f'Data: {data_time_m.val:.3f} ({data_time_m.avg:.3f})'
)
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True
)
if saver is not None and args.recovery_interval and (
(update_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=update_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
update_sample_count = 0
data_start_time = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
def validate(
model,
loader,
loss_fn,
args,
device=torch.device('cuda'),
amp_autocast=suppress,
log_suffix=''
):
batch_time_m = utils.AverageMeter()
losses_m = utils.AverageMeter()
top1_m = utils.AverageMeter()
top5_m = utils.AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.to(device)
target = target.to(device)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
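# assumes the loader yields args.tta augmented copies of each sample consecutively: average
# logits over each group (via unfold below) and keep one target per group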
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = utils.reduce_tensor(loss.data, args.world_size)
acc1 = utils.reduce_tensor(acc1, args.world_size)
acc5 = utils.reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
if device.type == 'cuda':
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if utils.is_primary(args) and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
f'{log_name}: [{batch_idx:>4d}/{last_idx}] '
f'Time: {batch_time_m.val:.3f} ({batch_time_m.avg:.3f}) '
f'Loss: {losses_m.val:>7.3f} ({losses_m.avg:>6.3f}) '
f'Acc@1: {top1_m.val:>7.3f} ({top1_m.avg:>7.3f}) '
f'Acc@5: {top5_m.val:>7.3f} ({top5_m.avg:>7.3f})'
)
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
return metrics
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/MANIFEST.in | include timm/models/_pruned/*.txt
include timm/data/_info/*.txt
include timm/data/_info/*.json
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/requirements-docs.txt | mkdocs
mkdocs-material
mkdocs-redirects
mdx_truly_sane_lists
mkdocs-awesome-pages-plugin
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/avg_checkpoints.py | #!/usr/bin/env python3
""" Checkpoint Averaging Script
This script averages all model weights for checkpoints in the specified path that match
the specified filter wildcard. All checkpoints must be from the exact same model.
For any hope of decent results, the checkpoints should be from the same training session
(or a child of it via resumes). This can be viewed as similar to maintaining a running
EMA (exponential moving average) of the model weights or performing SWA (stochastic
weight averaging), but applied post-training.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
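
Example usage (paths and filter pattern are illustrative, adjust to your checkpoints):
    ./avg_checkpoints.py --input ./output/train/my_exp --filter 'checkpoint-*.pth.tar' -n 5 --output ./avg.pth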
"""
import torch
import argparse
import os
import glob
import hashlib
from timm.models import load_state_dict
try:
import safetensors.torch
_has_safetensors = True
except ImportError:
_has_safetensors = False
DEFAULT_OUTPUT = "./averaged.pth"
DEFAULT_SAFE_OUTPUT = "./averaged.safetensors"
parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager')
parser.add_argument('--input', default='', type=str, metavar='PATH',
help='path to base input folder containing checkpoints')
parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD',
help='checkpoint filter (path wildcard)')
parser.add_argument('--output', default=DEFAULT_OUTPUT, type=str, metavar='PATH',
help=f'Output filename. Defaults to {DEFAULT_SAFE_OUTPUT} when passing --safetensors.')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
help='Force not using ema version of weights (if present)')
parser.add_argument('--no-sort', dest='no_sort', action='store_true',
help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant')
parser.add_argument('-n', type=int, default=10, metavar='N',
help='Number of checkpoints to average')
parser.add_argument('--safetensors', action='store_true',
help='Save weights using safetensors instead of the default torch way (pickle).')
def checkpoint_metric(checkpoint_path):
if not checkpoint_path or not os.path.isfile(checkpoint_path):
return {}
print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location='cpu')
metric = None
if 'metric' in checkpoint:
metric = checkpoint['metric']
elif 'metrics' in checkpoint and 'metric_name' in checkpoint:
metrics = checkpoint['metrics']
print(metrics)
metric = metrics[checkpoint['metric_name']]
return metric
def main():
args = parser.parse_args()
# by default use the EMA weights (if present)
args.use_ema = not args.no_use_ema
# by default sort by checkpoint metric (if present) and avg top n checkpoints
args.sort = not args.no_sort
if args.safetensors and args.output == DEFAULT_OUTPUT:
# Default path changes if using safetensors
args.output = DEFAULT_SAFE_OUTPUT
output, output_ext = os.path.splitext(args.output)
if not output_ext:
output_ext = ('.safetensors' if args.safetensors else '.pth')
output = output + output_ext
if args.safetensors and not output_ext == ".safetensors":
print(
"Warning: saving weights as safetensors but output file extension is not "
f"set to '.safetensors': {args.output}"
)
if os.path.exists(output):
print("Error: Output filename ({}) already exists.".format(output))
exit(1)
pattern = args.input
if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep):
pattern += os.path.sep
pattern += args.filter
checkpoints = glob.glob(pattern, recursive=True)
if args.sort:
checkpoint_metrics = []
for c in checkpoints:
metric = checkpoint_metric(c)
if metric is not None:
checkpoint_metrics.append((metric, c))
checkpoint_metrics = list(sorted(checkpoint_metrics))
checkpoint_metrics = checkpoint_metrics[-args.n:]
if checkpoint_metrics:
print("Selected checkpoints:")
[print(m, c) for m, c in checkpoint_metrics]
avg_checkpoints = [c for m, c in checkpoint_metrics]
else:
avg_checkpoints = checkpoints
if avg_checkpoints:
print("Selected checkpoints:")
[print(c) for c in checkpoints]
if not avg_checkpoints:
print('Error: No checkpoints found to average.')
exit(1)
avg_state_dict = {}
avg_counts = {}
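# accumulate each tensor in float64 to limit rounding error, then divide by the number of
# checkpoints that actually contained that key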
for c in avg_checkpoints:
new_state_dict = load_state_dict(c, args.use_ema)
if not new_state_dict:
print(f"Error: Checkpoint ({c}) doesn't exist")
continue
for k, v in new_state_dict.items():
if k not in avg_state_dict:
avg_state_dict[k] = v.clone().to(dtype=torch.float64)
avg_counts[k] = 1
else:
avg_state_dict[k] += v.to(dtype=torch.float64)
avg_counts[k] += 1
for k, v in avg_state_dict.items():
v.div_(avg_counts[k])
# float32 overflow seems unlikely based on weights seen to date, but who knows
float32_info = torch.finfo(torch.float32)
final_state_dict = {}
for k, v in avg_state_dict.items():
v = v.clamp(float32_info.min, float32_info.max)
final_state_dict[k] = v.to(dtype=torch.float32)
if args.safetensors:
assert _has_safetensors, "`pip install safetensors` to use .safetensors"
safetensors.torch.save_file(final_state_dict, output)
else:
torch.save(final_state_dict, output)
with open(output, 'rb') as f:
sha_hash = hashlib.sha256(f.read()).hexdigest()
print(f"=> Saved state_dict to '{output}', SHA256: {sha_hash}")
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/hubconf.py | dependencies = ['torch']
import timm
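# expose every registered timm model entrypoint at module level so torch.hub can resolve it by name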
globals().update(timm.models._registry._model_entrypoints)
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/validate.py | #!/usr/bin/env python3
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by Ross Wightman (https://github.com/rwightman)
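
Example usage (dataset path is illustrative):
    python validate.py --data-dir /path/to/imagenet --model resnet50 --pretrained --batch-size 256 --amp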
"""
import argparse
import csv
import glob
import json
import logging
import os
import time
from collections import OrderedDict
from contextlib import suppress
from functools import partial
import torch
import torch.nn as nn
import torch.nn.parallel
from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
from timm.layers import apply_test_time_pool, set_fast_norm
from timm.models import create_model, load_checkpoint, is_model, list_models
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_fuser, \
decay_batch_step, check_batch_size_retry, ParseKwargs, reparameterize_model
try:
from apex import amp
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', nargs='?', metavar='DIR', const=None,
help='path to dataset (*deprecated*, use --data-dir)')
parser.add_argument('--data-dir', metavar='DIR',
help='path to dataset (root dir)')
parser.add_argument('--dataset', metavar='NAME', default='',
help='dataset type + name ("<type>/<name>") (default: ImageFolder or ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--num-samples', default=None, type=int,
metavar='N', help='Manually specify num samples in dataset split, for IterableDatasets.')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--input-key', default=None, type=str,
help='Dataset key for input images.')
parser.add_argument('--input-img-mode', default=None, type=str,
help='Dataset image conversion mode for input images.')
parser.add_argument('--target-key', default=None, type=str,
help='Dataset key for target labels.')
parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--in-chans', type=int, default=None, metavar='N',
help='Image input channels (default: None => 3)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='force use of train input size, even when test size is specified in pretrained cfg')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--crop-mode', default=None, type=str,
metavar='N', help='Input image crop mode (squash, border, center). Model default if None.')
parser.add_argument('--crop-border-pixels', type=int, default=None,
help='Crop pixels from image border.')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--device', default='cuda', type=str,
help="Device (accelerator) to use.")
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16)')
parser.add_argument('--amp-impl', default='native', type=str,
help='AMP impl to use, "native" or "apex" (default: native)')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--fast-norm', default=False, action='store_true',
help='enable experimental fast-norm')
parser.add_argument('--reparam', default=False, action='store_true',
help='Reparameterize model')
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', default=False, action='store_true',
help='torch.jit.script the full model')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd support.")
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument('--results-format', default='csv', type=str,
help='Format for results file one of (csv, json) (default: csv).')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
help='Valid label indices txt file for validation of partial label space')
parser.add_argument('--retry', default=False, action='store_true',
help='Enable batch size decay & retry for single model validation')
def validate(args):
# might as well try to validate something
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
device = torch.device(args.device)
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
amp_autocast = suppress
if args.amp:
if args.amp_impl == 'apex':
assert has_apex, 'AMP impl specified as APEX but APEX is not installed.'
assert args.amp_dtype == 'float16'
use_amp = 'apex'
_logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
else:
assert has_native_amp, 'Please update PyTorch to a version with native AMP (or use APEX).'
assert args.amp_dtype in ('float16', 'bfloat16')
use_amp = 'native'
amp_dtype = torch.bfloat16 if args.amp_dtype == 'bfloat16' else torch.float16
amp_autocast = partial(torch.autocast, device_type=device.type, dtype=amp_dtype)
_logger.info('Validating in mixed precision with native PyTorch AMP.')
else:
_logger.info('Validating in float32. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
if args.fast_norm:
set_fast_norm()
# create model
in_chans = 3
if args.in_chans is not None:
in_chans = args.in_chans
elif args.input_size is not None:
in_chans = args.input_size[0]
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=in_chans,
global_pool=args.gp,
scriptable=args.torchscript,
**args.model_kwargs,
)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
if args.reparam:
model = reparameterize_model(model)
param_count = sum([m.numel() for m in model.parameters()])
_logger.info('Model %s created, param count: %d' % (args.model, param_count))
data_config = resolve_data_config(
vars(args),
model=model,
use_test_size=not args.use_train_size,
verbose=True,
)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config)
model = model.to(device)
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.torchscript:
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
model = torch.jit.script(model)
elif args.torchcompile:
assert has_compile, 'A version of torch w/ torch.compile() is required for --compile, possibly a nightly.'
torch._dynamo.reset()
model = torch.compile(model, backend=args.torchcompile)
elif args.aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
model = memory_efficient_fusion(model)
if use_amp == 'apex':
model = amp.initialize(model, opt_level='O1')
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
criterion = nn.CrossEntropyLoss().to(device)
root_dir = args.data or args.data_dir
if args.input_img_mode is None:
input_img_mode = 'RGB' if data_config['input_size'][0] == 3 else 'L'
else:
input_img_mode = args.input_img_mode
dataset = create_dataset(
root=root_dir,
name=args.dataset,
split=args.split,
download=args.dataset_download,
load_bytes=args.tf_preprocessing,
class_map=args.class_map,
num_samples=args.num_samples,
input_key=args.input_key,
input_img_mode=input_img_mode,
target_key=args.target_key,
)
if args.valid_labels:
with open(args.valid_labels, 'r') as f:
valid_labels = [int(line.rstrip()) for line in f]
else:
valid_labels = None
if args.real_labels:
real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
else:
real_labels = None
crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
crop_mode=data_config['crop_mode'],
crop_border_pixels=args.crop_border_pixels,
pin_memory=args.pin_mem,
device=device,
tf_preprocessing=args.tf_preprocessing,
)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
# warmup, reduce variability of first batch time, especially for comparing torchscript vs non
input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).to(device)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
model(input)
end = time.time()
for batch_idx, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.to(device)
input = input.to(device)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# compute output
with amp_autocast():
output = model(input)
if valid_labels is not None:
output = output[:, valid_labels]
loss = criterion(output, target)
if real_labels is not None:
real_labels.add_result(output)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
batch_idx,
len(loader),
batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses,
top1=top1,
top5=top5
)
)
if real_labels is not None:
# real labels mode replaces topk values at the end
top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
else:
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
model=args.model,
top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config['input_size'][-1],
crop_pct=crop_pct,
interpolation=data_config['interpolation'],
)
_logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
results['top1'], results['top1_err'], results['top5'], results['top5_err']))
return results
def _try_run(args, initial_batch_size):
batch_size = initial_batch_size
results = OrderedDict()
error_str = 'Unknown'
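# retry with progressively smaller batch sizes when validation hits a retryable (e.g. CUDA OOM) RuntimeError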
while batch_size:
args.batch_size = batch_size * args.num_gpu # multiply by num-gpu for DataParallel case
try:
if torch.cuda.is_available() and 'cuda' in args.device:
torch.cuda.empty_cache()
results = validate(args)
return results
except RuntimeError as e:
error_str = str(e)
_logger.error(f'"{error_str}" while running validation.')
if not check_batch_size_retry(error_str):
break
batch_size = decay_batch_step(batch_size)
_logger.warning(f'Reducing batch size to {batch_size} for retry.')
results['error'] = error_str
_logger.error(f'{args.model} failed to validate ({error_str}).')
return results
_NON_IN1K_FILTERS = ['*_in21k', '*_in22k', '*in12k', '*_dino', '*fcmae', '*seer']
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
checkpoints += glob.glob(args.checkpoint + '/*.pth')
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(
pretrained=True,
exclude_filters=_NON_IN1K_FILTERS,
)
model_cfgs = [(n, '') for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(
args.model,
pretrained=True,
)
model_cfgs = [(n, '') for n in model_names]
if not model_cfgs and os.path.isfile(args.model):
with open(args.model) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names if n]
if len(model_cfgs):
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
initial_batch_size = args.batch_size
for m, c in model_cfgs:
args.model = m
args.checkpoint = c
r = _try_run(args, initial_batch_size)
if 'error' in r:
continue
if args.checkpoint:
r['checkpoint'] = args.checkpoint
results.append(r)
except KeyboardInterrupt as e:
pass
results = sorted(results, key=lambda x: x['top1'], reverse=True)
else:
if args.retry:
results = _try_run(args, args.batch_size)
else:
results = validate(args)
if args.results_file:
write_results(args.results_file, results, format=args.results_format)
# output results in JSON to stdout w/ delimiter for runner script
print(f'--result\n{json.dumps(results, indent=4)}')
def write_results(results_file, results, format='csv'):
with open(results_file, mode='w') as cf:
if format == 'json':
json.dump(results, cf, indent=4)
else:
if not isinstance(results, (list, tuple)):
results = [results]
if not results:
return
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/onnx_validate.py | """ ONNX-runtime validation script
This script was created to verify accuracy and performance of exported ONNX
models running with the onnxruntime. It utilizes the PyTorch dataloader/processing
pipeline for a fair comparison against the originals.
Copyright 2020 Ross Wightman
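
Example usage (paths are illustrative):
    python onnx_validate.py /path/to/imagenet-val --onnx-input ./model.onnx -b 64 --workers 2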
"""
import argparse
import numpy as np
import onnxruntime
from timm.data import create_loader, resolve_data_config, create_dataset
from timm.utils import AverageMeter
import time
parser = argparse.ArgumentParser(description='ONNX Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--onnx-input', default='', type=str, metavar='PATH',
help='path to onnx model/weights file')
parser.add_argument('--onnx-output-opt', default='', type=str, metavar='PATH',
help='path to output optimized onnx graph')
parser.add_argument('--profile', action='store_true', default=False,
help='Enable profiler output.')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of dataset')
parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT',
help='Override default crop pct of 0.875')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
def main():
args = parser.parse_args()
args.gpu_id = 0
# Set graph optimization level
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
if args.profile:
sess_options.enable_profiling = True
if args.onnx_output_opt:
sess_options.optimized_model_filepath = args.onnx_output_opt
session = onnxruntime.InferenceSession(args.onnx_input, sess_options)
data_config = resolve_data_config(vars(args))
loader = create_loader(
create_dataset('', args.data),
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=False,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=data_config['crop_pct']
)
input_name = session.get_inputs()[0].name
batch_time = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(loader):
# run the net and return prediction
output = session.run([], {input_name: input.data.numpy()})
output = output[0]
# measure accuracy and record loss
prec1, prec5 = accuracy_np(output, target.numpy())
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print(
f'Test: [{i}/{len(loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {input.size(0) / batch_time.avg:.3f}/s, '
f'{1000 * batch_time.avg / input.size(0):.3f} ms/sample) \t'
f'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
f'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'
)
print(f' * Prec@1 {top1.avg:.3f} ({100-top1.avg:.3f}) Prec@5 {top5.avg:.3f} ({100.-top5.avg:.3f})')
def accuracy_np(output, target):
    # Sort class indices by descending score, then check whether the integer target
    # matches the top-1 prediction and whether it appears among the top-5 predictions.
    max_indices = np.argsort(output, axis=1)[:, ::-1]
    top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean()
    top1 = 100 * np.equal(max_indices[:, 0], target).mean()
    return top1, top5
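# Illustrative check of accuracy_np (added as a comment, not part of the original
# script): for a single sample scored [0.1, 0.7, 0.2] with target class 1,
#   accuracy_np(np.array([[0.1, 0.7, 0.2]]), np.array([1]))
# returns (100.0, 100.0), since class 1 is the top-1 prediction and is therefore
# also inside the top-5 set.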
if __name__ == '__main__':
main()
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 Ross Wightman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
hf_public_repos | hf_public_repos/pytorch-image-models/model-index.yml | Import:
- ./docs/models/*.md
Library:
Name: PyTorch Image Models
Headline: PyTorch image models, scripts, pretrained weights
Website: https://rwightman.github.io/pytorch-image-models/
Repository: https://github.com/rwightman/pytorch-image-models
Docs: https://rwightman.github.io/pytorch-image-models/
README: "# PyTorch Image Models\r\n\r\nPyTorch Image Models (TIMM) is a library\
\ for state-of-the-art image classification. With this library you can:\r\n\r\n\
- Choose from 300+ pre-trained state-of-the-art image classification models.\r\
\n- Train models afresh on research datasets such as ImageNet using provided scripts.\r\
\n- Finetune pre-trained models on your own datasets, including the latest cutting\
\ edge models."
| 0 |
hf_public_repos/pytorch-image-models | hf_public_repos/pytorch-image-models/results/results-imagenet-a.csv | model,top1,top1_err,top5,top5_err,param_count,img_size,crop_pct,interpolation,top1_diff,top5_diff,rank_diff
eva02_large_patch14_448.mim_m38m_ft_in22k_in1k,88.227,11.773,97.093,2.907,305.08,448,1.000,bicubic,-10.623,-2.787,+1
eva02_large_patch14_448.mim_in22k_ft_in22k_in1k,87.893,12.107,96.920,3.080,305.08,448,1.000,bicubic,-11.037,-2.990,-1
eva_giant_patch14_560.m30m_ft_in22k_in1k,87.573,12.427,96.893,3.107,"1,014.45",560,1.000,bicubic,-11.257,-3.007,+1
eva02_large_patch14_448.mim_m38m_ft_in1k,87.107,12.893,96.280,3.720,305.08,448,1.000,bicubic,-11.623,-3.590,+5
eva02_large_patch14_448.mim_in22k_ft_in1k,86.227,13.773,95.787,4.213,305.08,448,1.000,bicubic,-12.613,-4.043,-2
eva_giant_patch14_336.clip_ft_in1k,85.307,14.693,95.720,4.280,"1,013.01",336,1.000,bicubic,-13.513,-4.090,-1
eva_giant_patch14_336.m30m_ft_in22k_in1k,85.147,14.853,96.360,3.640,"1,013.01",336,1.000,bicubic,-13.663,-3.540,-1
tf_efficientnet_l2.ns_jft_in1k,84.747,15.253,96.147,3.853,480.31,800,0.960,bicubic,-13.803,-3.673,+9
regnety_1280.swag_ft_in1k,83.907,16.093,96.200,3.800,644.81,384,1.000,bicubic,-14.543,-3.670,+18
eva_large_patch14_336.in22k_ft_in22k_in1k,83.853,16.147,95.347,4.653,304.53,336,1.000,bicubic,-14.887,-4.453,-3
convnextv2_huge.fcmae_ft_in22k_in1k_512,83.827,16.173,96.173,3.827,660.29,512,1.000,bicubic,-14.773,-3.697,+4
maxvit_xlarge_tf_512.in21k_ft_in1k,83.400,16.600,95.520,4.480,475.77,512,1.000,bicubic,-15.220,-4.270,+1
tf_efficientnet_l2.ns_jft_in1k_475,83.400,16.600,95.453,4.547,480.31,475,0.936,bicubic,-15.100,-4.327,+8
eva_large_patch14_336.in22k_ft_in1k,82.760,17.240,95.507,4.493,304.53,336,1.000,bicubic,-15.970,-4.283,-6
maxvit_large_tf_512.in21k_ft_in1k,81.733,18.267,95.027,4.973,212.33,512,1.000,bicubic,-16.887,-4.773,-1
beit_large_patch16_512.in22k_ft_in22k_in1k,81.600,18.400,94.880,5.120,305.67,512,1.000,bicubic,-16.960,-4.960,0
maxvit_base_tf_512.in21k_ft_in1k,81.360,18.640,94.467,5.533,119.88,512,1.000,bicubic,-17.260,-5.333,-5
eva_giant_patch14_224.clip_ft_in1k,81.213,18.787,94.333,5.667,"1,012.56",224,0.900,bicubic,-17.247,-5.417,+8
maxvit_xlarge_tf_384.in21k_ft_in1k,81.067,18.933,94.640,5.360,475.32,384,1.000,bicubic,-17.433,-5.190,+3
convnextv2_huge.fcmae_ft_in22k_in1k_384,79.893,20.107,94.640,5.360,660.29,384,1.000,bicubic,-18.777,-5.220,-10
deit3_large_patch16_384.fb_in22k_ft_in1k,79.187,20.813,93.613,6.387,304.76,384,1.000,bicubic,-19.273,-6.147,+4
beit_large_patch16_384.in22k_ft_in22k_in1k,79.107,20.893,94.267,5.733,305.00,384,1.000,bicubic,-19.413,-5.553,-3
caformer_b36.sail_in22k_ft_in1k_384,78.360,21.640,93.467,6.533,98.75,384,1.000,bicubic,-20.080,-6.333,+6
maxvit_large_tf_384.in21k_ft_in1k,78.013,21.987,93.267,6.733,212.03,384,1.000,bicubic,-20.477,-6.483,-1
eva02_base_patch14_448.mim_in22k_ft_in1k,77.547,22.453,93.120,6.880,87.12,448,1.000,bicubic,-20.893,-6.680,+3
vit_large_patch14_clip_336.openai_ft_in12k_in1k,77.333,22.667,93.627,6.373,304.53,336,1.000,bicubic,-20.927,-6.143,+16
convnext_xxlarge.clip_laion2b_soup_ft_in1k,77.120,22.880,94.320,5.680,846.47,256,1.000,bicubic,-21.320,-5.500,+3
eva02_base_patch14_448.mim_in22k_ft_in22k_in1k,76.893,23.107,92.693,7.307,87.12,448,1.000,bicubic,-21.747,-7.107,-17
maxvit_base_tf_384.in21k_ft_in1k,76.853,23.147,92.600,7.400,119.65,384,1.000,bicubic,-21.667,-7.150,-9
beitv2_large_patch16_224.in1k_ft_in22k_in1k,76.773,23.227,93.173,6.827,304.43,224,0.950,bicubic,-21.767,-6.587,-12
eva_large_patch14_196.in22k_ft_in22k_in1k,75.507,24.493,91.760,8.240,304.14,196,1.000,bicubic,-22.913,-8.050,+2
regnety_1280.swag_lc_in1k,74.587,25.413,91.680,8.320,644.81,224,0.965,bicubic,-23.063,-7.890,+79
maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k,74.253,25.747,90.827,9.173,116.14,384,1.000,bicubic,-23.917,-8.933,+19
vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k,74.240,25.760,92.253,7.747,632.46,336,1.000,bicubic,-24.180,-7.517,-2
regnety_320.swag_ft_in1k,74.200,25.800,92.960,7.040,145.05,384,1.000,bicubic,-23.860,-6.800,+30
swinv2_large_window12to24_192to384.ms_in22k_ft_in1k,73.933,26.067,91.733,8.267,196.74,384,1.000,bicubic,-24.197,-7.977,+21
eva_large_patch14_196.in22k_ft_in1k,73.160,26.840,91.413,8.587,304.14,196,1.000,bicubic,-25.200,-8.407,-2
caformer_m36.sail_in22k_ft_in1k_384,72.987,27.013,90.600,9.400,56.20,384,1.000,bicubic,-25.163,-9.150,+18
vit_large_patch14_clip_224.openai_ft_in12k_in1k,72.293,27.707,90.880,9.120,304.20,224,1.000,bicubic,-25.927,-8.840,+7
convnextv2_large.fcmae_ft_in22k_in1k_384,72.067,27.933,91.013,8.987,197.96,384,1.000,bicubic,-26.333,-8.747,-6
vit_large_patch14_clip_224.openai_ft_in1k,71.813,28.187,91.453,8.547,304.20,224,1.000,bicubic,-26.347,-8.207,+14
vit_large_patch14_clip_336.laion2b_ft_in12k_in1k,71.800,28.200,90.227,9.773,304.53,336,1.000,bicubic,-26.540,-9.533,-5
convformer_b36.sail_in22k_ft_in1k_384,71.547,28.453,90.240,9.760,99.88,384,1.000,bicubic,-26.713,-9.590,-3
vit_large_patch16_384.augreg_in21k_ft_in1k,71.227,28.773,89.773,10.227,304.72,384,1.000,bicubic,-26.993,-9.947,+1
swinv2_base_window12to24_192to384.ms_in22k_ft_in1k,71.200,28.800,91.320,8.680,87.92,384,1.000,bicubic,-26.920,-8.420,+12
deit3_base_patch16_384.fb_in22k_ft_in1k,71.200,28.800,89.933,10.067,86.88,384,1.000,bicubic,-26.640,-9.737,+43
convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384,70.720,29.280,90.547,9.453,200.13,384,1.000,bicubic,-27.760,-9.233,-23
vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k,70.680,29.320,90.400,9.600,632.05,224,1.000,bicubic,-27.620,-9.360,-10
coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k,70.600,29.400,89.227,10.773,73.88,384,1.000,bicubic,-27.470,-10.493,+15
caformer_s36.sail_in22k_ft_in1k_384,70.347,29.653,90.067,9.933,39.30,384,1.000,bicubic,-27.623,-9.653,+22
deit3_huge_patch14_224.fb_in22k_ft_in1k,70.253,29.747,90.707,9.293,632.13,224,1.000,bicubic,-27.917,-9.053,+2
convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320,69.880,30.120,90.493,9.507,200.13,320,1.000,bicubic,-28.400,-9.277,-13
swin_large_patch4_window12_384.ms_in22k_ft_in1k,69.640,30.360,89.547,10.453,196.74,384,1.000,bicubic,-28.410,-10.143,+14
volo_d5_512.sail_in1k,69.587,30.413,90.427,9.573,296.09,512,1.150,bicubic,-28.183,-9.243,+46
convnext_xlarge.fb_in22k_ft_in1k_384,69.320,30.680,89.307,10.693,350.20,384,1.000,bicubic,-29.100,-10.503,-24
caformer_b36.sail_in22k_ft_in1k,69.133,30.867,89.600,10.400,98.75,224,1.000,bicubic,-29.027,-10.180,-2
maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k,69.067,30.933,89.880,10.120,116.09,384,1.000,bicubic,-29.193,-9.900,-16
deit3_large_patch16_224.fb_in22k_ft_in1k,68.693,31.307,89.973,10.027,304.37,224,1.000,bicubic,-29.477,-9.757,-7
seresnextaa201d_32x8d.sw_in12k_ft_in1k_384,68.560,31.440,88.613,11.387,149.39,384,1.000,bicubic,-29.650,-11.167,-11
beit_large_patch16_224.in22k_ft_in22k_in1k,68.467,31.533,89.573,10.427,304.43,224,0.900,bicubic,-29.713,-10.187,-10
regnety_160.swag_ft_in1k,68.093,31.907,90.707,9.293,83.59,384,1.000,bicubic,-29.687,-8.893,+35
convnextv2_large.fcmae_ft_in22k_in1k,68.093,31.907,89.720,10.280,197.96,288,1.000,bicubic,-29.997,-10.050,0
volo_d5_448.sail_in1k,68.080,31.920,89.720,10.280,295.91,448,1.150,bicubic,-29.670,-9.830,+39
maxvit_base_tf_512.in1k,67.933,32.067,88.493,11.507,119.88,512,1.000,bicubic,-29.797,-11.117,+40
maxvit_large_tf_512.in1k,67.880,32.120,87.653,12.347,212.33,512,1.000,bicubic,-29.950,-11.907,+26
tf_efficientnetv2_xl.in21k_ft_in1k,67.787,32.213,87.347,12.653,208.12,512,1.000,bicubic,-30.113,-12.223,+15
beitv2_large_patch16_224.in1k_ft_in1k,67.640,32.360,88.667,11.333,304.43,224,0.950,bicubic,-30.270,-10.993,+10
swinv2_large_window12to16_192to256.ms_in22k_ft_in1k,67.320,32.680,88.000,12.000,196.74,256,0.900,bicubic,-30.530,-11.640,+18
vit_large_patch14_clip_336.laion2b_ft_in1k,67.080,32.920,89.493,10.507,304.53,336,1.000,bicubic,-31.140,-10.307,-22
tf_efficientnet_b7.ns_jft_in1k,67.027,32.973,88.667,11.333,66.35,600,0.949,bicubic,-30.893,-11.053,+6
vit_large_patch14_clip_224.laion2b_ft_in12k_in1k,67.000,33.000,87.960,12.040,304.20,224,1.000,bicubic,-31.080,-11.690,-9
convnext_xlarge.fb_in22k_ft_in1k,66.960,33.040,88.947,11.053,350.20,288,1.000,bicubic,-31.150,-10.833,-12
convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384,66.880,33.120,89.280,10.720,200.13,384,1.000,bicubic,-31.370,-10.480,-30
convformer_m36.sail_in22k_ft_in1k_384,66.853,33.147,87.800,12.200,57.05,384,1.000,bicubic,-31.187,-11.950,-5
convnextv2_base.fcmae_ft_in22k_in1k_384,66.787,33.213,88.893,11.107,88.72,384,1.000,bicubic,-31.563,-10.877,-39
volo_d4_448.sail_in1k,66.667,33.333,88.987,11.013,193.41,448,1.150,bicubic,-31.003,-10.623,+32
seresnextaa101d_32x8d.sw_in12k_ft_in1k_288,65.987,34.013,87.880,12.120,93.59,320,1.000,bicubic,-31.983,-11.820,-4
beit_base_patch16_384.in22k_ft_in22k_in1k,65.920,34.080,88.507,11.493,86.74,384,1.000,bicubic,-31.900,-11.193,+14
regnety_320.swag_lc_in1k,65.573,34.427,88.080,11.920,145.05,224,0.965,bicubic,-31.587,-11.590,+109
convnext_large.fb_in22k_ft_in1k_384,65.533,34.467,87.467,12.533,197.77,384,1.000,bicubic,-32.707,-12.283,-36
vit_huge_patch14_clip_224.laion2b_ft_in1k,65.493,34.507,87.720,12.280,632.05,224,1.000,bicubic,-32.527,-12.000,-11
volo_d3_448.sail_in1k,65.400,34.600,87.573,12.427,86.63,448,1.000,bicubic,-32.150,-11.987,+45
convnext_large.fb_in22k_ft_in1k,65.000,35.000,87.947,12.053,197.77,288,1.000,bicubic,-33.120,-11.833,-24
tf_efficientnetv2_l.in21k_ft_in1k,64.947,35.053,87.840,12.160,118.52,480,1.000,bicubic,-32.853,-11.820,+11
swin_base_patch4_window12_384.ms_in22k_ft_in1k,64.467,35.533,87.520,12.480,87.90,384,1.000,bicubic,-33.433,-12.150,-6
convnextv2_huge.fcmae_ft_in1k,64.440,35.560,87.080,12.920,660.29,288,1.000,bicubic,-33.460,-12.630,-6
maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k,64.187,35.813,85.520,14.480,116.14,224,0.950,bicubic,-33.623,-14.130,+7
vit_base_patch16_384.augreg_in21k_ft_in1k,63.693,36.307,86.693,13.307,86.86,384,1.000,bicubic,-34.147,-12.977,+2
maxvit_large_tf_384.in1k,63.507,36.493,85.093,14.907,212.03,384,1.000,bicubic,-34.063,-14.577,+37
swinv2_base_window12to16_192to256.ms_in22k_ft_in1k,63.307,36.693,87.507,12.493,87.92,256,0.900,bicubic,-34.353,-12.103,+19
convnextv2_base.fcmae_ft_in22k_in1k,62.893,37.107,87.667,12.333,88.72,288,1.000,bicubic,-35.167,-12.193,-25
maxvit_small_tf_512.in1k,62.867,37.133,86.307,13.693,69.13,512,1.000,bicubic,-34.883,-13.313,+11
maxvit_base_tf_384.in1k,62.613,37.387,85.187,14.813,119.65,384,1.000,bicubic,-34.957,-14.343,+32
convformer_b36.sail_in22k_ft_in1k,62.600,37.400,85.467,14.533,99.88,224,1.000,bicubic,-35.340,-14.293,-19
cait_m48_448.fb_dist_in1k,62.373,37.627,86.453,13.547,356.46,448,1.000,bicubic,-35.107,-13.147,+43
convnext_base.fb_in22k_ft_in1k_384,62.360,37.640,86.240,13.760,88.59,384,1.000,bicubic,-35.720,-13.520,-33
tf_efficientnet_b6.ns_jft_in1k,62.227,37.773,85.160,14.840,43.04,528,0.942,bicubic,-35.393,-14.390,+19
caformer_b36.sail_in1k_384,62.160,37.840,84.493,15.507,98.75,384,1.000,bicubic,-35.340,-15.147,+37
vit_base_patch8_224.augreg2_in21k_ft_in1k,62.027,37.973,85.840,14.160,86.58,224,0.900,bicubic,-35.663,-13.810,+8
convformer_s36.sail_in22k_ft_in1k_384,61.920,38.080,85.960,14.040,40.01,384,1.000,bicubic,-35.930,-13.690,-13
beitv2_base_patch16_224.in1k_ft_in22k_in1k,61.667,38.333,85.480,14.520,86.53,224,0.900,bicubic,-36.023,-14.200,+5
vit_large_r50_s32_384.augreg_in21k_ft_in1k,61.493,38.507,84.027,15.973,329.09,384,1.000,bicubic,-36.367,-15.643,-17
seresnextaa101d_32x8d.sw_in12k_ft_in1k,61.080,38.920,85.920,14.080,93.59,288,1.000,bicubic,-36.830,-13.740,-25
caformer_m36.sail_in22k_ft_in1k,61.067,38.933,84.893,15.107,56.20,224,1.000,bicubic,-36.773,-14.787,-15
swin_large_patch4_window7_224.ms_in22k_ft_in1k,61.000,39.000,85.867,14.133,196.53,224,0.900,bicubic,-36.650,-13.703,+8
convnext_base.fb_in22k_ft_in1k,60.893,39.107,86.147,13.853,88.59,288,1.000,bicubic,-36.967,-13.533,-22
resnetv2_152x4_bit.goog_in21k_ft_in1k,60.773,39.227,83.560,16.440,936.53,480,1.000,bilinear,-36.717,-16.050,+29
convnext_small.in12k_ft_in1k_384,60.733,39.267,84.960,15.040,50.22,384,1.000,bicubic,-37.067,-14.810,-12
convnext_large_mlp.clip_laion2b_augreg_ft_in1k,60.547,39.453,86.293,13.707,200.13,256,1.000,bicubic,-37.403,-13.417,-35
deit3_large_patch16_384.fb_in1k,60.533,39.467,85.733,14.267,304.76,384,1.000,bicubic,-36.887,-13.837,+36
caformer_m36.sail_in1k_384,60.533,39.467,84.760,15.240,56.20,384,1.000,bicubic,-36.907,-14.880,+35
tf_efficientnet_b5.ns_jft_in1k,60.293,39.707,84.453,15.547,30.39,456,0.934,bicubic,-37.207,-15.127,+22
vit_base_patch16_clip_384.openai_ft_in12k_in1k,60.227,39.773,84.613,15.387,86.86,384,0.950,bicubic,-37.963,-15.047,-64
regnety_160.swag_lc_in1k,60.147,39.853,85.760,14.240,83.59,224,0.965,bicubic,-36.673,-13.890,+131
coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k,59.973,40.027,83.760,16.240,73.88,224,0.950,bicubic,-37.677,-15.880,-3
vit_large_patch14_clip_224.laion2b_ft_in1k,59.947,40.053,85.667,14.333,304.20,224,1.000,bicubic,-37.943,-13.983,-34
xcit_large_24_p8_384.fb_dist_in1k,59.947,40.053,85.467,14.533,188.93,384,1.000,bicubic,-37.573,-14.073,+15
coatnet_2_rw_224.sw_in12k_ft_in1k,59.533,40.467,84.213,15.787,73.87,224,0.950,bicubic,-37.987,-15.387,+13
tf_efficientnetv2_m.in21k_ft_in1k,59.413,40.587,84.573,15.427,54.14,480,1.000,bicubic,-38.407,-15.027,-26
maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k,59.160,40.840,84.480,15.520,116.09,224,0.950,bicubic,-38.600,-15.220,-19
vit_base_patch16_clip_384.laion2b_ft_in12k_in1k,59.147,40.853,83.280,16.720,86.86,384,1.000,bicubic,-38.843,-16.380,-50
vit_base_patch8_224.augreg_in21k_ft_in1k,58.960,41.040,82.733,17.267,86.58,224,0.900,bicubic,-38.610,-16.857,+2
maxvit_tiny_tf_512.in1k,58.800,41.200,84.573,15.427,31.05,512,1.000,bicubic,-38.780,-14.987,0
tiny_vit_21m_512.dist_in22k_ft_in1k,58.733,41.267,83.693,16.307,21.27,512,1.000,bicubic,-39.137,-15.937,-41
caformer_s18.sail_in22k_ft_in1k_384,58.640,41.360,85.347,14.653,26.34,384,1.000,bicubic,-38.780,-14.273,+23
volo_d2_384.sail_in1k,58.600,41.400,84.280,15.720,58.87,384,1.000,bicubic,-38.720,-15.320,+35
tiny_vit_21m_384.dist_in22k_ft_in1k,58.320,41.680,83.653,16.347,21.23,384,1.000,bicubic,-39.290,-15.937,-8
convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384,58.253,41.747,85.480,14.520,88.59,384,1.000,bicubic,-39.787,-14.210,-60
resnext101_32x32d.fb_wsl_ig1b_ft_in1k,58.040,41.960,80.640,19.360,468.53,224,0.875,bilinear,-39.330,-19.040,+26
cait_m36_384.fb_dist_in1k,57.840,42.160,84.840,15.160,271.22,384,1.000,bicubic,-39.560,-14.670,+22
dm_nfnet_f5.dm_in1k,57.640,42.360,82.267,17.733,377.21,544,0.954,bicubic,-40.140,-17.493,-32
dm_nfnet_f6.dm_in1k,57.560,42.440,82.360,17.640,438.36,576,0.956,bicubic,-40.220,-17.290,-34
caformer_s36.sail_in1k_384,57.347,42.653,82.787,17.213,39.30,384,1.000,bicubic,-40.043,-16.753,+20
deit3_base_patch16_224.fb_in22k_ft_in1k,57.267,42.733,83.520,16.480,86.59,224,1.000,bicubic,-40.213,-16.030,+3
volo_d5_224.sail_in1k,57.107,42.893,82.733,17.267,295.46,224,0.960,bicubic,-40.273,-16.837,+19
convnext_base.clip_laiona_augreg_ft_in1k_384,57.080,42.920,84.773,15.227,88.59,384,1.000,bicubic,-40.540,-14.807,-19
deit3_small_patch16_384.fb_in22k_ft_in1k,57.080,42.920,83.067,16.933,22.21,384,1.000,bicubic,-40.050,-16.443,+55
convnextv2_large.fcmae_ft_in1k,56.880,43.120,83.467,16.533,197.96,288,1.000,bicubic,-40.780,-16.253,-28
regnety_160.lion_in12k_ft_in1k,56.747,43.253,83.453,16.547,83.59,288,1.000,bicubic,-40.703,-16.147,+2
dm_nfnet_f4.dm_in1k,56.707,43.293,81.760,18.240,316.07,512,0.951,bicubic,-40.933,-17.780,-26
xcit_medium_24_p8_384.fb_dist_in1k,56.693,43.307,83.453,16.547,84.32,384,1.000,bicubic,-40.587,-16.067,+27
maxvit_small_tf_384.in1k,56.600,43.400,82.293,17.707,69.02,384,1.000,bicubic,-40.830,-17.217,+4
regnety_160.sw_in12k_ft_in1k,56.253,43.747,82.840,17.160,83.59,288,1.000,bicubic,-41.197,-16.750,-1
convformer_m36.sail_in22k_ft_in1k,55.880,44.120,81.813,18.187,57.05,224,1.000,bicubic,-41.720,-17.807,-23
caformer_s36.sail_in22k_ft_in1k,55.813,44.187,82.133,17.867,39.30,224,1.000,bicubic,-41.787,-17.587,-23
vit_large_patch16_224.augreg_in21k_ft_in1k,55.573,44.427,80.107,19.893,304.33,224,0.900,bicubic,-42.057,-19.483,-31
convformer_b36.sail_in1k_384,55.453,44.547,81.293,18.707,99.88,384,1.000,bicubic,-42.077,-18.227,-18
vit_base_patch16_clip_384.openai_ft_in1k,55.000,45.000,82.613,17.387,86.86,384,1.000,bicubic,-42.540,-17.047,-20
vit_base_r50_s16_384.orig_in21k_ft_in1k,54.627,45.373,81.213,18.787,98.95,384,1.000,bicubic,-42.563,-18.347,+37
cait_s36_384.fb_dist_in1k,54.360,45.640,81.373,18.627,68.37,384,1.000,bicubic,-42.970,-18.167,+10
volo_d1_384.sail_in1k,54.333,45.667,81.000,19.000,26.78,384,1.000,bicubic,-42.577,-18.460,+81
deit3_huge_patch14_224.fb_in1k,54.320,45.680,82.093,17.907,632.13,224,0.900,bicubic,-42.580,-17.127,+82
vit_base_patch16_clip_384.laion2b_ft_in1k,54.267,45.733,80.880,19.120,86.86,384,1.000,bicubic,-43.453,-18.750,-48
xcit_small_24_p8_384.fb_dist_in1k,54.253,45.747,81.547,18.453,47.63,384,1.000,bicubic,-42.987,-18.003,+20
vit_medium_patch16_gap_384.sw_in12k_ft_in1k,54.173,45.827,81.640,18.360,39.03,384,0.950,bicubic,-43.267,-17.960,-11
resnetv2_101x3_bit.goog_in21k_ft_in1k,54.027,45.973,81.027,18.973,387.93,448,1.000,bilinear,-42.953,-18.503,+63
resnetv2_152x2_bit.goog_in21k_ft_in1k,54.013,45.987,82.000,18.000,236.34,448,1.000,bilinear,-42.987,-17.590,+58
beitv2_base_patch16_224.in1k_ft_in1k,53.813,46.187,81.853,18.147,86.53,224,0.900,bicubic,-43.357,-17.617,+29
dm_nfnet_f3.dm_in1k,53.773,46.227,79.813,20.187,254.92,416,0.940,bicubic,-43.697,-19.747,-20
convformer_m36.sail_in1k_384,53.547,46.453,80.733,19.267,57.05,384,1.000,bicubic,-43.863,-18.867,-9
deit3_base_patch16_384.fb_in1k,53.427,46.573,80.547,19.453,86.88,384,1.000,bicubic,-43.613,-18.833,+50
convnext_small.in12k_ft_in1k,53.240,46.760,81.400,18.600,50.22,288,1.000,bicubic,-44.110,-18.180,-4
resnext101_32x16d.fb_wsl_ig1b_ft_in1k,53.067,46.933,76.907,23.093,194.03,224,0.875,bilinear,-43.743,-22.693,+83
volo_d4_224.sail_in1k,52.987,47.013,80.427,19.573,192.96,224,0.960,bicubic,-44.293,-19.113,+3
xcit_large_24_p16_384.fb_dist_in1k,52.853,47.147,81.827,18.173,189.10,384,1.000,bicubic,-44.667,-17.653,-32
convnext_small.fb_in22k_ft_in1k_384,52.427,47.573,80.813,19.187,50.22,384,1.000,bicubic,-45.183,-18.787,-48
convnext_base.clip_laion2b_augreg_ft_in12k_in1k,52.320,47.680,82.480,17.520,88.59,256,1.000,bicubic,-45.280,-17.130,-47
vit_base_patch32_clip_448.laion2b_ft_in12k_in1k,52.320,47.680,79.747,20.253,88.34,448,1.000,bicubic,-44.990,-19.733,-5
maxvit_tiny_tf_384.in1k,52.080,47.920,79.813,20.187,30.98,384,1.000,bicubic,-45.230,-19.687,-7
regnety_120.sw_in12k_ft_in1k,51.813,48.187,80.787,19.213,51.82,288,1.000,bicubic,-45.467,-18.743,-4
swin_base_patch4_window7_224.ms_in22k_ft_in1k,51.440,48.560,80.080,19.920,87.77,224,0.900,bicubic,-45.840,-19.510,-6
tf_efficientnet_b4.ns_jft_in1k,51.253,48.747,79.173,20.827,19.34,380,0.922,bicubic,-45.697,-20.087,+53
efficientnet_b5.sw_in12k_ft_in1k,51.213,48.787,78.840,21.160,30.39,448,1.000,bicubic,-46.197,-20.710,-23
resnext101_32x8d.fb_swsl_ig1b_ft_in1k,51.213,48.787,78.240,21.760,88.79,224,0.875,bilinear,-45.997,-21.330,+6
flexivit_large.1200ep_in1k,51.200,48.800,80.667,19.333,304.36,240,0.950,bicubic,-46.210,-18.803,-27
eva02_small_patch14_336.mim_in22k_ft_in1k,51.200,48.800,79.120,20.880,22.13,336,1.000,bicubic,-45.940,-20.350,+16
resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384,51.120,48.880,78.533,21.467,236.34,384,1.000,bicubic,-45.710,-20.877,+66
convnext_small.fb_in22k_ft_in1k,51.067,48.933,80.920,19.080,50.22,288,1.000,bicubic,-46.293,-18.610,-22
mvitv2_large.fb_in1k,50.867,49.133,78.493,21.507,217.99,224,0.900,bicubic,-46.063,-20.907,+50
beit_base_patch16_224.in22k_ft_in22k_in1k,50.720,49.280,79.733,20.267,86.53,224,0.900,bicubic,-46.360,-19.877,+21
tf_efficientnetv2_l.in1k,50.693,49.307,77.613,22.387,118.52,480,1.000,bicubic,-46.777,-21.917,-41
vit_base_patch16_384.orig_in21k_ft_in1k,50.653,49.347,78.200,21.800,86.86,384,1.000,bicubic,-46.067,-21.280,+78
xcit_small_12_p8_384.fb_dist_in1k,50.587,49.413,79.573,20.427,26.21,384,1.000,bicubic,-46.643,-19.907,-6
convformer_s36.sail_in1k_384,50.333,49.667,78.893,21.107,40.01,384,1.000,bicubic,-46.947,-20.577,-14
convnext_tiny.in12k_ft_in1k_384,50.320,49.680,79.800,20.200,28.59,384,1.000,bicubic,-47.020,-19.800,-26
volo_d3_224.sail_in1k,50.320,49.680,78.213,21.787,86.33,224,0.960,bicubic,-46.770,-21.257,+14
flexivit_large.600ep_in1k,50.253,49.747,80.013,19.987,304.36,240,0.950,bicubic,-47.027,-19.417,-23
vit_base_patch16_clip_224.laion2b_ft_in12k_in1k,50.120,49.880,78.040,21.960,86.57,224,0.950,bicubic,-47.330,-21.500,-45
convformer_s18.sail_in22k_ft_in1k_384,50.067,49.933,80.973,19.027,26.77,384,1.000,bicubic,-47.203,-18.577,-18
vit_base_patch16_224.augreg2_in21k_ft_in1k,49.827,50.173,78.960,21.040,86.57,224,0.900,bicubic,-47.323,-20.490,-1
vit_base_patch16_clip_224.openai_ft_in12k_in1k,49.800,50.200,77.120,22.880,86.57,224,0.950,bicubic,-47.730,-22.380,-61
cait_s24_384.fb_dist_in1k,49.733,50.267,78.760,21.240,47.06,384,1.000,bicubic,-47.337,-20.670,+16
inception_next_base.sail_in1k_384,49.693,50.307,79.133,20.867,86.67,384,1.000,bicubic,-47.567,-20.357,-21
xcit_medium_24_p16_384.fb_dist_in1k,49.333,50.667,79.867,20.133,84.40,384,1.000,bicubic,-47.947,-19.643,-26
deit_base_distilled_patch16_384.fb_in1k,49.333,50.667,79.227,20.773,87.63,384,1.000,bicubic,-47.627,-20.253,+28
caformer_s18.sail_in1k_384,49.147,50.853,78.693,21.307,26.34,384,1.000,bicubic,-47.933,-20.797,+11
coat_lite_medium_384.in1k,49.093,50.907,78.600,21.400,44.57,384,1.000,bicubic,-48.057,-20.940,-7
tf_efficientnet_b8.ra_in1k,48.933,51.067,77.227,22.773,87.41,672,0.954,bicubic,-48.267,-22.273,-14
convnextv2_base.fcmae_ft_in1k,48.693,51.307,78.827,21.173,88.72,288,1.000,bicubic,-48.527,-20.713,-21
deit3_large_patch16_224.fb_in1k,48.627,51.373,78.107,21.893,304.37,224,0.900,bicubic,-48.313,-21.153,+27
caformer_b36.sail_in1k,48.533,51.467,75.667,24.333,98.75,224,1.000,bicubic,-48.447,-23.823,+19
flexivit_large.300ep_in1k,48.520,51.480,78.653,21.347,304.36,240,0.950,bicubic,-48.730,-20.837,-29
convnext_base.clip_laion2b_augreg_ft_in1k,48.453,51.547,79.827,20.173,88.59,256,1.000,bicubic,-48.787,-19.693,-28
tf_efficientnetv2_s.in21k_ft_in1k,48.453,51.547,77.867,22.133,21.46,384,1.000,bicubic,-48.277,-21.463,+54
resnest269e.in1k,48.213,51.787,74.333,25.667,110.93,416,0.928,bicubic,-48.297,-25.017,+106
xcit_large_24_p8_224.fb_dist_in1k,48.160,51.840,79.093,20.907,188.93,224,1.000,bicubic,-48.910,-20.327,+2
deit3_medium_patch16_224.fb_in22k_ft_in1k,48.160,51.840,77.027,22.973,38.85,224,1.000,bicubic,-48.810,-22.403,+15
vit_base_patch32_clip_384.laion2b_ft_in12k_in1k,47.960,52.040,76.853,23.147,88.30,384,1.000,bicubic,-49.400,-22.667,-51
regnety_2560.seer_ft_in1k,47.907,52.093,76.800,23.200,"1,282.60",384,1.000,bicubic,-49.313,-22.720,-30
regnetz_e8.ra3_in1k,47.800,52.200,76.200,23.800,57.70,320,1.000,bicubic,-49.400,-23.340,-27
convnext_tiny.in12k_ft_in1k,47.507,52.493,78.747,21.253,28.59,288,1.000,bicubic,-49.553,-20.803,-1
convformer_s36.sail_in22k_ft_in1k,47.440,52.560,77.107,22.893,40.01,224,1.000,bicubic,-49.640,-22.453,-9
resnetv2_50x3_bit.goog_in21k_ft_in1k,47.307,52.693,77.333,22.667,217.32,448,1.000,bilinear,-49.403,-22.157,+49
vit_base_patch16_clip_224.openai_ft_in1k,47.200,52.800,77.533,22.467,86.57,224,0.900,bicubic,-49.880,-22.077,-8
xcit_large_24_p8_224.fb_in1k,47.160,52.840,74.480,25.520,188.93,224,1.000,bicubic,-49.240,-24.510,+112
xcit_small_24_p16_384.fb_dist_in1k,46.987,53.013,77.160,22.840,47.67,384,1.000,bicubic,-50.133,-22.290,-22
tf_efficientnet_b8.ap_in1k,46.893,53.107,76.533,23.467,87.41,672,0.954,bicubic,-50.217,-22.977,-22
convnext_large.fb_in1k,46.813,53.187,76.627,23.373,197.77,288,1.000,bicubic,-50.287,-22.823,-20
dm_nfnet_f2.dm_in1k,46.640,53.360,74.813,25.187,193.78,352,0.920,bicubic,-50.470,-24.847,-23
efficientnetv2_rw_m.agc_in1k,46.293,53.707,75.720,24.280,53.24,416,1.000,bicubic,-50.687,-23.620,-2
swinv2_base_window16_256.ms_in1k,46.213,53.787,75.173,24.827,87.92,256,0.900,bicubic,-50.537,-24.177,+34
resnext101_32x16d.fb_swsl_ig1b_ft_in1k,46.120,53.880,72.240,27.760,194.03,224,0.875,bilinear,-50.480,-27.030,+68
caformer_m36.sail_in1k,46.080,53.920,74.533,25.467,56.20,224,1.000,bicubic,-50.810,-24.897,+13
volo_d2_224.sail_in1k,46.067,53.933,75.253,24.747,58.68,224,0.960,bicubic,-50.933,-24.137,-7
vit_small_patch16_384.augreg_in21k_ft_in1k,45.893,54.107,76.720,23.280,22.20,384,1.000,bicubic,-50.807,-22.710,+40
ecaresnet269d.ra2_in1k,45.853,54.147,75.067,24.933,102.09,352,1.000,bicubic,-51.237,-24.403,-27
convnextv2_tiny.fcmae_ft_in22k_in1k_384,45.760,54.240,76.973,23.027,28.64,384,1.000,bicubic,-51.480,-22.637,-51
vit_small_r26_s32_384.augreg_in21k_ft_in1k,45.720,54.280,76.040,23.960,36.47,384,1.000,bicubic,-50.960,-23.040,+48
tf_efficientnet_b7.ap_in1k,45.373,54.627,74.200,25.800,66.35,600,0.949,bicubic,-51.827,-25.300,-47
swin_base_patch4_window12_384.ms_in1k,45.333,54.667,74.360,25.640,87.90,384,1.000,bicubic,-51.247,-24.810,+64
resnext101_32x8d.fb_wsl_ig1b_ft_in1k,45.320,54.680,70.907,29.093,88.79,224,0.875,bilinear,-51.010,-28.493,+111
xcit_medium_24_p8_224.fb_dist_in1k,45.227,54.773,76.760,23.240,84.32,224,1.000,bicubic,-51.693,-22.640,-1
tiny_vit_21m_224.dist_in22k_ft_in1k,45.027,54.973,75.547,24.453,21.20,224,0.950,bicubic,-52.173,-23.943,-48
eca_nfnet_l2.ra3_in1k,44.987,55.013,75.880,24.120,56.72,384,1.000,bicubic,-52.093,-23.630,-29
coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k,44.787,55.213,73.747,26.253,41.72,224,0.950,bicubic,-51.843,-25.473,+51
maxxvit_rmlp_small_rw_256.sw_in1k,44.600,55.400,75.040,24.960,66.01,256,0.950,bicubic,-52.200,-24.340,+11
crossvit_18_dagger_408.in1k,44.253,55.747,73.840,26.160,44.61,408,1.000,bicubic,-52.287,-25.520,+68
resnest200e.in1k,44.133,55.867,73.493,26.507,70.20,320,0.909,bicubic,-52.477,-25.857,+51
seresnextaa101d_32x8d.ah_in1k,43.973,56.027,73.347,26.653,93.59,288,1.000,bicubic,-52.987,-25.903,-16
cait_xs24_384.fb_dist_in1k,43.947,56.053,75.160,24.840,26.67,384,1.000,bicubic,-52.593,-24.260,+62
mvitv2_base.fb_in1k,43.747,56.253,74.507,25.493,51.47,224,0.900,bicubic,-53.013,-24.753,+12
resnetrs200.tf_in1k,43.733,56.267,72.827,27.173,93.21,320,1.000,bicubic,-52.967,-26.543,+26
rexnetr_300.sw_in12k_ft_in1k,43.653,56.347,76.507,23.493,34.81,288,1.000,bicubic,-53.187,-23.003,-1
tresnet_xl.miil_in1k_448,43.493,56.507,72.427,27.573,78.44,448,0.875,bilinear,-52.487,-26.623,+177
caformer_s18.sail_in22k_ft_in1k,43.333,56.667,74.960,25.040,26.34,224,1.000,bicubic,-53.377,-24.590,+18
xcit_small_12_p16_384.fb_dist_in1k,43.307,56.693,73.933,26.067,26.25,384,1.000,bicubic,-53.613,-25.457,-16
vit_base_patch16_224.augreg_in21k_ft_in1k,43.267,56.733,72.907,27.093,86.57,224,0.900,bicubic,-53.613,-26.623,-10
swin_small_patch4_window7_224.ms_in22k_ft_in1k,43.227,56.773,76.187,23.813,49.61,224,0.900,bicubic,-53.253,-23.203,+65
resnetrs420.tf_in1k,43.147,56.853,70.440,29.560,191.89,416,1.000,bicubic,-53.763,-29.080,-16
vit_base_patch32_clip_384.openai_ft_in12k_in1k,43.107,56.893,73.253,26.747,88.30,384,0.950,bicubic,-54.003,-26.247,-53
edgenext_base.in21k_ft_in1k,43.040,56.960,75.440,24.560,18.51,320,1.000,bicubic,-53.690,-23.980,+5
coatnet_rmlp_2_rw_224.sw_in1k,43.040,56.960,71.693,28.307,73.88,224,0.950,bicubic,-53.500,-27.607,+55
xcit_medium_24_p8_224.fb_in1k,43.040,56.960,70.320,29.680,84.32,224,1.000,bicubic,-53.050,-28.570,+142
tf_efficientnet_b7.ra_in1k,42.973,57.027,73.147,26.853,66.35,600,0.949,bicubic,-54.027,-26.373,-38
tf_efficientnetv2_m.in1k,42.813,57.187,72.600,27.400,54.14,480,1.000,bicubic,-54.397,-26.930,-74
vit_medium_patch16_gap_256.sw_in12k_ft_in1k,42.707,57.293,74.307,25.693,38.86,256,0.950,bicubic,-53.963,-25.063,+23
hrnet_w48_ssld.paddle_in1k,42.600,57.400,72.147,27.853,77.47,288,1.000,bilinear,-54.430,-27.243,-44
dm_nfnet_f1.dm_in1k,42.547,57.453,71.560,28.440,132.63,320,0.910,bicubic,-54.483,-28.080,-44
xcit_tiny_24_p8_384.fb_dist_in1k,42.453,57.547,72.853,27.147,12.11,384,1.000,bicubic,-54.077,-26.417,+49
gcvit_base.in1k,42.440,57.560,73.773,26.227,90.32,224,0.875,bicubic,-54.120,-25.377,+39
swinv2_small_window16_256.ms_in1k,42.360,57.640,72.867,27.133,49.73,256,0.900,bicubic,-54.110,-26.333,+55
maxvit_rmlp_small_rw_224.sw_in1k,42.227,57.773,72.387,27.613,64.90,224,0.900,bicubic,-54.353,-26.863,+35
convformer_s18.sail_in1k_384,42.120,57.880,74.453,25.547,26.77,384,1.000,bicubic,-54.920,-24.937,-51
convnextv2_tiny.fcmae_ft_in22k_in1k,42.093,57.907,75.747,24.253,28.64,288,1.000,bicubic,-54.757,-23.713,-24
maxvit_base_tf_224.in1k,41.987,58.013,70.080,29.920,119.47,224,0.950,bicubic,-54.963,-29.500,-39
convnext_base.fb_in1k,41.947,58.053,73.987,26.013,88.59,288,1.000,bicubic,-54.883,-25.463,-22
xcit_small_24_p8_224.fb_dist_in1k,41.907,58.093,73.653,26.347,47.63,224,1.000,bicubic,-54.963,-25.827,-30
crossvit_15_dagger_408.in1k,41.907,58.093,72.040,27.960,28.50,408,1.000,bicubic,-54.483,-27.310,+63
resnetaa101d.sw_in12k_ft_in1k,41.893,58.107,72.387,27.613,44.57,288,1.000,bicubic,-54.807,-26.973,0
maxvit_large_tf_224.in1k,41.880,58.120,68.653,31.347,211.79,224,0.950,bicubic,-55.080,-30.737,-46
xcit_small_24_p8_224.fb_in1k,41.800,58.200,71.080,28.920,47.63,224,1.000,bicubic,-54.610,-28.070,+55
vit_large_r50_s32_224.augreg_in21k_ft_in1k,41.640,58.360,70.227,29.773,328.99,224,0.900,bicubic,-55.150,-29.113,-23
vit_base_patch16_clip_224.laion2b_ft_in1k,41.613,58.387,73.627,26.373,86.57,224,1.000,bicubic,-55.517,-25.833,-80
swinv2_base_window8_256.ms_in1k,41.547,58.453,72.427,27.573,87.92,256,0.900,bicubic,-54.983,-26.893,+34
resnext101_32x4d.fb_swsl_ig1b_ft_in1k,41.547,58.453,71.720,28.280,44.18,224,0.875,bilinear,-54.873,-27.430,+48
deit3_small_patch16_224.fb_in22k_ft_in1k,41.200,58.800,71.893,28.107,22.06,224,1.000,bicubic,-55.460,-27.437,+5
maxvit_rmlp_tiny_rw_256.sw_in1k,41.173,58.827,71.187,28.813,29.15,256,0.950,bicubic,-55.247,-28.193,+46
regnety_1280.seer_ft_in1k,41.147,58.853,71.200,28.800,644.81,384,1.000,bicubic,-55.713,-28.190,-39
seresnext101d_32x8d.ah_in1k,41.147,58.853,70.920,29.080,93.59,288,1.000,bicubic,-55.553,-28.560,-9
convnext_tiny.fb_in22k_ft_in1k_384,41.053,58.947,72.533,27.467,28.59,384,1.000,bicubic,-56.027,-26.977,-76
caformer_s36.sail_in1k,41.040,58.960,70.893,29.107,39.30,224,1.000,bicubic,-55.650,-28.467,-8
davit_base.msft_in1k,40.880,59.120,72.747,27.253,87.95,224,0.950,bicubic,-56.060,-26.593,-54
tf_efficientnet_b6.ap_in1k,40.827,59.173,71.627,28.373,43.04,528,0.942,bicubic,-56.253,-27.793,-81
flexivit_base.1200ep_in1k,40.640,59.360,72.307,27.693,86.59,240,0.950,bicubic,-56.100,-27.053,-28
convformer_b36.sail_in1k,40.453,59.547,69.440,30.560,99.88,224,1.000,bicubic,-56.447,-30.040,-50
resmlp_big_24_224.fb_in22k_ft_in1k,40.373,59.627,74.800,25.200,129.14,224,0.875,bicubic,-56.247,-24.650,+1
deit3_small_patch16_384.fb_in1k,40.333,59.667,70.333,29.667,22.21,384,1.000,bicubic,-55.867,-28.957,+77
tresnet_l.miil_in1k_448,40.213,59.787,69.920,30.080,55.99,448,0.875,bilinear,-55.647,-29.280,+158
deit_base_patch16_384.fb_in1k,40.187,59.813,70.813,29.187,86.86,384,1.000,bicubic,-55.973,-28.427,+86
regnetz_040_h.ra3_in1k,40.000,60.000,71.320,28.680,28.94,320,1.000,bicubic,-56.700,-27.970,-26
resnetrs350.tf_in1k,39.960,60.040,68.907,31.093,163.96,384,1.000,bicubic,-56.790,-30.463,-37
flexivit_base.600ep_in1k,39.920,60.080,71.893,28.107,86.59,240,0.950,bicubic,-56.730,-27.437,-10
regnetz_d8.ra3_in1k,39.907,60.093,71.707,28.293,23.37,320,1.000,bicubic,-56.713,-27.803,-5
swin_s3_base_224.ms_in1k,39.853,60.147,70.467,29.533,71.13,224,0.900,bicubic,-56.387,-28.683,+62
flexivit_base.300ep_in1k,39.533,60.467,71.000,29.000,86.59,240,0.950,bicubic,-57.067,-28.530,-4
seresnext101_32x8d.ah_in1k,39.520,60.480,69.480,30.520,93.57,288,1.000,bicubic,-57.250,-29.870,-44
gcvit_small.in1k,39.400,60.600,70.480,29.520,51.09,224,0.875,bicubic,-56.880,-28.660,+53
regnetz_d8_evos.ch_in1k,39.360,60.640,71.427,28.573,23.46,320,1.000,bicubic,-57.210,-28.033,0
deit3_base_patch16_224.fb_in1k,39.173,60.827,70.933,29.067,86.59,224,0.900,bicubic,-57.127,-28.247,+47
volo_d1_224.sail_in1k,39.013,60.987,70.267,29.733,26.63,224,0.960,bicubic,-57.307,-29.043,+44
vit_large_patch32_384.orig_in21k_ft_in1k,38.920,61.080,68.933,31.067,306.63,384,1.000,bicubic,-56.910,-30.237,+151
resnetv2_101x1_bit.goog_in21k_ft_in1k,38.907,61.093,71.027,28.973,44.54,448,1.000,bilinear,-57.193,-28.243,+90
mvitv2_small.fb_in1k,38.773,61.227,70.413,29.587,34.87,224,0.900,bicubic,-57.597,-28.787,+29
regnetz_040.ra3_in1k,38.747,61.253,70.440,29.560,27.12,320,1.000,bicubic,-57.973,-29.060,-43
coat_lite_medium.in1k,38.600,61.400,71.093,28.907,44.57,224,0.900,bicubic,-57.870,-28.147,+12
xcit_small_12_p8_224.fb_dist_in1k,38.280,61.720,71.373,28.627,26.21,224,1.000,bicubic,-58.420,-27.987,-39
resnet200d.ra2_in1k,38.133,61.867,68.573,31.427,64.69,320,1.000,bicubic,-58.597,-30.847,-48
davit_small.msft_in1k,38.120,61.880,70.747,29.253,49.75,224,0.950,bicubic,-58.510,-28.413,-25
tf_efficientnet_b7.aa_in1k,38.120,61.880,69.320,30.680,66.35,600,0.949,bicubic,-58.420,-29.940,-5
focalnet_base_srf.ms_in1k,37.827,62.173,69.747,30.253,88.15,224,0.900,bicubic,-58.733,-29.483,-10
focalnet_base_lrf.ms_in1k,37.787,62.213,68.573,31.427,88.75,224,0.900,bicubic,-58.663,-30.727,+10
convformer_m36.sail_in1k,37.760,62.240,67.280,32.720,57.05,224,1.000,bicubic,-58.920,-32.300,-34
swinv2_small_window8_256.ms_in1k,37.733,62.267,69.853,30.147,49.73,256,0.900,bicubic,-58.537,-29.357,+38
xcit_large_24_p16_224.fb_dist_in1k,37.653,62.347,71.613,28.387,189.10,224,1.000,bicubic,-59.147,-27.737,-67
seresnet152d.ra2_in1k,37.653,62.347,69.493,30.507,66.84,320,1.000,bicubic,-59.117,-29.947,-63
maxvit_small_tf_224.in1k,37.560,62.440,67.947,32.053,68.93,224,0.950,bicubic,-59.130,-31.423,-42
xcit_small_12_p8_224.fb_in1k,37.547,62.453,68.160,31.840,26.21,224,1.000,bicubic,-58.563,-31.000,+74
eca_nfnet_l1.ra2_in1k,37.533,62.467,70.933,29.067,41.41,320,1.000,bicubic,-59.167,-28.447,-47
fastvit_ma36.apple_dist_in1k,37.493,62.507,71.053,28.947,44.07,256,0.950,bicubic,-59.287,-28.277,-69
efficientvit_b3.r288_in1k,37.427,62.573,69.893,30.107,48.65,288,1.000,bicubic,-59.203,-29.457,-35
twins_svt_large.in1k,37.213,62.787,69.187,30.813,99.27,224,0.900,bicubic,-59.027,-30.013,+34
regnetz_d32.ra3_in1k,37.160,62.840,70.480,29.520,27.58,320,0.950,bicubic,-59.430,-28.900,-29
vit_base_patch32_384.augreg_in21k_ft_in1k,37.107,62.893,69.787,30.213,88.30,384,1.000,bicubic,-59.383,-29.623,-11
regnety_064.ra3_in1k,37.013,62.987,68.107,31.893,30.58,288,1.000,bicubic,-59.347,-31.123,+11
swin_s3_small_224.ms_in1k,36.933,63.067,68.253,31.747,49.74,224,0.900,bicubic,-59.297,-30.827,+34
resnext101_64x4d.c1_in1k,36.840,63.160,66.627,33.373,83.46,288,1.000,bicubic,-59.240,-32.573,+70
regnety_160.deit_in1k,36.827,63.173,69.120,30.880,83.59,288,1.000,bicubic,-59.523,-30.060,+9
efficientnetv2_rw_s.ra2_in1k,36.827,63.173,68.320,31.680,23.94,384,1.000,bicubic,-59.713,-30.780,-24
convnext_small.fb_in1k,36.707,63.293,71.067,28.933,50.22,288,1.000,bicubic,-59.813,-28.273,-19
pit_b_distilled_224.in1k,36.707,63.293,68.093,31.907,74.79,224,0.900,bicubic,-59.523,-31.017,+28
pvt_v2_b4.in1k,36.627,63.373,68.653,31.347,62.56,224,0.900,bicubic,-59.723,-30.677,+7
pvt_v2_b5.in1k,36.293,63.707,68.427,31.573,81.96,224,0.900,bicubic,-60.067,-30.813,+4
cait_xxs36_384.fb_dist_in1k,36.267,63.733,67.733,32.267,17.37,384,1.000,bicubic,-59.563,-31.417,+120
fastvit_sa36.apple_dist_in1k,36.187,63.813,69.267,30.733,31.53,256,0.900,bicubic,-60.173,-29.903,0
regnety_640.seer_ft_in1k,36.120,63.880,68.307,31.693,281.38,384,1.000,bicubic,-60.730,-31.113,-94
nest_base_jx.goog_in1k,36.053,63.947,66.747,33.253,67.72,224,0.875,bicubic,-60.187,-32.473,+17
coatnet_1_rw_224.sw_in1k,35.973,64.027,67.133,32.867,41.72,224,0.950,bicubic,-60.057,-32.027,+74
maxvit_tiny_rw_224.sw_in1k,35.960,64.040,65.573,34.427,29.06,224,0.950,bicubic,-60.280,-33.597,+19
repvgg_d2se.rvgg_in1k,35.827,64.173,66.720,33.280,133.33,320,1.000,bilinear,-60.863,-32.640,-67
cs3se_edgenet_x.c2ns_in1k,35.667,64.333,67.827,32.173,50.72,320,1.000,bicubic,-60.783,-31.573,-22
sequencer2d_l.in1k,35.600,64.400,67.360,32.640,54.30,224,0.875,bicubic,-60.550,-31.800,+38
swin_base_patch4_window7_224.ms_in1k,35.560,64.440,68.160,31.840,87.77,224,0.900,bicubic,-60.560,-30.900,+45
regnety_080.ra3_in1k,35.560,64.440,67.213,32.787,39.18,288,1.000,bicubic,-60.970,-32.107,-35
tf_efficientnet_b3.ns_jft_in1k,35.520,64.480,67.773,32.227,12.23,300,0.904,bicubic,-60.870,-31.617,-15
inception_next_base.sail_in1k,35.187,64.813,66.533,33.467,86.67,224,0.950,bicubic,-61.373,-32.547,-44
tf_efficientnet_b6.aa_in1k,35.147,64.853,67.733,32.267,43.04,528,0.942,bicubic,-61.523,-31.757,-66
resnetrs270.tf_in1k,34.973,65.027,65.453,34.547,129.86,352,1.000,bicubic,-61.717,-33.897,-72
gcvit_tiny.in1k,34.947,65.053,66.893,33.107,28.22,224,0.875,bicubic,-61.233,-32.297,+21
tf_efficientnet_b5.ap_in1k,34.813,65.187,67.467,32.533,30.39,456,0.934,bicubic,-61.867,-31.993,-72
xcit_tiny_12_p8_384.fb_dist_in1k,34.653,65.347,66.293,33.707,6.71,384,1.000,bicubic,-61.427,-32.567,+49
fastvit_ma36.apple_in1k,34.627,65.373,66.987,33.013,44.07,256,0.950,bicubic,-61.843,-32.163,-37
vit_base_patch16_224_miil.in21k_ft_in1k,34.533,65.467,64.973,35.027,86.54,224,0.875,bilinear,-61.917,-34.337,-32
xcit_medium_24_p16_224.fb_dist_in1k,34.333,65.667,67.880,32.120,84.40,224,1.000,bicubic,-62.267,-31.390,-61
resnet152d.ra2_in1k,34.280,65.720,65.947,34.053,60.21,320,1.000,bicubic,-62.110,-33.213,-26
deit3_medium_patch16_224.fb_in1k,34.253,65.747,66.013,33.987,38.85,224,0.900,bicubic,-61.827,-33.227,+43
coat_small.in1k,34.187,65.813,66.133,33.867,21.69,224,0.900,bicubic,-61.723,-33.017,+82
tresnet_m.miil_in1k_448,34.107,65.893,64.533,35.467,31.39,448,0.875,bilinear,-60.883,-33.927,+253
resmlp_big_24_224.fb_distilled_in1k,34.053,65.947,69.573,30.427,129.14,224,0.875,bicubic,-62.397,-29.547,-39
regnetv_064.ra3_in1k,34.000,66.000,67.880,32.120,30.58,288,1.000,bicubic,-62.410,-31.480,-34
tiny_vit_11m_224.dist_in22k_ft_in1k,33.867,66.133,65.827,34.173,11.00,224,0.950,bicubic,-62.423,-33.403,-12
xcit_tiny_24_p16_384.fb_dist_in1k,33.827,66.173,65.400,34.600,12.12,384,1.000,bicubic,-62.113,-33.810,+70
inception_next_small.sail_in1k,33.800,66.200,65.773,34.227,49.37,224,0.875,bicubic,-62.440,-33.417,-10
caformer_s18.sail_in1k,33.707,66.293,65.400,34.600,26.34,224,1.000,bicubic,-62.433,-33.770,+19
pvt_v2_b3.in1k,33.640,66.360,67.680,32.320,45.24,224,0.900,bicubic,-62.350,-31.510,+55
focalnet_small_srf.ms_in1k,33.520,66.480,65.907,34.093,49.89,224,0.900,bicubic,-62.550,-33.383,+38
coatnet_rmlp_1_rw_224.sw_in1k,33.507,66.493,65.573,34.427,41.69,224,0.950,bicubic,-62.453,-33.617,+62
convformer_s18.sail_in22k_ft_in1k,33.453,66.547,68.280,31.720,26.77,224,1.000,bicubic,-63.177,-31.060,-83
resnet152.a1h_in1k,33.427,66.573,63.453,36.547,60.19,288,1.000,bicubic,-62.773,-35.807,-2
twins_pcpvt_large.in1k,33.387,66.613,67.907,32.093,60.99,224,0.900,bicubic,-62.753,-31.453,+12
convformer_s36.sail_in1k,33.320,66.680,63.827,36.173,40.01,224,1.000,bicubic,-63.260,-35.413,-74
focalnet_small_lrf.ms_in1k,33.227,66.773,67.067,32.933,50.34,224,0.900,bicubic,-62.953,-32.163,-1
fastvit_sa36.apple_in1k,33.200,66.800,66.000,34.000,31.53,256,0.900,bicubic,-63.040,-33.130,-18
twins_svt_base.in1k,33.200,66.800,65.720,34.280,56.07,224,0.900,bicubic,-62.960,-33.330,+3
pit_b_224.in1k,33.160,66.840,62.347,37.653,73.76,224,0.900,bicubic,-62.470,-36.893,+110
tiny_vit_21m_224.in1k,33.133,66.867,67.453,32.547,21.20,224,0.950,bicubic,-62.997,-31.767,+9
resnext50_32x4d.fb_swsl_ig1b_ft_in1k,33.040,66.960,65.120,34.880,25.03,224,0.875,bilinear,-62.820,-33.950,+68
resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k,33.027,66.973,64.240,35.760,236.34,224,0.875,bicubic,-63.093,-35.100,+9
mobilevitv2_200.cvnets_in22k_ft_in1k_384,32.987,67.013,65.493,34.507,18.45,384,1.000,bicubic,-63.073,-33.587,+27
convnextv2_nano.fcmae_ft_in22k_in1k_384,32.947,67.053,67.173,32.827,15.62,384,1.000,bicubic,-63.423,-32.227,-48
regnety_320.seer_ft_in1k,32.933,67.067,66.333,33.667,145.05,384,1.000,bicubic,-63.407,-33.017,-41
swinv2_cr_small_ns_224.sw_in1k,32.893,67.107,65.947,34.053,49.70,224,0.900,bicubic,-63.287,-33.173,-10
xception65.ra3_in1k,32.747,67.253,62.973,37.027,39.92,299,0.940,bicubic,-63.613,-36.257,-49
xcit_large_24_p16_224.fb_in1k,32.733,67.267,62.067,37.933,189.10,224,1.000,bicubic,-62.697,-36.773,+150
ecaresnet101d.miil_in1k,32.707,67.293,65.973,34.027,44.57,288,0.950,bicubic,-63.513,-33.337,-24
resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k,32.640,67.360,63.987,36.013,194.03,224,0.875,bilinear,-63.150,-35.173,+73
swin_small_patch4_window7_224.ms_in1k,32.573,67.427,65.347,34.653,49.61,224,0.900,bicubic,-63.357,-33.803,+49
mobilevitv2_175.cvnets_in22k_ft_in1k_384,32.480,67.520,64.747,35.253,14.25,384,1.000,bicubic,-63.700,-34.393,-15
efficientvit_b3.r256_in1k,32.440,67.560,65.960,34.040,48.65,256,1.000,bicubic,-63.850,-33.230,-39
tf_efficientnetv2_b3.in21k_ft_in1k,32.333,67.667,66.133,33.867,14.36,300,0.900,bicubic,-63.887,-33.097,-28
nest_small_jx.goog_in1k,32.280,67.720,63.747,36.253,38.35,224,0.875,bicubic,-63.690,-35.283,+34
convnext_tiny_hnf.a2h_in1k,32.227,67.773,62.893,37.107,28.59,288,1.000,bicubic,-63.793,-36.247,+25
resnext101_64x4d.tv_in1k,32.160,67.840,64.227,35.773,83.46,224,0.875,bilinear,-63.870,-34.833,+17
efficientvit_b3.r224_in1k,32.080,67.920,64.867,35.133,48.65,224,0.950,bicubic,-63.950,-34.263,+19
vit_base_patch16_224.orig_in21k_ft_in1k,32.053,67.947,61.613,38.387,86.57,224,0.900,bicubic,-63.287,-37.387,+153
convnextv2_tiny.fcmae_ft_in1k,32.027,67.973,67.067,32.933,28.64,288,1.000,bicubic,-64.163,-32.263,-27
maxvit_nano_rw_256.sw_in1k,31.933,68.067,64.187,35.813,15.45,256,0.950,bicubic,-63.987,-34.823,+41
tf_efficientnet_b5.ra_in1k,31.787,68.213,65.280,34.720,30.39,456,0.934,bicubic,-64.553,-34.030,-57
rexnetr_200.sw_in12k_ft_in1k,31.733,68.267,67.653,32.347,16.52,288,1.000,bicubic,-64.467,-31.567,-33
swinv2_cr_small_224.sw_in1k,31.733,68.267,62.547,37.453,49.70,224,0.900,bicubic,-64.347,-36.593,+1
swinv2_tiny_window16_256.ms_in1k,31.707,68.293,65.667,34.333,28.35,256,0.900,bicubic,-64.223,-33.353,+34
regnetz_c16_evos.ch_in1k,31.493,68.507,66.347,33.653,13.49,320,0.950,bicubic,-64.647,-32.653,-21
fastvit_sa24.apple_dist_in1k,31.427,68.573,64.760,35.240,21.55,256,0.900,bicubic,-64.723,-34.430,-25
resnest101e.in1k,31.413,68.587,64.320,35.680,48.28,256,0.875,bilinear,-64.447,-34.780,+42
maxvit_rmlp_nano_rw_256.sw_in1k,31.400,68.600,63.413,36.587,15.50,256,0.950,bicubic,-64.570,-35.737,+21
regnety_320.tv2_in1k,31.360,68.640,64.840,35.160,145.05,224,0.965,bicubic,-64.720,-34.390,-8
maxxvit_rmlp_nano_rw_256.sw_in1k,31.333,68.667,64.360,35.640,16.78,256,0.950,bicubic,-64.707,-34.900,+2
regnetv_040.ra3_in1k,31.307,68.693,64.680,35.320,20.64,288,1.000,bicubic,-64.883,-34.570,-40
crossvit_base_240.in1k,31.267,68.733,61.307,38.693,105.03,240,0.875,bicubic,-64.253,-37.563,+95
cait_s24_224.fb_dist_in1k,31.187,68.813,64.573,35.427,46.92,224,1.000,bicubic,-65.233,-34.897,-85
convnext_nano.in12k_ft_in1k,31.107,68.893,67.333,32.667,15.59,288,1.000,bicubic,-64.883,-31.987,+8
efficientnet_b4.ra2_in1k,30.880,69.120,64.667,35.333,19.34,384,1.000,bicubic,-65.270,-34.543,-33
repvit_m2_3.dist_450e_in1k,30.787,69.213,63.840,36.160,23.69,224,0.950,bicubic,-65.543,-35.590,-69
regnety_040.ra3_in1k,30.613,69.387,63.827,36.173,20.65,288,1.000,bicubic,-65.407,-35.243,0
maxvit_tiny_tf_224.in1k,30.587,69.413,62.773,37.227,30.92,224,0.950,bicubic,-65.513,-36.507,-22
crossvit_18_240.in1k,30.587,69.413,61.920,38.080,43.27,240,0.875,bicubic,-64.853,-37.200,+113
sequencer2d_m.in1k,30.560,69.440,62.987,37.013,38.31,224,0.875,bicubic,-65.250,-36.223,+40
vit_base_patch32_clip_224.laion2b_ft_in12k_in1k,30.560,69.440,62.053,37.947,88.22,224,0.900,bicubic,-65.570,-37.107,-34
xcit_small_24_p16_224.fb_dist_in1k,30.547,69.453,64.733,35.267,47.67,224,1.000,bicubic,-65.673,-34.477,-56
crossvit_18_dagger_240.in1k,30.520,69.480,61.787,38.213,44.27,240,0.875,bicubic,-65.050,-37.273,+69
maxxvitv2_nano_rw_256.sw_in1k,30.440,69.560,63.707,36.293,23.70,256,0.950,bicubic,-65.460,-35.463,+21
mvitv2_tiny.fb_in1k,30.173,69.827,64.320,35.680,24.17,224,0.900,bicubic,-65.687,-34.800,+27
xcit_medium_24_p16_224.fb_in1k,30.173,69.827,59.307,40.693,84.40,224,1.000,bicubic,-65.367,-39.473,+74
rexnet_300.nav_in1k,30.027,69.973,63.920,36.080,34.71,224,0.875,bicubic,-65.813,-35.210,+27
cait_xxs24_384.fb_dist_in1k,30.013,69.987,63.907,36.093,12.03,384,1.000,bicubic,-65.267,-35.003,+133
tf_efficientnet_b5.aa_in1k,29.973,70.027,63.080,36.920,30.39,456,0.934,bicubic,-66.497,-36.200,-110
convnext_tiny.fb_in1k,29.960,70.040,65.120,34.880,28.59,288,1.000,bicubic,-65.830,-34.060,+33
twins_pcpvt_base.in1k,29.947,70.053,64.627,35.373,43.83,224,0.900,bicubic,-65.843,-34.503,+33
cs3sedarknet_x.c2ns_in1k,29.933,70.067,62.000,38.000,35.40,288,1.000,bicubic,-66.087,-37.190,-13
resnet50.fb_swsl_ig1b_ft_in1k,29.813,70.187,63.827,36.173,25.56,224,0.875,bilinear,-65.587,-35.273,+107
mobilevitv2_150.cvnets_in22k_ft_in1k_384,29.813,70.187,62.133,37.867,10.59,384,1.000,bicubic,-65.877,-37.007,+43
vit_relpos_base_patch16_clsgap_224.sw_in1k,29.733,70.267,62.880,37.120,86.43,224,0.900,bicubic,-66.037,-36.240,+32
resnet152.a1_in1k,29.720,70.280,57.280,42.720,60.19,288,1.000,bicubic,-65.780,-41.800,+78
convnextv2_nano.fcmae_ft_in22k_in1k,29.693,70.307,65.920,34.080,15.62,288,1.000,bicubic,-66.367,-33.300,-29
deit_base_distilled_patch16_224.fb_in1k,29.587,70.413,64.387,35.613,87.34,224,0.900,bicubic,-66.503,-34.803,-40
convit_base.fb_in1k,29.520,70.480,61.747,38.253,86.54,224,0.875,bicubic,-66.030,-37.363,+57
vit_relpos_medium_patch16_cls_224.sw_in1k,29.307,70.693,60.587,39.413,38.76,224,0.900,bicubic,-66.163,-38.363,+82
swin_tiny_patch4_window7_224.ms_in22k_ft_in1k,29.293,70.707,66.107,33.893,28.29,224,0.900,bicubic,-66.217,-33.013,+67
resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k,29.120,70.880,61.000,39.000,88.79,224,0.875,bilinear,-66.390,-38.200,+67
davit_tiny.msft_in1k,29.107,70.893,63.573,36.427,28.36,224,0.950,bicubic,-66.553,-35.477,+38
tf_efficientnetv2_s.in1k,29.040,70.960,61.227,38.773,21.46,384,1.000,bicubic,-67.300,-37.973,-99
edgenext_base.usi_in1k,29.027,70.973,64.907,35.093,18.51,320,1.000,bicubic,-67.673,-34.593,-175
xception65p.ra3_in1k,28.987,71.013,59.907,40.093,39.82,299,0.940,bicubic,-67.223,-39.273,-80
convnext_tiny.fb_in22k_ft_in1k,28.987,71.013,55.600,44.400,28.59,288,1.000,bicubic,-65.853,-43.110,+195
resnet101d.ra2_in1k,28.973,71.027,62.080,37.920,44.57,320,1.000,bicubic,-67.317,-37.040,-97
fastvit_sa24.apple_in1k,28.960,71.040,62.413,37.587,21.55,256,0.900,bicubic,-66.960,-36.747,-9
resnetv2_101.a1h_in1k,28.933,71.067,59.867,40.133,44.54,288,1.000,bicubic,-67.117,-39.273,-40
resnetrs152.tf_in1k,28.893,71.107,60.507,39.493,86.62,320,1.000,bicubic,-67.687,-38.613,-152
regnety_160.tv2_in1k,28.867,71.133,61.627,38.373,83.59,224,0.965,bicubic,-67.103,-37.343,-25
vit_relpos_medium_patch16_224.sw_in1k,28.853,71.147,61.973,38.027,38.75,224,0.900,bicubic,-66.607,-37.037,+71
xcit_tiny_24_p8_224.fb_dist_in1k,28.707,71.293,61.373,38.627,12.11,224,1.000,bicubic,-67.103,-37.737,+5
xcit_tiny_24_p8_224.fb_in1k,28.653,71.347,60.440,39.560,12.11,224,1.000,bicubic,-67.007,-38.590,+25
efficientvit_b2.r288_in1k,28.627,71.373,64.227,35.773,24.33,288,1.000,bicubic,-67.333,-34.933,-26
repvit_m2_3.dist_300e_in1k,28.627,71.373,61.907,38.093,23.69,224,0.950,bicubic,-67.493,-37.333,-68
crossvit_15_dagger_240.in1k,28.547,71.453,60.293,39.707,28.21,240,0.875,bicubic,-67.133,-38.537,+20
cs3edgenet_x.c2_in1k,28.507,71.493,61.133,38.867,47.82,288,1.000,bicubic,-67.543,-38.037,-48
pvt_v2_b2_li.in1k,28.480,71.520,62.000,38.000,22.55,224,0.900,bicubic,-67.080,-37.030,+34
xcit_small_24_p16_224.fb_in1k,28.320,71.680,58.707,41.293,47.67,224,1.000,bicubic,-67.220,-40.013,+38
efficientformerv2_l.snap_dist_in1k,28.253,71.747,61.920,38.080,26.32,224,0.950,bicubic,-67.817,-37.200,-56
resnet101.a1h_in1k,28.160,71.840,59.387,40.613,44.55,288,1.000,bicubic,-67.860,-39.723,-45
efficientformer_l7.snap_dist_in1k,28.000,72.000,63.013,36.987,82.23,224,0.950,bicubic,-68.110,-36.267,-70
flexivit_small.1200ep_in1k,27.853,72.147,58.653,41.347,22.06,240,0.950,bicubic,-67.697,-40.227,+30
resnetaa50d.sw_in12k_ft_in1k,27.813,72.187,62.253,37.747,25.58,288,1.000,bicubic,-68.017,-36.837,-11
pvt_v2_b2.in1k,27.600,72.400,60.733,39.267,25.36,224,0.900,bicubic,-67.880,-38.267,+52
regnetz_c16.ra3_in1k,27.587,72.413,62.720,37.280,13.46,320,1.000,bicubic,-68.353,-36.500,-31
resnext101_32x8d.tv2_in1k,27.573,72.427,59.827,40.173,88.79,224,0.965,bilinear,-68.377,-39.073,-36
vit_base_patch16_384.augreg_in1k,27.547,72.453,57.253,42.747,86.86,384,1.000,bicubic,-67.393,-41.637,+150
coat_lite_small.in1k,27.507,72.493,58.533,41.467,19.84,224,0.900,bicubic,-68.033,-40.547,+27
deit_base_patch16_224.fb_in1k,27.413,72.587,58.880,41.120,86.57,224,0.900,bicubic,-68.037,-40.210,+55
vit_relpos_base_patch16_224.sw_in1k,27.320,72.680,61.147,38.853,86.43,224,0.900,bicubic,-68.240,-37.843,+20
resnetv2_50x1_bit.goog_in21k_ft_in1k,27.307,72.693,62.867,37.133,25.55,448,1.000,bilinear,-67.703,-36.113,+136
regnety_080_tv.tv2_in1k,27.280,72.720,61.520,38.480,39.38,224,0.965,bicubic,-68.580,-37.730,-24
coatnet_bn_0_rw_224.sw_in1k,27.213,72.787,61.253,38.747,27.44,224,0.950,bicubic,-68.487,-37.797,-1
xcit_small_12_p16_224.fb_dist_in1k,27.147,72.853,59.800,40.200,26.25,224,1.000,bicubic,-68.883,-39.190,-63
dm_nfnet_f0.dm_in1k,27.067,72.933,58.320,41.680,71.49,256,0.900,bicubic,-69.243,-41.000,-129
vit_small_patch16_224.augreg_in21k_ft_in1k,27.040,72.960,59.253,40.747,22.05,224,0.900,bicubic,-68.330,-39.687,+67
coatnet_0_rw_224.sw_in1k,27.027,72.973,59.387,40.613,27.44,224,0.950,bicubic,-68.413,-39.663,+52
sequencer2d_s.in1k,26.867,73.133,60.640,39.360,27.65,224,0.875,bicubic,-69.113,-38.490,-55
flexivit_small.600ep_in1k,26.853,73.147,57.253,42.747,22.06,240,0.950,bicubic,-68.817,-41.807,-3
tresnet_v2_l.miil_in21k_ft_in1k,26.707,73.293,59.827,40.173,46.17,224,0.875,bilinear,-69.453,-39.443,-105
gcvit_xtiny.in1k,26.693,73.307,60.907,39.093,19.98,224,0.875,bicubic,-68.897,-38.133,+7
mobilevitv2_200.cvnets_in22k_ft_in1k,26.653,73.347,59.400,40.600,18.45,256,0.888,bicubic,-68.507,-39.700,+94
swin_s3_tiny_224.ms_in1k,26.520,73.480,60.347,39.653,28.33,224,0.900,bicubic,-68.660,-38.603,+90
coatnet_rmlp_nano_rw_224.sw_in1k,26.467,73.533,60.547,39.453,15.15,224,0.900,bicubic,-68.963,-38.493,+48
swinv2_tiny_window8_256.ms_in1k,26.387,73.613,60.480,39.520,28.35,256,0.900,bicubic,-69.103,-38.480,+27
tf_efficientnet_b4.aa_in1k,26.293,73.707,60.080,39.920,19.34,380,0.922,bicubic,-69.607,-38.970,-46
regnetx_320.tv2_in1k,26.267,73.733,58.133,41.867,107.81,224,0.965,bicubic,-69.723,-40.967,-66
tf_efficientnet_b4.ap_in1k,26.227,73.773,60.240,39.760,19.34,380,0.922,bicubic,-69.933,-38.900,-116
coatnext_nano_rw_224.sw_in1k,26.227,73.773,59.613,40.387,14.70,224,0.900,bicubic,-69.203,-39.397,+43
deit3_small_patch16_224.fb_in1k,26.227,73.773,54.480,45.520,22.06,224,0.900,bicubic,-68.763,-44.500,+123
nfnet_l0.ra2_in1k,26.213,73.787,61.747,38.253,35.07,288,1.000,bicubic,-69.907,-37.523,-103
regnety_032.ra_in1k,26.200,73.800,61.013,38.987,19.44,288,1.000,bicubic,-69.760,-38.177,-64
ecaresnet50t.ra2_in1k,26.120,73.880,60.013,39.987,25.57,320,0.950,bicubic,-69.400,-38.797,+7
fbnetv3_g.ra2_in1k,26.107,73.893,61.080,38.920,16.62,288,0.950,bilinear,-69.413,-38.030,+7
mobilevitv2_175.cvnets_in22k_ft_in1k,26.013,73.987,58.520,41.480,14.25,256,0.888,bicubic,-69.207,-40.280,+71
flexivit_small.300ep_in1k,25.907,74.093,57.080,42.920,22.06,240,0.950,bicubic,-69.583,-42.020,+18
visformer_small.in1k,25.853,74.147,58.933,41.067,40.22,224,0.900,bicubic,-69.627,-39.967,+21
inception_next_tiny.sail_in1k,25.800,74.200,59.813,40.187,28.06,224,0.875,bicubic,-69.660,-39.097,+22
vit_small_patch16_384.augreg_in1k,25.773,74.227,57.587,42.413,22.20,384,1.000,bicubic,-69.517,-41.413,+57
convformer_s18.sail_in1k,25.680,74.320,57.960,42.040,26.77,224,1.000,bicubic,-70.270,-41.120,-69
halo2botnet50ts_256.a1h_in1k,25.573,74.427,56.827,43.173,22.64,256,0.950,bicubic,-69.847,-42.193,+34
vit_relpos_medium_patch16_rpn_224.sw_in1k,25.520,74.480,58.667,41.333,38.73,224,0.900,bicubic,-69.980,-40.113,+7
coat_mini.in1k,25.507,74.493,57.707,42.293,10.34,224,0.900,bicubic,-69.463,-41.373,+112
crossvit_15_240.in1k,25.453,74.547,57.587,42.413,27.53,240,0.875,bicubic,-69.697,-41.223,+76
vit_srelpos_medium_patch16_224.sw_in1k,25.373,74.627,58.427,41.573,38.74,224,0.900,bicubic,-69.867,-40.443,+56
resnet101.a1_in1k,25.253,74.747,55.120,44.880,44.55,288,1.000,bicubic,-70.267,-43.730,-3
resnetv2_50x1_bit.goog_distilled_in1k,25.200,74.800,59.667,40.333,25.55,224,0.875,bicubic,-70.910,-39.603,-117
convit_small.fb_in1k,25.147,74.853,57.293,42.707,27.78,224,0.875,bicubic,-70.063,-41.607,+59
xcit_small_12_p16_224.fb_in1k,25.107,74.893,56.067,43.933,26.25,224,1.000,bicubic,-70.323,-42.563,+24
vit_base_patch16_rpn_224.sw_in1k,25.080,74.920,58.680,41.320,86.54,224,0.900,bicubic,-70.310,-40.180,+31
gc_efficientnetv2_rw_t.agc_in1k,25.067,74.933,57.707,42.293,13.68,288,1.000,bicubic,-70.673,-41.473,-43
resnet152.a2_in1k,25.053,74.947,54.320,45.680,60.19,288,1.000,bicubic,-70.437,-44.670,+3
eca_nfnet_l0.ra2_in1k,24.800,75.200,60.040,39.960,24.14,288,1.000,bicubic,-71.140,-39.070,-80
xception41p.ra3_in1k,24.787,75.213,55.253,44.747,26.91,299,0.940,bicubic,-70.723,-43.657,-7
efficientvit_b2.r256_in1k,24.773,75.227,59.720,40.280,24.33,256,1.000,bicubic,-70.877,-39.340,-36
tnt_s_patch16_224,24.707,75.293,58.173,41.827,23.76,224,0.900,bicubic,-70.333,-40.657,+85
convnext_nano_ols.d1h_in1k,24.547,75.453,57.027,42.973,15.65,288,1.000,bicubic,-70.593,-41.823,+70
xcit_tiny_12_p16_384.fb_dist_in1k,24.453,75.547,57.080,42.920,6.72,384,1.000,bicubic,-70.677,-41.940,+70
cs3darknet_x.c2ns_in1k,24.387,75.613,57.760,42.240,35.05,288,1.000,bicubic,-71.463,-41.410,-69
efficientnetv2_rw_t.ra2_in1k,24.280,75.720,57.400,42.600,13.65,288,1.000,bicubic,-71.310,-41.670,-33
swinv2_cr_tiny_ns_224.sw_in1k,24.147,75.853,58.187,41.813,28.33,224,0.900,bicubic,-71.223,-40.953,+23
eva02_tiny_patch14_336.mim_in22k_ft_in1k,24.133,75.867,55.400,44.600,5.76,336,1.000,bicubic,-70.777,-43.480,+100
resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k,24.120,75.880,57.373,42.627,44.18,224,0.875,bilinear,-71.320,-41.347,+3
twins_svt_small.in1k,24.107,75.893,57.160,42.840,24.06,224,0.900,bicubic,-71.083,-41.720,+48
coatnet_nano_rw_224.sw_in1k,24.093,75.907,57.147,42.853,15.14,224,0.900,bicubic,-71.147,-41.843,+39
vit_small_r26_s32_224.augreg_in21k_ft_in1k,24.093,75.907,56.213,43.787,36.43,224,0.900,bicubic,-71.537,-42.727,-44
tf_efficientnet_b5.in1k,24.080,75.920,58.347,41.653,30.39,456,0.934,bicubic,-71.990,-40.843,-125
tf_efficientnet_b2.ns_jft_in1k,24.040,75.960,57.320,42.680,9.11,260,0.890,bicubic,-71.730,-41.660,-65
mobilevitv2_150.cvnets_in22k_ft_in1k,24.013,75.987,55.920,44.080,10.59,256,0.888,bicubic,-71.127,-42.860,+55
vit_relpos_small_patch16_224.sw_in1k,24.000,76.000,58.160,41.840,21.98,224,0.900,bicubic,-71.150,-40.800,+48
convnext_nano.d1h_in1k,24.000,76.000,56.200,43.800,15.59,288,1.000,bicubic,-71.350,-42.720,+17
cs3sedarknet_l.c2ns_in1k,23.960,76.040,58.760,41.240,21.91,288,0.950,bicubic,-71.350,-40.370,+19
ecaresnet50d.miil_in1k,23.933,76.067,58.760,41.240,25.58,288,0.950,bicubic,-71.517,-40.080,-10
poolformer_m48.sail_in1k,23.867,76.133,57.160,42.840,73.47,224,0.950,bicubic,-71.763,-42.030,-50
vit_small_patch32_384.augreg_in21k_ft_in1k,23.760,76.240,57.280,42.720,22.92,384,1.000,bicubic,-71.260,-41.600,+68
tiny_vit_11m_224.in1k,23.627,76.373,58.973,41.027,11.00,224,0.950,bicubic,-71.863,-39.817,-23
lamhalobotnet50ts_256.a1h_in1k,23.613,76.387,55.320,44.680,22.57,256,0.950,bicubic,-71.537,-43.560,+44
nasnetalarge.tf_in1k,23.493,76.507,55.000,45.000,88.75,331,0.911,bicubic,-72.197,-43.930,-64
crossvit_small_240.in1k,23.427,76.573,56.840,43.160,26.86,240,0.875,bicubic,-71.423,-42.180,+95
levit_384.fb_dist_in1k,23.400,76.600,56.400,43.600,39.13,224,0.900,bicubic,-72.130,-42.660,-42
levit_conv_384.fb_dist_in1k,23.387,76.613,56.413,43.587,39.13,224,0.900,bicubic,-72.143,-42.637,-42
seresnext50_32x4d.racm_in1k,23.360,76.640,57.453,42.547,27.56,288,0.950,bicubic,-72.380,-41.567,-75
pnasnet5large.tf_in1k,23.333,76.667,53.627,46.373,86.06,331,0.911,bicubic,-72.377,-45.403,-72
focalnet_tiny_srf.ms_in1k,23.267,76.733,58.333,41.667,28.43,224,0.900,bicubic,-72.233,-40.797,-36
efficientnet_b3.ra2_in1k,23.200,76.800,55.947,44.053,12.23,320,1.000,bicubic,-72.520,-43.093,-76
wide_resnet50_2.racm_in1k,23.187,76.813,56.000,44.000,68.88,288,0.950,bicubic,-72.443,-42.660,-65
pit_s_distilled_224.in1k,23.160,76.840,57.093,42.907,24.04,224,0.900,bicubic,-71.980,-41.637,+36
hrnet_w18_ssld.paddle_in1k,23.147,76.853,55.160,44.840,21.30,288,1.000,bilinear,-72.843,-44.150,-130
efficientformer_l3.snap_dist_in1k,23.120,76.880,57.147,42.853,31.41,224,0.950,bicubic,-72.470,-42.013,-63
nest_tiny_jx.goog_in1k,23.107,76.893,56.200,43.800,17.06,224,0.875,bicubic,-72.133,-42.710,+12
focalnet_tiny_lrf.ms_in1k,23.080,76.920,58.560,41.440,28.65,224,0.900,bicubic,-72.380,-40.400,-29
tiny_vit_5m_224.dist_in22k_ft_in1k,23.067,76.933,56.480,43.520,5.39,224,0.950,bicubic,-71.983,-42.490,+47
resnet61q.ra2_in1k,23.013,76.987,55.760,44.240,36.85,288,1.000,bicubic,-72.767,-43.230,-91
regnetx_160.tv2_in1k,22.987,77.013,56.333,43.667,54.28,224,0.965,bicubic,-72.893,-42.757,-111
resmlp_big_24_224.fb_in1k,22.893,77.107,54.280,45.720,129.14,224,0.875,bicubic,-71.787,-44.220,+106
vit_srelpos_small_patch16_224.sw_in1k,22.880,77.120,55.747,44.253,21.97,224,0.900,bicubic,-72.150,-43.203,+46
resnetv2_50d_evos.ah_in1k,22.867,77.133,55.173,44.827,25.59,288,1.000,bicubic,-72.763,-43.937,-75
halonet50ts.a1h_in1k,22.867,77.133,54.013,45.987,22.73,256,0.940,bicubic,-72.273,-44.877,+29
vit_base_patch32_clip_224.laion2b_ft_in1k,22.853,77.147,55.013,44.987,88.22,224,0.900,bicubic,-72.667,-43.977,-57
tf_efficientnet_b4.in1k,22.773,77.227,57.093,42.907,19.34,380,0.922,bicubic,-73.047,-41.957,-105
poolformerv2_m48.sail_in1k,22.760,77.240,55.747,44.253,73.35,224,1.000,bicubic,-73.010,-43.293,-96
twins_pcpvt_small.in1k,22.693,77.307,56.813,43.187,24.11,224,0.900,bicubic,-72.517,-42.067,+6
repvit_m1_5.dist_450e_in1k,22.680,77.320,56.107,43.893,14.64,224,0.950,bicubic,-73.220,-43.013,-122
convnextv2_nano.fcmae_ft_in1k,22.653,77.347,58.307,41.693,15.62,288,1.000,bicubic,-73.147,-40.783,-106
poolformer_m36.sail_in1k,22.587,77.413,55.373,44.627,56.17,224,0.950,bicubic,-72.803,-43.567,-23
vit_base_patch32_clip_224.openai_ft_in1k,22.560,77.440,55.333,44.667,88.22,224,0.900,bicubic,-72.550,-43.597,+25
vit_base_patch32_224.augreg_in21k_ft_in1k,22.427,77.573,54.027,45.973,88.22,224,0.900,bicubic,-72.583,-45.033,+41
resnetv2_50d_gn.ah_in1k,22.307,77.693,55.000,45.000,25.57,288,1.000,bicubic,-73.173,-43.950,-51
ecaresnet101d_pruned.miil_in1k,22.267,77.733,56.227,43.773,24.88,288,0.950,bicubic,-73.493,-42.953,-103
wide_resnet101_2.tv2_in1k,22.160,77.840,54.973,45.027,126.89,224,0.965,bilinear,-73.380,-43.887,-76
ecaresnet50t.a1_in1k,22.093,77.907,53.680,46.320,25.57,288,1.000,bicubic,-73.317,-45.330,-35
xcit_tiny_12_p8_224.fb_dist_in1k,22.067,77.933,54.267,45.733,6.71,224,1.000,bicubic,-73.023,-44.643,+23
resnext50_32x4d.a1h_in1k,22.013,77.987,54.080,45.920,25.03,288,1.000,bicubic,-73.437,-44.760,-48
efficientvit_b2.r224_in1k,22.000,78.000,55.533,44.467,24.33,224,0.950,bicubic,-73.200,-43.387,-3
res2net101d.in1k,21.853,78.147,51.680,48.320,45.23,224,0.875,bilinear,-72.967,-47.090,+67
tresnet_m.miil_in21k_ft_in1k,21.680,78.320,53.853,46.147,31.39,224,0.875,bilinear,-74.030,-45.067,-106
repvit_m1_5.dist_300e_in1k,21.613,78.387,54.707,45.293,14.64,224,0.950,bicubic,-74.027,-44.283,-97
efficientformerv2_s2.snap_dist_in1k,21.547,78.453,54.240,45.760,12.71,224,0.950,bicubic,-73.813,-44.690,-33
fastvit_sa12.apple_dist_in1k,21.493,78.507,54.613,45.387,11.58,256,0.900,bicubic,-73.657,-44.347,+3
resnet50_gn.a1h_in1k,21.387,78.613,54.067,45.933,25.56,288,0.950,bicubic,-73.863,-44.933,-20
maxvit_rmlp_pico_rw_256.sw_in1k,21.240,78.760,51.920,48.080,7.52,256,0.950,bicubic,-73.400,-46.890,+88
convmixer_1536_20.in1k,21.213,78.787,55.507,44.493,51.63,224,0.960,bicubic,-73.867,-43.523,+14
swin_tiny_patch4_window7_224.ms_in1k,21.173,78.827,55.933,44.067,28.29,224,0.900,bicubic,-73.967,-42.927,+1
resnet101.a2_in1k,21.173,78.827,51.920,48.080,44.55,288,1.000,bicubic,-74.237,-47.020,-46
pit_s_224.in1k,21.080,78.920,53.613,46.387,23.46,224,0.900,bicubic,-73.490,-45.087,+100
xcit_tiny_12_p8_224.fb_in1k,20.960,79.040,52.467,47.533,6.71,224,1.000,bicubic,-73.730,-46.193,+73
resnet51q.ra2_in1k,20.920,79.080,55.680,44.320,35.70,288,1.000,bilinear,-74.950,-43.450,-144
poolformerv2_m36.sail_in1k,20.920,79.080,53.120,46.880,56.08,224,1.000,bicubic,-74.480,-46.180,-47
resnetrs101.tf_in1k,20.867,79.133,52.827,47.173,63.62,288,0.940,bicubic,-74.563,-46.163,-59
deit_small_distilled_patch16_224.fb_in1k,20.720,79.280,55.133,44.867,22.44,224,0.900,bicubic,-73.990,-43.897,+67
sebotnet33ts_256.a1h_in1k,20.720,79.280,48.760,51.240,13.70,256,0.940,bicubic,-73.890,-49.750,+89
regnety_032.tv2_in1k,20.547,79.453,54.347,45.653,19.44,224,0.965,bicubic,-74.763,-44.563,-39
resnet152.tv2_in1k,20.493,79.507,52.347,47.653,60.19,224,0.965,bilinear,-75.007,-46.613,-83
ecaresnetlight.miil_in1k,20.480,79.520,53.413,46.587,30.16,288,0.950,bicubic,-74.810,-45.617,-39
resnest50d_4s2x40d.in1k,20.387,79.613,52.773,47.227,30.42,224,0.875,bicubic,-74.583,-46.007,+19
resnetaa50.a1h_in1k,20.067,79.933,51.947,48.053,25.56,288,1.000,bicubic,-75.133,-47.143,-26
resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k,20.040,79.960,53.560,46.440,25.03,224,0.875,bilinear,-74.850,-45.300,+27
haloregnetz_b.ra3_in1k,20.013,79.987,49.987,50.013,11.68,224,0.940,bicubic,-74.677,-48.843,+62
regnetz_b16.ra3_in1k,19.813,80.187,52.853,47.147,9.72,288,1.000,bicubic,-75.357,-46.227,-25
xcit_nano_12_p8_384.fb_dist_in1k,19.787,80.213,50.587,49.413,3.05,384,1.000,bicubic,-73.733,-47.943,+221
resnext50_32x4d.a1_in1k,19.733,80.267,50.173,49.827,25.03,288,1.000,bicubic,-75.097,-48.417,+40
tresnet_xl.miil_in1k,19.640,80.360,53.160,46.840,78.44,224,0.875,bilinear,-75.800,-45.630,-75
senet154.gluon_in1k,19.320,80.680,47.600,52.400,115.09,224,0.875,bicubic,-75.600,-51.160,+18
rexnet_200.nav_in1k,19.227,80.773,52.640,47.360,16.37,224,0.875,bicubic,-75.713,-46.370,+12
levit_conv_256.fb_dist_in1k,19.187,80.813,50.067,49.933,18.89,224,0.900,bicubic,-75.833,-48.923,+1
levit_256.fb_dist_in1k,19.173,80.827,50.107,49.893,18.89,224,0.900,bicubic,-75.847,-48.823,-1
lambda_resnet50ts.a1h_in1k,19.147,80.853,49.240,50.760,21.54,256,0.950,bicubic,-75.623,-49.450,+37
repvgg_b3.rvgg_in1k,19.067,80.933,50.293,49.707,123.09,224,0.875,bilinear,-75.483,-48.417,+79
mixer_b16_224.miil_in21k_ft_in1k,19.040,80.960,51.213,48.787,59.88,224,0.875,bilinear,-76.260,-47.667,-56
legacy_senet154.in1k,19.040,80.960,47.947,52.053,115.09,224,0.875,bilinear,-76.030,-50.883,-11
resnet50d.a1_in1k,18.960,81.040,48.813,51.187,25.58,288,1.000,bicubic,-75.770,-49.937,+44
seresnext101_64x4d.gluon_in1k,18.933,81.067,49.213,50.787,88.23,224,0.875,bicubic,-75.997,-49.617,+6
deit_small_patch16_224.fb_in1k,18.893,81.107,51.400,48.600,22.05,224,0.900,bicubic,-75.457,-47.290,+105
mobilevitv2_200.cvnets_in1k,18.893,81.107,50.520,49.480,18.45,256,0.888,bicubic,-75.947,-48.010,+23
gcvit_xxtiny.in1k,18.773,81.227,53.347,46.653,12.00,224,0.875,bicubic,-75.647,-45.543,+95
edgenext_small.usi_in1k,18.680,81.320,53.600,46.400,5.59,320,1.000,bicubic,-76.720,-45.270,-77
tf_efficientnet_b1.ns_jft_in1k,18.653,81.347,51.733,48.267,7.79,240,0.882,bicubic,-76.507,-47.217,-42
regnetx_080.tv2_in1k,18.573,81.427,50.333,49.667,39.57,224,0.965,bicubic,-76.527,-48.497,-24
ecaresnet50t.a2_in1k,18.533,81.467,48.773,51.227,25.57,288,1.000,bicubic,-76.817,-50.087,-73
poolformer_s36.sail_in1k,18.493,81.507,52.187,47.813,30.86,224,0.900,bicubic,-76.587,-46.723,-23
repvit_m3.dist_in1k,18.480,81.520,51.867,48.133,10.68,224,0.950,bicubic,-76.720,-46.953,-52
wide_resnet50_2.tv2_in1k,18.467,81.533,52.467,47.533,68.88,224,0.965,bilinear,-76.403,-46.443,+8
cait_xxs36_224.fb_dist_in1k,18.307,81.693,49.480,50.520,17.30,224,1.000,bicubic,-75.963,-49.230,+106
cs3darknet_l.c2ns_in1k,18.293,81.707,51.840,48.160,21.16,288,0.950,bicubic,-76.827,-47.140,-35
seresnet50.ra2_in1k,18.293,81.707,51.253,48.747,28.09,288,0.950,bicubic,-77.037,-47.757,-76
sehalonet33ts.ra2_in1k,18.227,81.773,47.733,52.267,13.69,256,0.940,bicubic,-76.533,-50.837,+21
ese_vovnet39b.ra_in1k,18.200,81.800,49.880,50.120,24.57,288,0.950,bicubic,-76.670,-49.060,+4
tf_efficientnet_lite4.in1k,18.120,81.880,50.707,49.293,13.01,380,0.920,bilinear,-76.760,-48.183,0
vit_tiny_patch16_384.augreg_in21k_ft_in1k,18.027,81.973,50.307,49.693,5.79,384,1.000,bicubic,-75.623,-48.283,+177
tiny_vit_5m_224.in1k,18.000,82.000,50.173,49.827,5.39,224,0.950,bicubic,-76.240,-48.287,+100
gcresnet50t.ra2_in1k,17.880,82.120,49.413,50.587,25.90,288,1.000,bicubic,-77.360,-49.567,-68
resnet50d.ra2_in1k,17.853,82.147,50.240,49.760,25.58,288,0.950,bicubic,-77.157,-48.670,-23
mobilevitv2_175.cvnets_in1k,17.787,82.213,49.747,50.253,14.25,256,0.888,bicubic,-77.103,-49.113,-8
resnest50d_1s4x24d.in1k,17.640,82.360,49.760,50.240,25.68,224,0.875,bicubic,-77.090,-49.220,+16
resnetv2_50.a1h_in1k,17.587,82.413,49.813,50.187,25.55,288,1.000,bicubic,-77.263,-49.057,+1
resnet50.c2_in1k,17.533,82.467,49.760,50.240,25.56,288,1.000,bicubic,-77.387,-49.050,-16
resnest50d.in1k,17.360,82.640,50.733,49.267,27.48,224,0.875,bilinear,-77.490,-48.147,-2
convnext_pico.d1_in1k,17.347,82.653,50.213,49.787,9.05,288,0.950,bicubic,-77.413,-48.717,+8
seresnext101_32x4d.gluon_in1k,17.333,82.667,46.373,53.627,48.96,224,0.875,bicubic,-77.557,-52.447,-12
efficientnet_el.ra_in1k,17.320,82.680,50.027,49.973,10.59,300,0.904,bicubic,-77.800,-48.943,-50
inception_v4.tf_in1k,17.293,82.707,45.867,54.133,42.68,299,0.875,bicubic,-77.077,-52.763,+75
tf_efficientnet_b3.ap_in1k,17.227,82.773,49.680,50.320,12.23,300,0.904,bicubic,-78.093,-49.220,-92
gcresnext50ts.ch_in1k,17.173,82.827,48.107,51.893,15.67,288,1.000,bicubic,-77.687,-50.753,-11
xcit_tiny_24_p16_224.fb_dist_in1k,17.160,82.840,47.507,52.493,12.12,224,1.000,bicubic,-77.380,-51.293,+49
fastvit_s12.apple_dist_in1k,17.120,82.880,49.400,50.600,9.47,256,0.900,bicubic,-77.710,-49.370,-6
regnetx_032.tv2_in1k,17.067,82.933,48.107,51.893,15.30,224,0.965,bicubic,-77.593,-50.693,+18
xception71.tf_in1k,17.027,82.973,45.547,54.453,42.34,299,0.903,bicubic,-77.273,-53.103,+80
regnety_016.tv2_in1k,16.973,83.027,49.853,50.147,11.20,224,0.965,bicubic,-77.547,-48.967,+48
tf_efficientnet_b3.aa_in1k,16.973,83.027,49.280,50.720,12.23,300,0.904,bicubic,-78.037,-49.750,-39
cs3darknet_focus_l.c2ns_in1k,16.960,83.040,50.493,49.507,21.15,288,0.950,bicubic,-78.190,-48.437,-72
convnextv2_pico.fcmae_ft_in1k,16.907,83.093,50.307,49.693,9.07,288,0.950,bicubic,-78.323,-48.613,-86
resmlp_36_224.fb_distilled_in1k,16.880,83.120,51.493,48.507,44.69,224,0.875,bicubic,-78.010,-47.377,-26
resnext101_64x4d.gluon_in1k,16.867,83.133,44.147,55.853,83.46,224,0.875,bicubic,-77.803,-54.513,+10
poolformerv2_s36.sail_in1k,16.680,83.320,49.600,50.400,30.79,224,1.000,bicubic,-78.630,-49.320,-102
resnet152d.gluon_in1k,16.627,83.373,44.213,55.787,60.21,224,0.875,bicubic,-78.103,-54.277,-2
tf_efficientnetv2_b3.in1k,16.600,83.400,48.680,51.320,14.36,300,0.904,bicubic,-78.560,-50.140,-79
gmlp_s16_224.ra3_in1k,16.547,83.453,45.080,54.920,19.42,224,0.875,bicubic,-77.603,-53.590,+90
resnet152s.gluon_in1k,16.547,83.453,44.507,55.493,60.32,224,0.875,bicubic,-78.473,-54.373,-54
tresnet_l.miil_in1k,16.533,83.467,49.893,50.107,55.99,224,0.875,bilinear,-78.747,-49.067,-102
inception_resnet_v2.tf_in1k,16.520,83.480,44.973,55.027,55.84,299,0.897,bicubic,-78.060,-53.817,+22
convnext_pico_ols.d1_in1k,16.507,83.493,49.707,50.293,9.06,288,1.000,bicubic,-78.113,-49.153,+16
seresnet50.a1_in1k,16.493,83.507,46.760,53.240,28.09,288,1.000,bicubic,-78.167,-52.020,+5
resmlp_24_224.fb_distilled_in1k,16.453,83.547,50.347,49.653,30.02,224,0.875,bicubic,-77.997,-48.423,+44
mobilevitv2_150.cvnets_in1k,16.453,83.547,48.453,51.547,10.59,256,0.888,bicubic,-78.097,-50.327,+25
resnet101.tv2_in1k,16.400,83.600,48.707,51.293,44.55,224,0.965,bilinear,-78.880,-50.303,-106
gernet_l.idstcv_in1k,16.320,83.680,47.227,52.773,31.08,256,0.875,bilinear,-78.790,-51.753,-73
repvit_m1_1.dist_450e_in1k,16.280,83.720,49.680,50.320,8.80,224,0.950,bicubic,-78.620,-49.280,-46
fastvit_sa12.apple_in1k,16.280,83.720,49.653,50.347,11.58,256,0.900,bicubic,-78.600,-49.367,-38
inception_resnet_v2.tf_ens_adv_in1k,16.280,83.720,43.640,56.360,55.84,299,0.897,bicubic,-77.890,-54.900,+73
repvgg_b3g4.rvgg_in1k,16.227,83.773,47.653,52.347,83.83,224,0.875,bilinear,-78.283,-51.317,+28
xcit_tiny_24_p16_224.fb_in1k,16.227,83.773,46.000,54.000,12.12,224,1.000,bicubic,-77.853,-52.540,+86
resnet50.a1_in1k,16.040,83.960,45.773,54.227,25.56,288,1.000,bicubic,-78.700,-52.907,-24
xception65.tf_in1k,16.040,83.960,43.760,56.240,39.92,299,0.903,bicubic,-77.740,-54.610,+119
resnet50.c1_in1k,16.000,84.000,47.387,52.613,25.56,288,1.000,bicubic,-78.730,-51.433,-23
ecaresnet50d_pruned.miil_in1k,15.987,84.013,49.707,50.293,19.94,288,0.950,bicubic,-79.123,-49.193,-83
edgenext_small_rw.sw_in1k,15.987,84.013,49.667,50.333,7.83,320,1.000,bicubic,-78.683,-49.113,-14
resnet50.fb_ssl_yfcc100m_ft_in1k,15.920,84.080,49.413,50.587,25.56,224,0.875,bilinear,-78.530,-49.327,+30
resnext50_32x4d.ra_in1k,15.867,84.133,47.227,52.773,25.03,288,0.950,bicubic,-78.833,-51.533,-20
fastvit_t12.apple_dist_in1k,15.640,84.360,47.680,52.320,7.55,256,0.900,bicubic,-78.950,-51.110,+2
resnet50.d_in1k,15.640,84.360,45.160,54.840,25.56,288,1.000,bicubic,-79.340,-53.680,-67
regnety_320.pycls_in1k,15.627,84.373,44.760,55.240,145.05,224,0.875,bicubic,-78.913,-54.030,+9
vit_base_patch32_384.augreg_in1k,15.600,84.400,44.147,55.853,88.30,384,1.000,bicubic,-78.040,-54.093,+127
convmixer_768_32.in1k,15.507,84.493,47.907,52.093,21.11,224,0.960,bicubic,-78.993,-50.953,+17
ecaresnet26t.ra2_in1k,15.453,84.547,47.960,52.040,16.01,320,0.950,bicubic,-78.867,-50.760,+38
resnext50d_32x4d.bt_in1k,15.400,84.600,46.187,53.813,25.05,288,0.950,bicubic,-79.150,-52.503,+3
coat_tiny.in1k,15.400,84.600,45.587,54.413,5.50,224,0.900,bicubic,-78.180,-52.833,+129
resnet50d.a2_in1k,15.387,84.613,44.813,55.187,25.58,288,1.000,bicubic,-79.193,-53.877,-4
skresnext50_32x4d.ra_in1k,15.373,84.627,44.547,55.453,27.48,224,0.875,bicubic,-78.867,-54.143,+43
resnext50_32x4d.a2_in1k,15.293,84.707,45.253,54.747,25.03,288,1.000,bicubic,-79.287,-53.397,-5
efficientvit_b1.r288_in1k,15.280,84.720,46.600,53.400,9.10,288,1.000,bicubic,-79.210,-51.920,+11
seresnet33ts.ra2_in1k,15.267,84.733,47.373,52.627,19.78,288,1.000,bicubic,-79.773,-51.527,-91
vit_relpos_base_patch32_plus_rpn_256.sw_in1k,15.240,84.760,42.640,57.360,119.42,256,0.900,bicubic,-78.500,-55.900,+107
cait_xxs24_224.fb_dist_in1k,15.160,84.840,44.893,55.107,11.96,224,1.000,bicubic,-78.450,-53.567,+119
eca_resnet33ts.ra2_in1k,15.053,84.947,49.013,50.987,19.68,288,1.000,bicubic,-79.567,-49.747,-21
vit_base_patch16_224.augreg_in1k,15.013,84.987,42.027,57.973,86.57,224,0.900,bicubic,-78.627,-56.373,+115
fastvit_s12.apple_in1k,14.920,85.080,45.320,54.680,9.47,256,0.900,bicubic,-79.210,-53.430,+56
repvit_m1_0.dist_450e_in1k,14.907,85.093,46.827,53.173,7.30,224,0.950,bicubic,-79.623,-52.053,-3
levit_conv_192.fb_dist_in1k,14.907,85.093,44.933,55.067,10.95,224,0.900,bicubic,-79.263,-53.747,+46
levit_192.fb_dist_in1k,14.880,85.120,44.933,55.067,10.95,224,0.900,bicubic,-79.290,-53.607,+43
seresnet50.a2_in1k,14.840,85.160,44.400,55.600,28.09,288,1.000,bicubic,-79.820,-54.320,-34
poolformerv2_s24.sail_in1k,14.800,85.200,45.920,54.080,21.34,224,1.000,bicubic,-79.850,-52.920,-33
repvit_m1_1.dist_300e_in1k,14.760,85.240,47.227,52.773,8.80,224,0.950,bicubic,-80.000,-51.483,-57
rexnet_150.nav_in1k,14.733,85.267,46.880,53.120,9.73,224,0.875,bicubic,-79.747,-51.920,0
darknet53.c2ns_in1k,14.680,85.320,47.107,52.893,41.61,288,1.000,bicubic,-79.940,-51.793,-30
gcresnet33ts.ra2_in1k,14.667,85.333,46.320,53.680,19.88,288,1.000,bicubic,-80.253,-52.490,-86
resnet50.tv2_in1k,14.640,85.360,46.973,53.027,25.56,224,0.965,bilinear,-80.000,-51.827,-36
res2net50d.in1k,14.627,85.373,44.480,55.520,25.72,224,0.875,bilinear,-79.693,-54.160,+18
darknetaa53.c2ns_in1k,14.560,85.440,45.427,54.573,36.02,288,1.000,bilinear,-79.910,-53.393,-4
coat_lite_mini.in1k,14.560,85.440,44.493,55.507,11.01,224,0.900,bicubic,-79.480,-54.057,+52
efficientnet_el_pruned.in1k,14.480,85.520,46.107,53.893,10.59,300,0.904,bicubic,-79.910,-52.633,+3
efficientnet_b2.ra_in1k,14.440,85.560,46.053,53.947,9.11,288,1.000,bicubic,-80.180,-52.727,-33
poolformer_s24.sail_in1k,14.267,85.733,47.360,52.640,21.39,224,0.900,bicubic,-80.293,-51.540,-26
legacy_seresnext101_32x4d.in1k,14.173,85.827,43.000,57.000,48.96,224,0.875,bilinear,-80.197,-55.580,+2
fbnetv3_d.ra2_in1k,14.093,85.907,46.467,53.533,10.31,256,0.950,bilinear,-79.827,-52.153,+59
gernet_m.idstcv_in1k,14.080,85.920,46.080,53.920,21.14,224,0.875,bilinear,-80.540,-52.830,-40
pvt_v2_b1.in1k,14.027,85.973,47.733,52.267,14.01,224,0.900,bicubic,-79.773,-50.927,+73
repvit_m2.dist_in1k,14.013,85.987,46.373,53.627,8.80,224,0.950,bicubic,-80.727,-52.337,-69
mobilevitv2_125.cvnets_in1k,14.013,85.987,45.027,54.973,7.48,256,0.888,bicubic,-79.947,-53.523,+53
dpn68b.ra_in1k,13.893,86.107,40.307,59.693,12.61,288,1.000,bicubic,-80.107,-58.193,+47
resnext101_32x4d.gluon_in1k,13.853,86.147,41.640,58.360,44.18,224,0.875,bicubic,-80.687,-57.140,-26
seresnext50_32x4d.gluon_in1k,13.627,86.373,43.773,56.227,27.56,224,0.875,bicubic,-80.703,-54.837,0
fastvit_t12.apple_in1k,13.613,86.387,43.307,56.693,7.55,256,0.900,bicubic,-80.307,-55.303,+54
pit_xs_distilled_224.in1k,13.573,86.427,45.173,54.827,11.00,224,0.900,bicubic,-80.207,-53.327,+67
resnet152.a3_in1k,13.547,86.453,43.387,56.613,60.19,224,0.950,bicubic,-81.183,-55.293,-70
resmlp_36_224.fb_in1k,13.467,86.533,46.667,53.333,44.69,224,0.875,bicubic,-80.723,-51.943,+11
repvgg_b2g4.rvgg_in1k,13.467,86.533,43.853,56.147,61.76,224,0.875,bilinear,-80.413,-54.807,+57
efficientformerv2_s1.snap_dist_in1k,13.453,86.547,42.933,57.067,6.19,224,0.950,bicubic,-80.747,-55.707,+9
vit_small_patch16_224.augreg_in1k,13.387,86.613,41.427,58.573,22.05,224,0.900,bicubic,-80.503,-57.013,+52
eca_botnext26ts_256.c1_in1k,13.320,86.680,42.133,57.867,10.59,256,0.950,bicubic,-80.460,-56.487,+62
visformer_tiny.in1k,13.307,86.693,43.933,56.067,10.32,224,0.900,bicubic,-80.253,-54.457,+86
regnetx_320.pycls_in1k,13.293,86.707,40.747,59.253,107.81,224,0.875,bicubic,-81.157,-58.173,-22
resnet101d.gluon_in1k,13.200,86.800,41.547,58.453,44.57,224,0.875,bicubic,-81.030,-56.993,+1
efficientnet_b3_pruned.in1k,13.160,86.840,45.200,54.800,9.86,300,0.904,bicubic,-81.470,-53.560,-62
resnet50.b1k_in1k,13.093,86.907,43.947,56.053,25.56,288,1.000,bicubic,-81.767,-54.863,-101
mixnet_xl.ra_in1k,13.080,86.920,43.213,56.787,11.90,224,0.875,bicubic,-81.100,-55.107,+4
cspresnext50.ra_in1k,13.053,86.947,44.973,55.027,20.57,256,0.887,bilinear,-81.777,-53.837,-96
efficientformer_l1.snap_dist_in1k,13.013,86.987,45.600,54.400,12.29,224,0.950,bicubic,-81.467,-53.230,-35
regnetx_016.tv2_in1k,13.000,87.000,45.427,54.573,9.19,224,0.965,bicubic,-81.130,-53.193,+13
repvit_m1_0.dist_300e_in1k,13.000,87.000,44.413,55.587,7.30,224,0.950,bicubic,-81.300,-54.437,-13
nf_regnet_b1.ra2_in1k,12.947,87.053,44.373,55.627,10.22,288,0.900,bicubic,-81.163,-54.247,+12
eca_halonext26ts.c1_in1k,12.947,87.053,42.813,57.187,10.76,256,0.940,bicubic,-81.093,-55.677,+21
mobilevit_s.cvnets_in1k,12.907,87.093,40.760,59.240,5.58,256,0.900,bicubic,-80.253,-57.560,+120
pit_xs_224.in1k,12.827,87.173,42.813,57.187,10.62,224,0.900,bicubic,-80.273,-55.517,+126
tf_efficientnet_b3.in1k,12.787,87.213,43.627,56.373,12.23,300,0.904,bicubic,-81.753,-55.223,-53
resnet50.b2k_in1k,12.760,87.240,44.133,55.867,25.56,288,1.000,bicubic,-81.970,-54.797,-93
resnext50_32x4d.tv2_in1k,12.693,87.307,43.093,56.907,25.03,224,0.965,bilinear,-81.927,-55.617,-70
inception_v3.gluon_in1k,12.640,87.360,40.427,59.573,23.83,299,0.875,bicubic,-80.830,-58.143,+83
tresnet_m.miil_in1k,12.613,87.387,41.907,58.093,31.39,224,0.875,bilinear,-82.007,-56.643,-69
resnet50.a1h_in1k,12.587,87.413,44.240,55.760,25.56,224,1.000,bicubic,-82.183,-54.230,-106
crossvit_9_dagger_240.in1k,12.560,87.440,41.720,58.280,8.78,240,0.875,bicubic,-80.330,-56.520,+140
efficientvit_b1.r256_in1k,12.547,87.453,42.120,57.880,9.10,256,1.000,bicubic,-81.543,-56.240,+5
resnetblur50.bt_in1k,12.493,87.507,44.160,55.840,25.56,288,0.950,bicubic,-81.967,-54.680,-47
resmlp_24_224.fb_in1k,12.493,87.507,43.413,56.587,30.02,224,0.875,bicubic,-81.537,-55.247,+12
convnext_femto_ols.d1_in1k,12.480,87.520,43.933,56.067,5.23,288,0.950,bicubic,-81.440,-54.667,+20
coat_lite_tiny.in1k,12.467,87.533,41.053,58.947,5.72,224,0.900,bicubic,-80.773,-57.207,+101
efficientnet_em.ra2_in1k,12.400,87.600,43.933,56.067,6.90,240,0.882,bicubic,-81.430,-54.887,+28
regnety_120.pycls_in1k,12.400,87.600,42.173,57.827,51.82,224,0.875,bicubic,-82.070,-56.597,-53
regnety_160.pycls_in1k,12.227,87.773,41.387,58.613,83.59,224,0.875,bicubic,-82.133,-57.473,-41
resnet50.a2_in1k,12.173,87.827,40.453,59.547,25.56,288,1.000,bicubic,-82.457,-58.207,-87
ecaresnet50t.a3_in1k,12.147,87.853,41.573,58.427,25.57,224,0.950,bicubic,-82.203,-57.097,-41
hrnet_w64.ms_in1k,12.013,87.987,40.827,59.173,128.06,224,0.875,bilinear,-82.017,-57.503,+2
cspdarknet53.ra_in1k,11.960,88.040,43.267,56.733,27.64,256,0.887,bilinear,-82.700,-55.583,-97
xcit_tiny_12_p16_224.fb_dist_in1k,11.933,88.067,40.133,59.867,6.72,224,1.000,bicubic,-81.477,-58.377,+74
resnet101s.gluon_in1k,11.893,88.107,40.947,59.053,44.67,224,0.875,bicubic,-82.827,-57.873,-108
resnet101.a3_in1k,11.867,88.133,40.840,59.160,44.55,224,0.950,bicubic,-82.163,-57.750,-4
gmixer_24_224.ra3_in1k,11.867,88.133,37.800,62.200,24.72,224,0.875,bicubic,-80.963,-60.080,+128
nf_resnet50.ra2_in1k,11.773,88.227,45.907,54.093,25.56,288,0.940,bicubic,-82.767,-52.723,-75
fbnetv3_b.ra2_in1k,11.760,88.240,44.400,55.600,8.60,256,0.950,bilinear,-82.210,-54.090,-1
dpn92.mx_in1k,11.640,88.360,40.160,59.840,37.67,224,0.875,bicubic,-82.650,-58.590,-41
botnet26t_256.c1_in1k,11.613,88.387,40.107,59.893,12.49,256,0.950,bicubic,-81.897,-58.213,+53
convnextv2_femto.fcmae_ft_in1k,11.600,88.400,40.800,59.200,5.23,288,0.950,bicubic,-82.590,-57.860,-33
dla102x2.in1k,11.560,88.440,41.293,58.707,41.28,224,0.875,bilinear,-82.410,-57.227,-3
xception41.tf_in1k,11.560,88.440,39.067,60.933,26.97,299,0.903,bicubic,-81.870,-59.153,+61
vit_small_patch32_224.augreg_in21k_ft_in1k,11.507,88.493,39.547,60.453,22.88,224,0.900,bicubic,-80.533,-58.743,+176
efficientvit_b1.r224_in1k,11.493,88.507,40.200,59.800,9.10,224,0.950,bicubic,-82.017,-58.100,+47
levit_128.fb_dist_in1k,11.440,88.560,40.187,59.813,9.21,224,0.900,bicubic,-81.890,-58.333,+69
levit_conv_128.fb_dist_in1k,11.440,88.560,40.173,59.827,9.21,224,0.900,bicubic,-81.910,-58.197,+66
lambda_resnet26t.c1_in1k,11.387,88.613,40.187,59.813,10.96,256,0.940,bicubic,-82.443,-58.463,+8
seresnext26t_32x4d.bt_in1k,11.373,88.627,41.107,58.893,16.81,288,0.950,bicubic,-82.187,-57.383,+37
regnety_080.pycls_in1k,11.373,88.627,40.613,59.387,39.18,224,0.875,bicubic,-82.797,-57.987,-39
efficientnet_b2_pruned.in1k,11.333,88.667,42.040,57.960,8.31,260,0.890,bicubic,-82.807,-56.670,-31
resnet50.ra_in1k,11.333,88.667,41.013,58.987,25.56,288,0.950,bicubic,-82.877,-57.607,-48
tf_efficientnet_el.in1k,11.320,88.680,42.053,57.947,10.59,300,0.904,bicubic,-83.080,-56.657,-71
xcit_nano_12_p16_384.fb_dist_in1k,11.227,88.773,39.853,60.147,3.05,384,1.000,bicubic,-80.603,-58.167,+180
convnext_femto.d1_in1k,11.213,88.787,42.773,57.227,5.22,288,0.950,bicubic,-82.717,-55.747,-13
resnet152c.gluon_in1k,11.107,88.893,37.133,62.867,60.21,224,0.875,bicubic,-83.053,-61.457,-42
hrnet_w48.ms_in1k,11.093,88.907,40.307,59.693,77.47,224,0.875,bilinear,-82.827,-58.433,-13
vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k,11.093,88.907,39.933,60.067,6.36,384,1.000,bicubic,-80.947,-58.297,+160
halonet26t.a1h_in1k,11.093,88.907,38.800,61.200,12.48,256,0.950,bicubic,-82.907,-59.540,-22
mobilevitv2_100.cvnets_in1k,11.067,88.933,40.613,59.387,4.90,256,0.888,bicubic,-82.233,-57.667,+63
tf_efficientnet_b0.ns_jft_in1k,11.000,89.000,40.080,59.920,5.29,224,0.875,bicubic,-82.620,-58.560,+19
tf_efficientnetv2_b2.in1k,11.000,89.000,39.747,60.253,10.10,260,0.890,bicubic,-83.420,-58.833,-82
inception_v3.tf_adv_in1k,11.000,89.000,36.720,63.280,23.83,299,0.875,bicubic,-81.900,-61.420,+98
seresnext26d_32x4d.bt_in1k,10.987,89.013,41.347,58.653,16.81,288,0.950,bicubic,-82.453,-56.983,+39
xcit_tiny_12_p16_224.fb_in1k,10.987,89.013,37.067,62.933,6.72,224,1.000,bicubic,-81.523,-61.173,+126
regnety_008_tv.tv2_in1k,10.840,89.160,40.533,59.467,6.43,224,0.965,bicubic,-82.850,-57.957,+6
resnet34d.ra2_in1k,10.827,89.173,38.653,61.347,21.82,288,0.950,bicubic,-82.813,-59.887,+8
dpn107.mx_in1k,10.827,89.173,38.307,61.693,86.92,224,0.875,bicubic,-83.513,-60.193,-77
inception_v3.tf_in1k,10.827,89.173,36.853,63.147,23.83,299,0.875,bicubic,-82.493,-61.527,+51
mobileone_s4.apple_in1k,10.787,89.213,38.480,61.520,14.95,224,0.900,bilinear,-82.953,-59.590,-2
xcit_nano_12_p8_224.fb_dist_in1k,10.773,89.227,38.120,61.880,3.05,224,1.000,bicubic,-81.307,-59.790,+144
densenetblur121d.ra_in1k,10.547,89.453,39.707,60.293,8.00,288,0.950,bicubic,-82.073,-58.553,+109
tf_efficientnet_b2.ap_in1k,10.533,89.467,40.133,59.867,9.11,260,0.890,bicubic,-83.977,-58.487,-105
dpn131.mx_in1k,10.533,89.467,36.787,63.213,79.25,224,0.875,bicubic,-83.517,-61.923,-44
rexnet_130.nav_in1k,10.413,89.587,41.547,58.453,7.56,224,0.875,bicubic,-83.487,-56.853,-26
repvit_m0_9.dist_450e_in1k,10.400,89.600,40.120,59.880,5.49,224,0.950,bicubic,-83.200,-58.380,+7
hrnet_w44.ms_in1k,10.307,89.693,39.493,60.507,67.06,224,0.875,bilinear,-83.243,-59.107,+11
xcit_nano_12_p8_224.fb_in1k,10.307,89.693,36.973,63.027,3.05,224,1.000,bicubic,-80.703,-60.797,+180
resnext50_32x4d.a3_in1k,10.267,89.733,38.200,61.800,25.03,224,0.950,bicubic,-83.393,-60.320,-4
lambda_resnet26rpt_256.c1_in1k,10.227,89.773,38.133,61.867,10.99,256,0.940,bicubic,-83.483,-60.387,-9
resnext101_32x8d.tv_in1k,10.173,89.827,37.747,62.253,88.79,224,0.875,bilinear,-83.647,-60.833,-24
regnetx_160.pycls_in1k,10.133,89.867,38.053,61.947,54.28,224,0.875,bicubic,-84.007,-60.467,-64
legacy_seresnext50_32x4d.in1k,10.093,89.907,39.213,60.787,27.56,224,0.875,bilinear,-83.657,-59.367,-20
resnetrs50.tf_in1k,10.053,89.947,37.573,62.427,35.69,224,0.910,bicubic,-84.267,-61.067,-90
dpn98.mx_in1k,10.013,89.987,36.173,63.827,61.57,224,0.875,bicubic,-84.147,-62.467,-70
inception_v3.tv_in1k,10.013,89.987,35.227,64.773,23.83,299,0.875,bicubic,-82.717,-62.743,+88
efficientnet_b1.ft_in1k,10.000,90.000,37.600,62.400,7.79,256,1.000,bicubic,-83.250,-60.690,+38
legacy_xception.tf_in1k,9.987,90.013,37.987,62.013,22.86,299,0.897,bicubic,-83.473,-60.543,+13
resnet33ts.ra2_in1k,9.947,90.053,39.840,60.160,19.68,288,1.000,bicubic,-84.153,-58.810,-64
regnety_064.pycls_in1k,9.933,90.067,39.093,60.907,30.58,224,0.875,bicubic,-84.207,-59.657,-71
resnet152.gluon_in1k,9.733,90.267,36.093,63.907,60.19,224,0.875,bicubic,-84.337,-62.367,-63
tf_efficientnet_lite3.in1k,9.680,90.320,39.013,60.987,8.20,300,0.904,bilinear,-84.520,-59.617,-87
tf_efficientnet_b2.aa_in1k,9.667,90.333,38.893,61.107,9.11,260,0.890,bicubic,-84.713,-59.717,-109
tf_efficientnet_cc_b1_8e.in1k,9.573,90.427,36.840,63.160,39.72,240,0.882,bicubic,-84.347,-61.420,-47
res2net101_26w_4s.in1k,9.507,90.493,35.093,64.907,45.21,224,0.875,bilinear,-84.213,-63.217,-25
resnet50.ram_in1k,9.480,90.520,35.507,64.493,25.56,288,0.950,bicubic,-85.040,-63.143,-129
legacy_seresnet152.in1k,9.333,90.667,37.373,62.627,66.82,224,0.875,bilinear,-84.047,-60.967,+13
cspresnet50.ra_in1k,9.293,90.707,39.613,60.387,21.62,256,0.887,bilinear,-84.447,-59.027,-32
repvit_m0_9.dist_300e_in1k,9.293,90.707,38.840,61.160,5.49,224,0.950,bicubic,-84.147,-59.870,+3
resnet34.a1_in1k,9.267,90.733,34.947,65.053,21.80,288,1.000,bicubic,-83.833,-63.363,+38
hrnet_w40.ms_in1k,9.240,90.760,36.920,63.080,57.56,224,0.875,bilinear,-84.260,-61.620,-6
resnet32ts.ra2_in1k,9.213,90.787,38.600,61.400,17.96,288,1.000,bicubic,-84.617,-60.040,-48
regnetx_120.pycls_in1k,9.213,90.787,37.200,62.800,46.11,224,0.875,bicubic,-85.017,-61.470,-100
crossvit_tiny_240.in1k,9.133,90.867,34.600,65.400,7.01,240,0.875,bicubic,-81.097,-62.990,+179
resnest26d.gluon_in1k,9.053,90.947,37.840,62.160,17.07,224,0.875,bilinear,-84.267,-60.520,+11
vit_tiny_patch16_224.augreg_in21k_ft_in1k,9.053,90.947,34.627,65.373,5.72,224,0.900,bicubic,-82.717,-63.413,+130
resnet50d.a3_in1k,9.040,90.960,37.307,62.693,25.58,224,0.950,bicubic,-84.440,-61.143,-8
gcresnext26ts.ch_in1k,8.987,91.013,36.920,63.080,10.48,288,1.000,bicubic,-84.173,-61.450,+26
vit_base_patch16_224.sam_in1k,8.987,91.013,36.133,63.867,86.57,224,0.900,bicubic,-85.163,-62.367,-93
regnety_040.pycls_in1k,8.933,91.067,37.067,62.933,20.65,224,0.875,bicubic,-84.947,-61.453,-59
resnext50_32x4d.gluon_in1k,8.933,91.067,36.293,63.707,25.03,224,0.875,bicubic,-84.877,-62.127,-53
rexnet_100.nav_in1k,8.893,91.107,36.413,63.587,4.80,224,0.875,bicubic,-84.127,-61.777,+33
mixnet_l.ft_in1k,8.893,91.107,36.240,63.760,7.33,224,0.875,bicubic,-84.537,-62.190,-8
efficientvit_m5.r224_in1k,8.893,91.107,34.587,65.413,12.47,224,0.875,bicubic,-83.567,-63.403,+82
bat_resnext26ts.ch_in1k,8.880,91.120,36.453,63.547,10.73,256,0.900,bicubic,-84.440,-62.167,+3
convit_tiny.fb_in1k,8.867,91.133,34.307,65.693,5.71,224,0.875,bicubic,-81.793,-63.423,+156
mobilenetv3_large_100.miil_in21k_ft_in1k,8.853,91.147,33.080,66.920,5.48,224,0.875,bilinear,-83.407,-64.540,+90
hrnet_w18.ms_aug_in1k,8.747,91.253,38.787,61.213,21.30,224,0.950,bilinear,-84.803,-59.913,-29
resnet50.bt_in1k,8.653,91.347,38.720,61.280,25.56,288,0.950,bicubic,-85.667,-59.810,-127
levit_conv_128s.fb_dist_in1k,8.653,91.347,33.093,66.907,7.78,224,0.900,bicubic,-83.307,-64.967,+107
dla169.in1k,8.640,91.360,36.000,64.000,53.39,224,0.875,bilinear,-84.710,-62.600,-10
levit_128s.fb_dist_in1k,8.640,91.360,33.067,66.933,7.78,224,0.900,bicubic,-83.320,-64.823,+103
mixer_b16_224.goog_in21k_ft_in1k,8.627,91.373,29.413,70.587,59.88,224,0.875,bicubic,-83.253,-68.627,+109
repvit_m1.dist_in1k,8.613,91.387,37.293,62.707,5.49,224,0.950,bicubic,-84.677,-61.147,0
hrnet_w30.ms_in1k,8.587,91.413,37.067,62.933,37.71,224,0.875,bilinear,-84.613,-61.423,+3
eca_resnext26ts.ch_in1k,8.560,91.440,36.827,63.173,10.30,288,1.000,bicubic,-84.500,-61.573,+17
ghostnetv2_160.in1k,8.560,91.440,36.627,63.373,12.39,224,0.875,bicubic,-84.430,-61.603,+23
legacy_seresnet101.in1k,8.533,91.467,35.960,64.040,49.33,224,0.875,bilinear,-84.777,-62.560,-8
convnext_atto_ols.a2_in1k,8.533,91.467,35.000,65.000,3.70,288,0.950,bicubic,-84.557,-63.470,+13
tf_efficientnet_b2.in1k,8.520,91.480,36.520,63.480,9.11,260,0.890,bicubic,-85.590,-61.930,-106
tf_efficientnet_b1.ap_in1k,8.453,91.547,35.240,64.760,7.79,240,0.882,bicubic,-85.227,-63.120,-58
repvgg_b2.rvgg_in1k,8.440,91.560,36.480,63.520,89.02,224,0.875,bilinear,-85.060,-62.090,-38
ese_vovnet19b_dw.ra_in1k,8.307,91.693,36.973,63.027,6.54,288,0.950,bicubic,-84.843,-61.277,+2
resmlp_12_224.fb_distilled_in1k,8.307,91.693,36.853,63.147,15.35,224,0.875,bicubic,-84.513,-61.287,+31
crossvit_9_240.in1k,8.280,91.720,34.107,65.893,8.55,240,0.875,bicubic,-82.350,-63.623,+139
dla102x.in1k,8.187,91.813,37.067,62.933,26.31,224,0.875,bilinear,-85.303,-61.433,-39
seresnext26ts.ch_in1k,8.147,91.853,36.093,63.907,10.39,288,1.000,bicubic,-84.813,-62.087,+17
hrnet_w32.ms_in1k,8.053,91.947,37.560,62.440,41.23,224,0.875,bilinear,-85.477,-60.890,-48
resnet101c.gluon_in1k,8.027,91.973,33.320,66.680,44.57,224,0.875,bicubic,-85.643,-65.100,-65
vit_base_patch32_224.augreg_in1k,7.987,92.013,30.453,69.547,88.22,224,0.900,bicubic,-83.203,-66.937,+113
cs3darknet_m.c2ns_in1k,7.960,92.040,36.507,63.493,9.31,288,0.950,bicubic,-85.390,-62.103,-29
poolformerv2_s12.sail_in1k,7.960,92.040,34.560,65.440,11.89,224,1.000,bicubic,-85.000,-63.800,+13
resnet50d.gluon_in1k,7.947,92.053,34.987,65.013,25.58,224,0.875,bicubic,-85.803,-63.403,-79
resnet26t.ra2_in1k,7.893,92.107,36.720,63.280,16.01,320,1.000,bicubic,-85.307,-61.690,-17
fastvit_t8.apple_dist_in1k,7.853,92.147,34.667,65.333,4.03,256,0.900,bicubic,-84.687,-63.503,+43
res2net50_26w_8s.in1k,7.853,92.147,33.707,66.293,48.40,224,0.875,bilinear,-85.537,-64.463,-37
dla60_res2next.in1k,7.840,92.160,34.960,65.040,17.03,224,0.875,bilinear,-85.350,-63.440,-18
repghostnet_200.in1k,7.800,92.200,37.200,62.800,9.80,224,0.875,bicubic,-85.700,-61.530,-52
mobilevitv2_075.cvnets_in1k,7.773,92.227,33.720,66.280,2.87,256,0.888,bicubic,-83.987,-64.060,+87
convnextv2_atto.fcmae_ft_in1k,7.773,92.227,32.907,67.093,3.71,288,0.950,bicubic,-85.197,-65.253,+4
mobilevit_xs.cvnets_in1k,7.733,92.267,32.520,67.480,2.32,256,0.900,bicubic,-83.087,-65.410,+114
tf_efficientnetv2_b1.in1k,7.720,92.280,34.613,65.387,8.14,240,0.882,bicubic,-86.230,-64.007,-111
deit_tiny_distilled_patch16_224.fb_in1k,7.693,92.307,33.507,66.493,5.91,224,0.900,bicubic,-83.017,-64.053,+116
regnety_032.pycls_in1k,7.680,92.320,34.280,65.720,19.44,224,0.875,bicubic,-85.730,-64.360,-48
efficientformerv2_s0.snap_dist_in1k,7.667,92.333,32.653,67.347,3.60,224,0.950,bicubic,-84.293,-65.407,+72
convnext_atto.d2_in1k,7.613,92.387,35.053,64.947,3.70,288,0.950,bicubic,-85.177,-63.097,+12
dla60_res2net.in1k,7.600,92.400,34.613,65.387,20.85,224,0.875,bilinear,-85.570,-63.817,-27
efficientnet_b1_pruned.in1k,7.480,92.520,34.480,65.520,6.33,240,0.882,bicubic,-85.300,-63.560,+11
regnetx_064.pycls_in1k,7.373,92.627,34.360,65.640,26.21,224,0.875,bicubic,-86.527,-64.280,-111
wide_resnet101_2.tv_in1k,7.360,92.640,34.147,65.853,126.89,224,0.875,bilinear,-86.380,-64.083,-93
densenet121.ra_in1k,7.333,92.667,35.480,64.520,7.98,288,0.950,bicubic,-85.187,-62.740,+28
deit_tiny_patch16_224.fb_in1k,7.293,92.707,30.680,69.320,5.72,224,0.900,bicubic,-82.367,-66.770,+134
regnetx_008.tv2_in1k,7.280,92.720,34.133,65.867,7.26,224,0.965,bicubic,-85.270,-64.047,+22
resnet50s.gluon_in1k,7.280,92.720,33.453,66.547,25.68,224,0.875,bicubic,-86.360,-65.007,-86
resnet101.gluon_in1k,7.267,92.733,32.773,67.227,44.55,224,0.875,bicubic,-86.483,-65.607,-102
resnet34.a2_in1k,7.267,92.733,31.813,68.187,21.80,288,1.000,bicubic,-85.453,-66.357,+10
edgenext_x_small.in1k,7.267,92.733,30.920,69.080,2.34,288,1.000,bicubic,-84.443,-66.680,+76
hardcorenas_e.miil_green_in1k,7.240,92.760,33.307,66.693,8.07,224,0.875,bilinear,-85.330,-64.793,+16
efficientnet_b0.ra_in1k,7.213,92.787,33.987,66.013,5.29,224,0.875,bicubic,-85.477,-64.083,+8
tf_mixnet_l.in1k,7.173,92.827,31.667,68.333,7.33,224,0.875,bicubic,-86.147,-66.363,-50
tf_efficientnet_b1.aa_in1k,7.160,92.840,33.027,66.973,7.79,240,0.882,bicubic,-86.330,-65.333,-73
tf_efficientnet_cc_b0_8e.in1k,7.133,92.867,31.787,68.213,24.01,224,0.875,bicubic,-85.707,-66.393,-10
convmixer_1024_20_ks9_p14.in1k,7.093,92.907,33.053,66.947,24.38,224,0.960,bicubic,-85.307,-65.217,+25
resmlp_12_224.fb_in1k,7.013,92.987,33.933,66.067,15.35,224,0.875,bicubic,-85.207,-64.217,+35
cs3darknet_focus_m.c2ns_in1k,6.933,93.067,34.587,65.413,9.30,288,0.950,bicubic,-86.037,-63.473,-24
fastvit_t8.apple_in1k,6.893,93.107,33.400,66.600,4.03,256,0.900,bicubic,-85.167,-64.530,+42
hardcorenas_f.miil_green_in1k,6.880,93.120,34.067,65.933,8.20,224,0.875,bilinear,-86.090,-64.323,-25
pit_ti_distilled_224.in1k,6.840,93.160,30.947,69.053,5.10,224,0.900,bicubic,-83.930,-66.663,+88
ghostnetv2_130.in1k,6.707,93.293,32.960,67.040,8.96,224,0.875,bicubic,-85.613,-65.300,+25
efficientnet_es.ra_in1k,6.693,93.307,33.973,66.027,5.44,224,0.875,bicubic,-86.477,-64.437,-49
selecsls60b.in1k,6.693,93.307,33.293,66.707,32.77,224,0.875,bicubic,-86.617,-64.997,-60
res2net50_26w_6s.in1k,6.693,93.307,31.653,68.347,37.05,224,0.875,bilinear,-86.707,-66.627,-73
poolformer_s12.sail_in1k,6.653,93.347,34.520,65.480,11.92,224,0.900,bicubic,-85.957,-63.660,-2
mixnet_m.ft_in1k,6.653,93.347,32.053,67.947,5.01,224,0.875,bicubic,-85.757,-66.097,+14
dpn68b.mx_in1k,6.640,93.360,32.907,67.093,12.61,224,0.875,bicubic,-86.150,-65.173,-18
tinynet_a.in1k,6.640,93.360,32.227,67.773,6.19,192,0.875,bicubic,-85.800,-65.853,+9
legacy_seresnext26_32x4d.in1k,6.627,93.373,33.240,66.760,16.79,224,0.875,bicubic,-86.013,-64.880,-8
tf_efficientnet_b1.in1k,6.627,93.373,32.640,67.360,7.79,240,0.882,bicubic,-86.473,-65.660,-49
mobileone_s3.apple_in1k,6.627,93.373,32.147,67.853,10.17,224,0.900,bilinear,-86.313,-66.043,-30
resnet50.a3_in1k,6.587,93.413,32.053,67.947,25.56,224,0.950,bicubic,-86.133,-65.957,-15
regnety_004.tv2_in1k,6.533,93.467,30.480,69.520,4.34,224,0.965,bicubic,-85.047,-67.410,+51
dla60x.in1k,6.493,93.507,34.080,65.920,17.35,224,0.875,bilinear,-86.587,-64.420,-50
repvgg_b1.rvgg_in1k,6.467,93.533,33.800,66.200,57.42,224,0.875,bilinear,-86.863,-64.570,-79
skresnet34.ra_in1k,6.467,93.533,31.573,68.427,22.28,224,0.875,bicubic,-85.913,-66.577,+7
repghostnet_150.in1k,6.453,93.547,32.307,67.693,6.58,224,0.875,bicubic,-85.917,-65.743,+6
hardcorenas_d.miil_green_in1k,6.453,93.547,32.187,67.813,7.50,224,0.875,bilinear,-85.947,-65.893,+4
resnet26d.bt_in1k,6.307,93.693,32.747,67.253,16.01,288,0.950,bicubic,-86.213,-65.463,-7
regnetx_080.pycls_in1k,6.293,93.707,32.373,67.627,39.57,224,0.875,bicubic,-87.587,-66.217,-145
resnet18.fb_swsl_ig1b_ft_in1k,6.253,93.747,31.600,68.400,11.69,224,0.875,bilinear,-84.447,-66.100,+72
legacy_seresnet50.in1k,6.200,93.800,32.680,67.320,28.09,224,0.875,bilinear,-86.760,-65.730,-44
pit_ti_224.in1k,6.120,93.880,30.240,69.760,4.85,224,0.900,bicubic,-83.820,-67.210,+89
resnet152.tv_in1k,6.067,93.933,32.080,67.920,60.19,224,0.875,bilinear,-87.253,-65.960,-85
wide_resnet50_2.tv_in1k,6.013,93.987,32.160,67.840,68.88,224,0.875,bilinear,-87.147,-66.280,-70
tf_efficientnet_cc_b0_4e.in1k,5.987,94.013,29.587,70.413,13.31,224,0.875,bicubic,-86.613,-68.493,-21
regnetx_040.pycls_in1k,5.947,94.053,31.493,68.507,22.12,224,0.875,bicubic,-87.613,-67.037,-120
seresnet50.a3_in1k,5.947,94.053,30.827,69.173,28.09,224,0.950,bicubic,-86.123,-67.213,+11
mixer_l16_224.goog_in21k_ft_in1k,5.880,94.120,18.547,81.453,208.20,224,0.875,bicubic,-81.270,-74.973,+120
tf_efficientnetv2_b0.in1k,5.867,94.133,30.787,69.213,7.14,224,0.875,bicubic,-87.243,-67.603,-71
dla102.in1k,5.827,94.173,32.760,67.240,33.27,224,0.875,bilinear,-87.223,-65.790,-65
selecsls60.in1k,5.680,94.320,32.520,67.480,30.67,224,0.875,bicubic,-87.330,-65.780,-62
regnety_016.pycls_in1k,5.667,94.333,30.480,69.520,11.20,224,0.875,bicubic,-87.373,-67.890,-66
res2next50.in1k,5.653,94.347,30.893,69.107,24.67,224,0.875,bilinear,-87.187,-67.287,-52
hardcorenas_c.miil_green_in1k,5.653,94.347,30.427,69.573,5.52,224,0.875,bilinear,-86.367,-67.413,+9
hrnet_w18_small_v2.gluon_in1k,5.520,94.480,31.853,68.147,15.60,224,0.875,bicubic,-87.250,-66.557,-44
hrnet_w18.ms_in1k,5.493,94.507,30.907,69.093,21.30,224,0.875,bilinear,-86.827,-67.163,-12
resnest14d.gluon_in1k,5.453,94.547,28.547,71.453,10.61,224,0.875,bilinear,-86.267,-69.323,+24
ghostnetv2_100.in1k,5.387,94.613,28.560,71.440,6.16,224,0.875,bicubic,-85.513,-69.140,+45
tf_efficientnet_em.in1k,5.360,94.640,31.080,68.920,6.90,240,0.882,bicubic,-87.580,-67.100,-61
tf_efficientnet_lite2.in1k,5.333,94.667,30.880,69.120,6.09,260,0.890,bicubic,-87.317,-67.350,-41
gernet_s.idstcv_in1k,5.320,94.680,30.147,69.853,8.17,224,0.875,bilinear,-86.810,-68.043,-7
resnext26ts.ra2_in1k,5.307,94.693,29.680,70.320,10.30,288,1.000,bicubic,-86.823,-68.350,-8
tf_efficientnet_b0.ap_in1k,5.307,94.693,28.840,71.160,5.29,224,0.875,bicubic,-86.893,-69.180,-12
efficientvit_m4.r224_in1k,5.307,94.693,28.013,71.987,8.80,224,0.875,bicubic,-85.273,-69.517,+55
repvgg_b1g4.rvgg_in1k,5.240,94.760,30.760,69.240,39.97,224,0.875,bilinear,-87.760,-67.670,-75
resnet34.bt_in1k,5.213,94.787,29.440,70.560,21.80,288,0.950,bicubic,-87.197,-68.430,-29
xcit_nano_12_p16_224.fb_dist_in1k,5.200,94.800,26.493,73.507,3.05,224,1.000,bicubic,-84.500,-70.607,+68
res2net50_26w_4s.in1k,5.173,94.827,29.360,70.640,25.70,224,0.875,bilinear,-87.317,-68.670,-35
efficientvit_m3.r224_in1k,5.160,94.840,27.400,72.600,6.90,224,0.875,bicubic,-84.700,-70.140,+64
vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k,5.080,94.920,27.027,72.973,6.34,224,0.900,bicubic,-84.100,-70.193,+74
mobilenetv3_large_100.ra_in1k,5.067,94.933,28.200,71.800,5.48,224,0.875,bicubic,-86.283,-69.510,+17
tf_efficientnet_b0.aa_in1k,5.053,94.947,28.760,71.240,5.29,224,0.875,bicubic,-87.187,-69.240,-23
tf_mixnet_m.in1k,5.053,94.947,28.187,71.813,5.01,224,0.875,bicubic,-87.247,-69.703,-27
res2net50_14w_8s.in1k,5.040,94.960,28.733,71.267,25.06,224,0.875,bilinear,-87.730,-69.427,-62
regnetx_004_tv.tv2_in1k,5.000,95.000,27.560,72.440,5.50,224,0.965,bicubic,-85.640,-70.040,+39
repghostnet_130.in1k,4.987,95.013,29.653,70.347,5.48,224,0.875,bicubic,-86.903,-68.277,-5
mixnet_s.ft_in1k,4.947,95.053,28.573,71.427,4.13,224,0.875,bicubic,-86.873,-69.127,-2
hardcorenas_b.miil_green_in1k,4.947,95.053,28.040,71.960,5.18,224,0.875,bilinear,-86.813,-69.820,+2
mobilenetv3_rw.rmsp_in1k,4.933,95.067,29.853,70.147,5.48,224,0.875,bicubic,-86.277,-67.807,+13
hardcorenas_a.miil_green_in1k,4.893,95.107,28.093,71.907,5.26,224,0.875,bilinear,-86.457,-69.757,+7
regnetx_032.pycls_in1k,4.880,95.120,30.227,69.773,15.30,224,0.875,bicubic,-88.230,-68.163,-104
resnet50c.gluon_in1k,4.880,95.120,28.080,71.920,25.58,224,0.875,bicubic,-88.150,-70.320,-95
xcit_nano_12_p16_224.fb_in1k,4.853,95.147,25.467,74.533,3.05,224,1.000,bicubic,-83.767,-71.323,+73
resnext50_32x4d.tv_in1k,4.827,95.173,30.267,69.733,25.03,224,0.875,bilinear,-87.923,-68.003,-71
densenet161.tv_in1k,4.733,95.267,29.560,70.440,28.68,224,0.875,bicubic,-87.747,-68.740,-51
resnet101.tv_in1k,4.693,95.307,29.347,70.653,44.55,224,0.875,bilinear,-88.117,-68.903,-79
selecsls42b.in1k,4.680,95.320,28.573,71.427,32.46,224,0.875,bicubic,-87.610,-69.537,-40
tf_efficientnet_lite1.in1k,4.600,95.400,28.347,71.653,5.42,240,0.882,bicubic,-88.030,-69.713,-67
mobilenetv2_120d.ra_in1k,4.547,95.453,29.320,70.680,5.83,224,0.875,bicubic,-87.853,-68.740,-48
mobileone_s2.apple_in1k,4.520,95.480,29.133,70.867,7.88,224,0.900,bilinear,-88.300,-69.137,-85
tf_efficientnet_b0.in1k,4.427,95.573,26.680,73.320,5.29,224,0.875,bicubic,-87.653,-71.480,-34
pvt_v2_b0.in1k,4.347,95.653,25.960,74.040,3.67,224,0.900,bicubic,-84.433,-70.900,+61
vit_base_patch32_224.sam_in1k,4.333,95.667,24.387,75.613,88.22,224,0.900,bicubic,-85.407,-72.613,+41
resnet50.am_in1k,4.267,95.733,28.627,71.373,25.56,224,0.875,bicubic,-89.703,-70.003,-216
edgenext_xx_small.in1k,4.267,95.733,24.093,75.907,1.33,288,1.000,bicubic,-84.623,-72.887,+57
tinynet_b.in1k,4.200,95.800,26.787,73.213,3.73,188,0.875,bicubic,-86.720,-70.873,+5
efficientnet_es_pruned.in1k,4.200,95.800,26.453,73.547,5.44,224,0.875,bicubic,-86.970,-71.287,-1
repghostnet_111.in1k,4.147,95.853,26.187,73.813,4.54,224,0.875,bicubic,-86.563,-71.283,+13
densenet201.tv_in1k,4.133,95.867,27.547,72.453,20.01,224,0.875,bicubic,-88.607,-70.683,-85
resnet50.gluon_in1k,4.120,95.880,26.960,73.040,25.56,224,0.875,bicubic,-88.420,-71.080,-72
fbnetc_100.rmsp_in1k,4.107,95.893,25.907,74.093,5.57,224,0.875,bilinear,-86.623,-71.303,+8
semnasnet_100.rmsp_in1k,3.947,96.053,26.933,73.067,3.89,224,0.875,bicubic,-87.363,-70.627,-14
mobilevitv2_050.cvnets_in1k,3.947,96.053,23.947,76.053,1.37,256,0.888,bicubic,-84.233,-73.043,+59
resnet26.bt_in1k,3.933,96.067,28.213,71.787,16.00,288,0.950,bicubic,-88.057,-69.807,-42
repvgg_a2.rvgg_in1k,3.933,96.067,27.227,72.773,28.21,224,0.875,bilinear,-88.007,-70.873,-35
dpn68.mx_in1k,3.893,96.107,25.693,74.307,12.61,224,0.875,bicubic,-88.097,-72.527,-42
tf_mixnet_s.in1k,3.880,96.120,25.267,74.733,4.13,224,0.875,bicubic,-87.640,-72.353,-23
semnasnet_075.rmsp_in1k,3.867,96.133,27.080,72.920,2.91,224,0.875,bicubic,-86.203,-70.360,+21
tf_efficientnet_es.in1k,3.827,96.173,26.133,73.867,5.44,224,0.875,bicubic,-88.143,-71.747,-45
mobilevit_xxs.cvnets_in1k,3.827,96.173,21.733,78.267,1.27,256,0.900,bicubic,-83.333,-74.367,+58
resnet18d.ra2_in1k,3.813,96.187,26.013,73.987,11.71,288,0.950,bicubic,-86.467,-71.547,+12
regnety_008.pycls_in1k,3.787,96.213,27.160,72.840,6.26,224,0.875,bicubic,-87.943,-71.020,-32
dla60.in1k,3.747,96.253,27.947,72.053,22.04,224,0.875,bilinear,-88.453,-70.153,-62
resnet18.fb_ssl_yfcc100m_ft_in1k,3.747,96.253,25.373,74.627,11.69,224,0.875,bilinear,-86.463,-72.177,+11
mobilenetv2_140.ra_in1k,3.720,96.280,26.720,73.280,6.11,224,0.875,bicubic,-88.120,-71.140,-41
densenet169.tv_in1k,3.707,96.293,25.587,74.413,14.15,224,0.875,bicubic,-88.233,-72.553,-46
resnet18.a1_in1k,3.707,96.293,22.960,77.040,11.69,288,1.000,bicubic,-85.973,-74.140,+19
regnetx_016.pycls_in1k,3.613,96.387,26.320,73.680,9.19,224,0.875,bicubic,-88.567,-71.880,-66
efficientvit_m2.r224_in1k,3.613,96.387,21.853,78.147,4.19,224,0.875,bicubic,-84.857,-75.047,+40
spnasnet_100.rmsp_in1k,3.573,96.427,24.253,75.747,4.42,224,0.875,bilinear,-86.757,-72.937,+1
res2net50_48w_2s.in1k,3.560,96.440,26.613,73.387,25.29,224,0.875,bilinear,-88.990,-71.467,-94
tf_mobilenetv3_large_100.in1k,3.560,96.440,25.120,74.880,5.48,224,0.875,bilinear,-87.670,-72.540,-31
repghostnet_100.in1k,3.520,96.480,24.520,75.480,4.07,224,0.875,bicubic,-86.770,-72.960,-1
regnety_006.pycls_in1k,3.453,96.547,24.920,75.080,6.06,224,0.875,bicubic,-87.937,-73.080,-38
ghostnet_100.in1k,3.427,96.573,25.120,74.880,5.18,224,0.875,bicubic,-86.753,-72.170,+1
resnet34.a3_in1k,3.373,96.627,23.387,76.613,21.80,224,0.950,bicubic,-86.567,-73.793,+6
legacy_seresnet34.in1k,3.347,96.653,23.813,76.187,21.96,224,0.875,bilinear,-87.553,-73.767,-23
resnet18.a2_in1k,3.267,96.733,22.373,77.627,11.69,288,1.000,bicubic,-86.303,-74.587,+13
efficientnet_lite0.ra_in1k,3.240,96.760,25.947,74.053,4.65,224,0.875,bicubic,-87.880,-71.693,-34
dla34.in1k,3.240,96.760,23.547,76.453,15.74,224,0.875,bilinear,-87.520,-74.103,-21
efficientvit_b0.r224_in1k,3.200,96.800,19.533,80.467,3.41,224,0.950,bicubic,-84.740,-76.597,+31
mobilenetv2_110d.ra_in1k,3.187,96.813,24.573,75.427,4.52,224,0.875,bicubic,-87.773,-72.967,-31
regnety_004.pycls_in1k,3.187,96.813,22.680,77.320,4.34,224,0.875,bicubic,-87.303,-74.850,-14
tinynet_c.in1k,3.120,96.880,21.520,78.480,2.46,184,0.875,bicubic,-84.660,-74.850,+29
mnasnet_100.rmsp_in1k,3.107,96.893,24.227,75.773,4.38,224,0.875,bicubic,-87.393,-73.243,-17
repghostnet_080.in1k,3.080,96.920,21.973,78.027,3.28,224,0.875,bicubic,-85.760,-74.727,+16
tf_efficientnet_lite0.in1k,3.040,96.960,22.893,77.107,4.65,224,0.875,bicubic,-88.010,-74.687,-39
skresnet18.ra_in1k,3.027,96.973,22.813,77.187,11.96,224,0.875,bicubic,-86.633,-74.417,0
mobileone_s1.apple_in1k,2.947,97.053,24.947,75.053,4.83,224,0.900,bilinear,-88.333,-72.873,-49
vgg19_bn.tv_in1k,2.947,97.053,23.440,76.560,143.68,224,0.875,bilinear,-87.133,-74.140,-12
tinynet_d.in1k,2.853,97.147,17.787,82.213,2.34,152,0.875,bicubic,-81.867,-77.383,+40
tf_mobilenetv3_large_075.in1k,2.840,97.160,21.560,78.440,3.99,224,0.875,bilinear,-86.800,-75.630,-3
efficientvit_m1.r224_in1k,2.827,97.173,19.600,80.400,2.98,224,0.875,bicubic,-83.963,-76.430,+27
resnet14t.c3_in1k,2.760,97.240,20.213,79.787,10.08,224,0.950,bicubic,-86.230,-76.517,+3
hrnet_w18_small_v2.ms_in1k,2.720,97.280,23.720,76.280,15.60,224,0.875,bilinear,-88.480,-74.180,-52
regnetx_008.pycls_in1k,2.667,97.333,22.453,77.547,7.26,224,0.875,bicubic,-88.383,-75.267,-49
vgg16_bn.tv_in1k,2.653,97.347,23.800,76.200,138.37,224,0.875,bilinear,-87.437,-73.570,-21
resnet34.gluon_in1k,2.653,97.347,21.680,78.320,21.80,224,0.875,bicubic,-88.327,-75.950,-47
lcnet_100.ra2_in1k,2.627,97.373,20.760,79.240,2.95,224,0.875,bicubic,-86.123,-76.220,+6
vgg16.tv_in1k,2.627,97.373,20.360,79.640,138.36,224,0.875,bilinear,-85.933,-76.440,+7
repvgg_b0.rvgg_in1k,2.560,97.440,24.000,76.000,15.82,224,0.875,bilinear,-88.830,-73.700,-66
densenet121.tv_in1k,2.547,97.453,22.653,77.347,7.98,224,0.875,bicubic,-88.343,-75.057,-47
regnetx_006.pycls_in1k,2.533,97.467,20.627,79.373,6.20,224,0.875,bicubic,-87.817,-76.803,-33
hrnet_w18_small.gluon_in1k,2.507,97.493,20.653,79.347,13.19,224,0.875,bicubic,-86.963,-76.407,-12
legacy_seresnet18.in1k,2.480,97.520,20.067,79.933,11.78,224,0.875,bicubic,-86.410,-76.633,-5
lcnet_075.ra2_in1k,2.320,97.680,17.173,82.827,2.36,224,0.875,bicubic,-83.650,-78.507,+21
mobilenetv3_small_075.lamb_in1k,2.307,97.693,15.893,84.107,2.04,224,0.875,bicubic,-80.723,-78.207,+30
efficientvit_m0.r224_in1k,2.293,97.707,16.493,83.507,2.35,224,0.875,bicubic,-80.057,-77.937,+30
repghostnet_058.in1k,2.253,97.747,18.320,81.680,2.55,224,0.875,bicubic,-84.287,-77.580,+13
repvgg_a1.rvgg_in1k,2.240,97.760,21.333,78.667,14.09,224,0.875,bilinear,-88.360,-76.317,-45
mobileone_s0.apple_in1k,2.240,97.760,17.467,82.533,5.29,224,0.875,bilinear,-85.990,-78.933,0
resnet18.a3_in1k,2.227,97.773,17.773,82.227,11.69,224,0.950,bicubic,-84.223,-78.107,+11
mobilenetv2_100.ra_in1k,2.147,97.853,19.933,80.067,3.50,224,0.875,bicubic,-87.453,-77.217,-23
regnety_002.pycls_in1k,2.120,97.880,18.920,81.080,3.16,224,0.875,bicubic,-85.250,-77.690,+2
vgg19.tv_in1k,2.107,97.893,20.760,79.240,143.67,224,0.875,bilinear,-86.943,-76.110,-19
vgg13_bn.tv_in1k,2.093,97.907,20.333,79.667,133.05,224,0.875,bilinear,-86.657,-76.387,-12
tf_mobilenetv3_small_100.in1k,2.027,97.973,15.827,84.173,2.54,224,0.875,bilinear,-83.163,-79.943,+12
mobilenetv3_small_100.lamb_in1k,1.987,98.013,17.093,82.907,2.54,224,0.875,bicubic,-83.233,-78.557,+10
repghostnet_050.in1k,1.987,98.013,16.507,83.493,2.31,224,0.875,bicubic,-83.073,-78.693,+11
tf_mobilenetv3_small_075.in1k,1.987,98.013,14.840,85.160,2.04,224,0.875,bilinear,-81.513,-80.000,+16
regnetx_004.pycls_in1k,1.920,98.080,19.147,80.853,5.16,224,0.875,bicubic,-87.010,-77.973,-22
resnet34.tv_in1k,1.853,98.147,20.053,79.947,21.80,224,0.875,bilinear,-88.097,-77.287,-42
tinynet_e.in1k,1.853,98.147,14.013,85.987,2.04,106,0.875,bicubic,-77.067,-78.527,+18
vgg13.tv_in1k,1.840,98.160,18.027,81.973,133.05,224,0.875,bilinear,-85.200,-78.303,-5
mobilenetv3_small_050.lamb_in1k,1.813,98.187,12.533,87.467,1.59,224,0.875,bicubic,-75.207,-78.767,+17
lcnet_050.ra2_in1k,1.787,98.213,13.867,86.133,1.88,224,0.875,bicubic,-80.013,-79.843,+13
mnasnet_small.lamb_in1k,1.773,98.227,15.080,84.920,2.03,224,0.875,bicubic,-82.647,-80.110,+5
dla46x_c.in1k,1.747,98.253,16.387,83.613,1.07,224,0.875,bilinear,-82.483,-78.883,+5
vgg11_bn.tv_in1k,1.720,98.280,18.093,81.907,132.87,224,0.875,bilinear,-85.780,-78.727,-15
dla60x_c.in1k,1.613,98.387,18.013,81.987,1.32,224,0.875,bilinear,-84.677,-78.147,-6
tf_mobilenetv3_large_minimal_100.in1k,1.613,98.387,17.120,82.880,3.92,224,0.875,bilinear,-87.337,-79.740,-34
mobilenetv2_050.lamb_in1k,1.613,98.387,14.200,85.800,1.97,224,0.875,bicubic,-82.297,-80.520,+3
resnet10t.c3_in1k,1.600,98.400,16.053,83.947,5.44,224,0.950,bicubic,-84.620,-79.687,-8
vgg11.tv_in1k,1.560,98.440,16.187,83.813,132.86,224,0.875,bilinear,-85.020,-80.093,-13
resnet18.gluon_in1k,1.547,98.453,16.640,83.360,11.69,224,0.875,bicubic,-86.823,-80.030,-26
hrnet_w18_small.ms_in1k,1.533,98.467,18.093,81.907,13.19,224,0.875,bilinear,-87.517,-79.027,-41
dla46_c.in1k,1.493,98.507,15.227,84.773,1.30,224,0.875,bilinear,-82.117,-79.723,-2
repvgg_a0.rvgg_in1k,1.467,98.533,17.587,82.413,9.11,224,0.875,bilinear,-87.813,-79.303,-45
regnetx_002.pycls_in1k,1.373,98.627,15.053,84.947,2.68,224,0.875,bicubic,-84.767,-80.917,-13
resnet18.tv_in1k,1.160,98.840,16.227,83.773,11.69,224,0.875,bilinear,-86.220,-80.063,-25
tf_mobilenetv3_small_minimal_100.in1k,1.040,98.960,11.493,88.507,2.04,224,0.875,bilinear,-80.360,-82.187,-1
resnet50.tv_in1k,0.000,100.000,14.453,85.547,25.56,224,0.875,bilinear,-91.880,-82.807,-120
| 0 |
hf_public_repos/pytorch-image-models | hf_public_repos/pytorch-image-models/results/generate_csv_results.py | import numpy as np
import pandas as pd


# Map each base ('clean') results CSV to the test-set results CSVs that are
# compared against it.
results = {
    'results-imagenet.csv': [
        'results-imagenet-real.csv',
        'results-imagenetv2-matched-frequency.csv',
        'results-sketch.csv'
    ],
    'results-imagenet-a-clean.csv': [
        'results-imagenet-a.csv',
    ],
    'results-imagenet-r-clean.csv': [
        'results-imagenet-r.csv',
    ],
}


def diff(base_df, test_csv):
    """Annotate test_csv in place with rank / top-1 / top-5 deltas vs. base_df."""
    base_models = base_df['model'].values
    test_df = pd.read_csv(test_csv)
    test_models = test_df['model'].values

    rank_diff = np.zeros_like(test_models, dtype='object')
    top1_diff = np.zeros_like(test_models, dtype='object')
    top5_diff = np.zeros_like(test_models, dtype='object')

    for rank, model in enumerate(test_models):
        if model in base_models:
            base_rank = int(np.where(base_models == model)[0])
            top1_d = test_df['top1'][rank] - base_df['top1'][base_rank]
            top5_d = test_df['top5'][rank] - base_df['top5'][base_rank]

            # rank_diff: '+N' means the model moved up N places vs. the base ranking
            if rank == base_rank:
                rank_diff[rank] = f'0'
            elif rank > base_rank:
                rank_diff[rank] = f'-{rank - base_rank}'
            else:
                rank_diff[rank] = f'+{base_rank - rank}'

            # top1_diff
            if top1_d >= .0:
                top1_diff[rank] = f'+{top1_d:.3f}'
            else:
                top1_diff[rank] = f'-{abs(top1_d):.3f}'

            # top5_diff
            if top5_d >= .0:
                top5_diff[rank] = f'+{top5_d:.3f}'
            else:
                top5_diff[rank] = f'-{abs(top5_d):.3f}'
        else:
            # Model not present in the base results; leave the diff columns blank.
            rank_diff[rank] = ''
            top1_diff[rank] = ''
            top5_diff[rank] = ''

    test_df['top1_diff'] = top1_diff
    test_df['top5_diff'] = top5_diff
    test_df['rank_diff'] = rank_diff
    test_df['param_count'] = test_df['param_count'].map('{:,.2f}'.format)
    test_df.sort_values(['top1', 'top5', 'model'], ascending=[False, False, True], inplace=True)
    test_df.to_csv(test_csv, index=False, float_format='%.3f')


# Re-sort each base CSV and annotate its associated test CSVs with diff columns in place.
for base_results, test_results in results.items():
    base_df = pd.read_csv(base_results)
    base_df.sort_values(['top1', 'top5', 'model'], ascending=[False, False, True], inplace=True)
    for test_csv in test_results:
        diff(base_df, test_csv)
    base_df['param_count'] = base_df['param_count'].map('{:,.2f}'.format)
    base_df.to_csv(base_results, index=False, float_format='%.3f')
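

# Illustrative sketch (not part of the original script, never called above):
# _format_diffs_example is a hypothetical helper that shows how the *_diff
# strings produced by diff() read, assuming the same formatting rules. A
# positive rank_diff means the model ranks higher (closer to the top) on the
# test set than in the base results; top1_diff carries the signed top-1 change.
def _format_diffs_example(base_rank, test_rank, base_top1, test_top1):
    if test_rank == base_rank:
        rank_d = '0'
    elif test_rank > base_rank:
        rank_d = f'-{test_rank - base_rank}'
    else:
        rank_d = f'+{base_rank - test_rank}'
    top1_d = test_top1 - base_top1
    top1_s = f'+{top1_d:.3f}' if top1_d >= 0 else f'-{abs(top1_d):.3f}'
    return rank_d, top1_s


# e.g. _format_diffs_example(base_rank=3, test_rank=10, base_top1=82.5, test_top1=61.2)
# returns ('-7', '-21.300'): the model dropped 7 places and lost 21.3 top-1 points.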
| 0 |
hf_public_repos/pytorch-image-models | hf_public_repos/pytorch-image-models/results/results-imagenet-a-clean.csv | model,top1,top1_err,top5,top5_err,param_count,img_size,crop_pct,interpolation
eva02_large_patch14_448.mim_in22k_ft_in22k_in1k,98.930,1.070,99.910,0.090,305.08,448,1.000,bicubic
eva02_large_patch14_448.mim_m38m_ft_in22k_in1k,98.850,1.150,99.880,0.120,305.08,448,1.000,bicubic
eva02_large_patch14_448.mim_in22k_ft_in1k,98.840,1.160,99.830,0.170,305.08,448,1.000,bicubic
eva_giant_patch14_560.m30m_ft_in22k_in1k,98.830,1.170,99.900,0.100,"1,014.45",560,1.000,bicubic
eva_giant_patch14_336.clip_ft_in1k,98.820,1.180,99.810,0.190,"1,013.01",336,1.000,bicubic
eva_giant_patch14_336.m30m_ft_in22k_in1k,98.810,1.190,99.900,0.100,"1,013.01",336,1.000,bicubic
eva_large_patch14_336.in22k_ft_in22k_in1k,98.740,1.260,99.800,0.200,304.53,336,1.000,bicubic
eva_large_patch14_336.in22k_ft_in1k,98.730,1.270,99.870,0.130,304.53,336,1.000,bicubic
eva02_large_patch14_448.mim_m38m_ft_in1k,98.730,1.270,99.790,0.210,305.08,448,1.000,bicubic
convnextv2_huge.fcmae_ft_in22k_in1k_384,98.670,1.330,99.860,0.140,660.29,384,1.000,bicubic
eva02_base_patch14_448.mim_in22k_ft_in22k_in1k,98.640,1.360,99.800,0.200,87.12,448,1.000,bicubic
maxvit_base_tf_512.in21k_ft_in1k,98.620,1.380,99.800,0.200,119.88,512,1.000,bicubic
maxvit_xlarge_tf_512.in21k_ft_in1k,98.620,1.380,99.800,0.200,475.77,512,1.000,bicubic
maxvit_large_tf_512.in21k_ft_in1k,98.620,1.380,99.790,0.210,212.33,512,1.000,bicubic
convnextv2_huge.fcmae_ft_in22k_in1k_512,98.600,1.400,99.870,0.130,660.29,512,1.000,bicubic
beit_large_patch16_512.in22k_ft_in22k_in1k,98.560,1.440,99.840,0.160,305.67,512,1.000,bicubic
tf_efficientnet_l2.ns_jft_in1k,98.550,1.450,99.820,0.180,480.31,800,0.960,bicubic
beitv2_large_patch16_224.in1k_ft_in22k_in1k,98.540,1.460,99.760,0.240,304.43,224,0.950,bicubic
beit_large_patch16_384.in22k_ft_in22k_in1k,98.520,1.480,99.820,0.180,305.00,384,1.000,bicubic
maxvit_base_tf_384.in21k_ft_in1k,98.520,1.480,99.750,0.250,119.65,384,1.000,bicubic
tf_efficientnet_l2.ns_jft_in1k_475,98.500,1.500,99.830,0.170,480.31,475,0.936,bicubic
maxvit_xlarge_tf_384.in21k_ft_in1k,98.500,1.500,99.780,0.220,475.32,384,1.000,bicubic
maxvit_large_tf_384.in21k_ft_in1k,98.490,1.510,99.750,0.250,212.03,384,1.000,bicubic
convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384,98.480,1.520,99.780,0.220,200.13,384,1.000,bicubic
deit3_large_patch16_384.fb_in22k_ft_in1k,98.460,1.540,99.760,0.240,304.76,384,1.000,bicubic
eva_giant_patch14_224.clip_ft_in1k,98.460,1.540,99.750,0.250,"1,012.56",224,0.900,bicubic
regnety_1280.swag_ft_in1k,98.450,1.550,99.870,0.130,644.81,384,1.000,bicubic
eva02_base_patch14_448.mim_in22k_ft_in1k,98.440,1.560,99.820,0.180,87.12,448,1.000,bicubic
caformer_b36.sail_in22k_ft_in1k_384,98.440,1.560,99.800,0.200,98.75,384,1.000,bicubic
convnext_xxlarge.clip_laion2b_soup_ft_in1k,98.440,1.560,99.800,0.200,846.47,256,1.000,bicubic
convnext_xlarge.fb_in22k_ft_in1k_384,98.420,1.580,99.810,0.190,350.20,384,1.000,bicubic
vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k,98.420,1.580,99.810,0.190,632.46,336,1.000,bicubic
eva_large_patch14_196.in22k_ft_in22k_in1k,98.420,1.580,99.770,0.230,304.14,196,1.000,bicubic
convnextv2_large.fcmae_ft_in22k_in1k_384,98.400,1.600,99.760,0.240,197.96,384,1.000,bicubic
eva_large_patch14_196.in22k_ft_in1k,98.360,1.640,99.820,0.180,304.14,196,1.000,bicubic
convnextv2_base.fcmae_ft_in22k_in1k_384,98.350,1.650,99.770,0.230,88.72,384,1.000,bicubic
vit_large_patch14_clip_336.laion2b_ft_in12k_in1k,98.340,1.660,99.760,0.240,304.53,336,1.000,bicubic
vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k,98.300,1.700,99.760,0.240,632.05,224,1.000,bicubic
convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320,98.280,1.720,99.770,0.230,200.13,320,1.000,bicubic
convformer_b36.sail_in22k_ft_in1k_384,98.260,1.740,99.830,0.170,99.88,384,1.000,bicubic
maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k,98.260,1.740,99.780,0.220,116.09,384,1.000,bicubic
vit_large_patch14_clip_336.openai_ft_in12k_in1k,98.260,1.740,99.770,0.230,304.53,336,1.000,bicubic
convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384,98.250,1.750,99.760,0.240,200.13,384,1.000,bicubic
convnext_large.fb_in22k_ft_in1k_384,98.240,1.760,99.750,0.250,197.77,384,1.000,bicubic
vit_large_patch16_384.augreg_in21k_ft_in1k,98.220,1.780,99.800,0.200,304.72,384,1.000,bicubic
vit_large_patch14_clip_224.openai_ft_in12k_in1k,98.220,1.780,99.720,0.280,304.20,224,1.000,bicubic
vit_large_patch14_clip_336.laion2b_ft_in1k,98.220,1.780,99.720,0.280,304.53,336,1.000,bicubic
seresnextaa201d_32x8d.sw_in12k_ft_in1k_384,98.210,1.790,99.780,0.220,149.39,384,1.000,bicubic
vit_base_patch16_clip_384.openai_ft_in12k_in1k,98.190,1.810,99.660,0.340,86.86,384,0.950,bicubic
beit_large_patch16_224.in22k_ft_in22k_in1k,98.180,1.820,99.760,0.240,304.43,224,0.900,bicubic
deit3_large_patch16_224.fb_in22k_ft_in1k,98.170,1.830,99.760,0.240,304.37,224,1.000,bicubic
maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k,98.170,1.830,99.760,0.240,116.14,384,1.000,bicubic
deit3_huge_patch14_224.fb_in22k_ft_in1k,98.170,1.830,99.730,0.270,632.13,224,1.000,bicubic
caformer_b36.sail_in22k_ft_in1k,98.160,1.840,99.780,0.220,98.75,224,1.000,bicubic
vit_large_patch14_clip_224.openai_ft_in1k,98.160,1.840,99.660,0.340,304.20,224,1.000,bicubic
caformer_m36.sail_in22k_ft_in1k_384,98.150,1.850,99.750,0.250,56.20,384,1.000,bicubic
swinv2_large_window12to24_192to384.ms_in22k_ft_in1k,98.130,1.870,99.710,0.290,196.74,384,1.000,bicubic
swinv2_base_window12to24_192to384.ms_in22k_ft_in1k,98.120,1.880,99.780,0.220,87.92,384,1.000,bicubic
convnext_large.fb_in22k_ft_in1k,98.120,1.880,99.740,0.260,197.77,288,1.000,bicubic
convnext_xlarge.fb_in22k_ft_in1k,98.110,1.890,99.780,0.220,350.20,288,1.000,bicubic
convnextv2_large.fcmae_ft_in22k_in1k,98.090,1.910,99.770,0.230,197.96,288,1.000,bicubic
vit_large_patch14_clip_224.laion2b_ft_in12k_in1k,98.080,1.920,99.760,0.240,304.20,224,1.000,bicubic
convnext_base.fb_in22k_ft_in1k_384,98.080,1.920,99.650,0.350,88.59,384,1.000,bicubic
coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k,98.070,1.930,99.720,0.280,73.88,384,1.000,bicubic
regnety_320.swag_ft_in1k,98.060,1.940,99.860,0.140,145.05,384,1.000,bicubic
convnextv2_base.fcmae_ft_in22k_in1k,98.060,1.940,99.760,0.240,88.72,288,1.000,bicubic
swin_large_patch4_window12_384.ms_in22k_ft_in1k,98.050,1.950,99.690,0.310,196.74,384,1.000,bicubic
convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384,98.040,1.960,99.750,0.250,88.59,384,1.000,bicubic
convformer_m36.sail_in22k_ft_in1k_384,98.040,1.960,99.690,0.310,57.05,384,1.000,bicubic
vit_huge_patch14_clip_224.laion2b_ft_in1k,98.020,1.980,99.720,0.280,632.05,224,1.000,bicubic
vit_base_patch16_clip_384.laion2b_ft_in12k_in1k,97.990,2.010,99.660,0.340,86.86,384,1.000,bicubic
caformer_s36.sail_in22k_ft_in1k_384,97.970,2.030,99.720,0.280,39.30,384,1.000,bicubic
seresnextaa101d_32x8d.sw_in12k_ft_in1k_288,97.970,2.030,99.700,0.300,93.59,320,1.000,bicubic
convnext_large_mlp.clip_laion2b_augreg_ft_in1k,97.950,2.050,99.710,0.290,200.13,256,1.000,bicubic
convformer_b36.sail_in22k_ft_in1k,97.940,2.060,99.760,0.240,99.88,224,1.000,bicubic
tf_efficientnet_b7.ns_jft_in1k,97.920,2.080,99.720,0.280,66.35,600,0.949,bicubic
beitv2_large_patch16_224.in1k_ft_in1k,97.910,2.090,99.660,0.340,304.43,224,0.950,bicubic
seresnextaa101d_32x8d.sw_in12k_ft_in1k,97.910,2.090,99.660,0.340,93.59,288,1.000,bicubic
swin_base_patch4_window12_384.ms_in22k_ft_in1k,97.900,2.100,99.710,0.290,87.90,384,1.000,bicubic
convnextv2_huge.fcmae_ft_in1k,97.900,2.100,99.670,0.330,660.29,288,1.000,bicubic
tf_efficientnetv2_xl.in21k_ft_in1k,97.900,2.100,99.570,0.430,208.12,512,1.000,bicubic
vit_large_patch14_clip_224.laion2b_ft_in1k,97.890,2.110,99.650,0.350,304.20,224,1.000,bicubic
tiny_vit_21m_512.dist_in22k_ft_in1k,97.870,2.130,99.630,0.370,21.27,512,1.000,bicubic
convnext_base.fb_in22k_ft_in1k,97.860,2.140,99.680,0.320,88.59,288,1.000,bicubic
vit_large_r50_s32_384.augreg_in21k_ft_in1k,97.860,2.140,99.670,0.330,329.09,384,1.000,bicubic
swinv2_large_window12to16_192to256.ms_in22k_ft_in1k,97.850,2.150,99.650,0.350,196.74,256,0.900,bicubic
convformer_s36.sail_in22k_ft_in1k_384,97.850,2.150,99.640,0.360,40.01,384,1.000,bicubic
deit3_base_patch16_384.fb_in22k_ft_in1k,97.840,2.160,99.680,0.320,86.88,384,1.000,bicubic
caformer_m36.sail_in22k_ft_in1k,97.840,2.160,99.670,0.330,56.20,224,1.000,bicubic
vit_base_patch16_384.augreg_in21k_ft_in1k,97.840,2.160,99.670,0.330,86.86,384,1.000,bicubic
maxvit_large_tf_512.in1k,97.830,2.170,99.560,0.440,212.33,512,1.000,bicubic
beit_base_patch16_384.in22k_ft_in22k_in1k,97.820,2.180,99.700,0.300,86.74,384,1.000,bicubic
tf_efficientnetv2_m.in21k_ft_in1k,97.820,2.180,99.600,0.400,54.14,480,1.000,bicubic
maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k,97.810,2.190,99.650,0.350,116.14,224,0.950,bicubic
tf_efficientnetv2_l.in21k_ft_in1k,97.800,2.200,99.770,0.230,118.52,480,1.000,bicubic
convnext_small.in12k_ft_in1k_384,97.800,2.200,99.660,0.340,50.22,384,1.000,bicubic
regnety_160.swag_ft_in1k,97.780,2.220,99.760,0.240,83.59,384,1.000,bicubic
dm_nfnet_f6.dm_in1k,97.780,2.220,99.650,0.350,438.36,576,0.956,bicubic
dm_nfnet_f5.dm_in1k,97.780,2.220,99.600,0.400,377.21,544,0.954,bicubic
volo_d5_512.sail_in1k,97.770,2.230,99.670,0.330,296.09,512,1.150,bicubic
maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k,97.760,2.240,99.700,0.300,116.09,224,0.950,bicubic
volo_d5_448.sail_in1k,97.750,2.250,99.620,0.380,295.91,448,1.150,bicubic
maxvit_small_tf_512.in1k,97.750,2.250,99.550,0.450,69.13,512,1.000,bicubic
maxvit_base_tf_512.in1k,97.730,2.270,99.610,0.390,119.88,512,1.000,bicubic
vit_base_patch16_clip_384.laion2b_ft_in1k,97.720,2.280,99.630,0.370,86.86,384,1.000,bicubic
beitv2_base_patch16_224.in1k_ft_in22k_in1k,97.690,2.310,99.680,0.320,86.53,224,0.900,bicubic
vit_base_patch8_224.augreg2_in21k_ft_in1k,97.690,2.310,99.650,0.350,86.58,224,0.900,bicubic
volo_d4_448.sail_in1k,97.670,2.330,99.610,0.390,193.41,448,1.150,bicubic
swinv2_base_window12to16_192to256.ms_in22k_ft_in1k,97.660,2.340,99.720,0.280,87.92,256,0.900,bicubic
convnextv2_large.fcmae_ft_in1k,97.660,2.340,99.610,0.390,197.96,288,1.000,bicubic
regnety_1280.swag_lc_in1k,97.650,2.350,99.640,0.360,644.81,224,0.965,bicubic
coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k,97.650,2.350,99.570,0.430,73.88,224,0.950,bicubic
swin_large_patch4_window7_224.ms_in22k_ft_in1k,97.650,2.350,99.570,0.430,196.53,224,0.900,bicubic
dm_nfnet_f4.dm_in1k,97.640,2.360,99.540,0.460,316.07,512,0.951,bicubic
vit_large_patch16_224.augreg_in21k_ft_in1k,97.630,2.370,99.590,0.410,304.33,224,0.900,bicubic
tf_efficientnet_b6.ns_jft_in1k,97.620,2.380,99.580,0.420,43.04,528,0.942,bicubic
convnext_base.clip_laiona_augreg_ft_in1k_384,97.620,2.380,99.550,0.450,88.59,384,1.000,bicubic
convnext_small.fb_in22k_ft_in1k_384,97.610,2.390,99.600,0.400,50.22,384,1.000,bicubic
tiny_vit_21m_384.dist_in22k_ft_in1k,97.610,2.390,99.590,0.410,21.23,384,1.000,bicubic
convnext_base.clip_laion2b_augreg_ft_in12k_in1k,97.600,2.400,99.720,0.280,88.59,256,1.000,bicubic
convformer_m36.sail_in22k_ft_in1k,97.600,2.400,99.620,0.380,57.05,224,1.000,bicubic
caformer_s36.sail_in22k_ft_in1k,97.600,2.400,99.610,0.390,39.30,224,1.000,bicubic
maxvit_tiny_tf_512.in1k,97.580,2.420,99.560,0.440,31.05,512,1.000,bicubic
vit_base_patch8_224.augreg_in21k_ft_in1k,97.570,2.430,99.670,0.330,86.58,224,0.900,bicubic
maxvit_base_tf_384.in1k,97.570,2.430,99.590,0.410,119.65,384,1.000,bicubic
maxvit_large_tf_384.in1k,97.570,2.430,99.530,0.470,212.03,384,1.000,bicubic
volo_d3_448.sail_in1k,97.550,2.450,99.560,0.440,86.63,448,1.000,bicubic
vit_base_patch16_clip_384.openai_ft_in1k,97.540,2.460,99.660,0.340,86.86,384,1.000,bicubic
convformer_b36.sail_in1k_384,97.530,2.470,99.520,0.480,99.88,384,1.000,bicubic
vit_base_patch16_clip_224.openai_ft_in12k_in1k,97.530,2.470,99.500,0.500,86.57,224,0.950,bicubic
coatnet_2_rw_224.sw_in12k_ft_in1k,97.520,2.480,99.600,0.400,73.87,224,0.950,bicubic
xcit_large_24_p8_384.fb_dist_in1k,97.520,2.480,99.540,0.460,188.93,384,1.000,bicubic
xcit_large_24_p16_384.fb_dist_in1k,97.520,2.480,99.480,0.520,189.10,384,1.000,bicubic
tf_efficientnet_b5.ns_jft_in1k,97.500,2.500,99.640,0.360,30.39,456,0.934,bicubic
caformer_b36.sail_in1k_384,97.500,2.500,99.580,0.420,98.75,384,1.000,bicubic
resnetv2_152x4_bit.goog_in21k_ft_in1k,97.490,2.510,99.610,0.390,936.53,480,1.000,bilinear
deit3_base_patch16_224.fb_in22k_ft_in1k,97.480,2.520,99.600,0.400,86.59,224,1.000,bicubic
cait_m48_448.fb_dist_in1k,97.480,2.520,99.550,0.450,356.46,448,1.000,bicubic
dm_nfnet_f3.dm_in1k,97.470,2.530,99.560,0.440,254.92,416,0.940,bicubic
tf_efficientnetv2_l.in1k,97.470,2.530,99.530,0.470,118.52,480,1.000,bicubic
regnety_160.lion_in12k_ft_in1k,97.450,2.550,99.600,0.400,83.59,288,1.000,bicubic
regnety_160.sw_in12k_ft_in1k,97.450,2.550,99.590,0.410,83.59,288,1.000,bicubic
vit_base_patch16_clip_224.laion2b_ft_in12k_in1k,97.450,2.550,99.540,0.460,86.57,224,0.950,bicubic
vit_medium_patch16_gap_384.sw_in12k_ft_in1k,97.440,2.560,99.640,0.360,39.03,384,0.950,bicubic
caformer_m36.sail_in1k_384,97.440,2.560,99.600,0.400,56.20,384,1.000,bicubic
maxvit_small_tf_384.in1k,97.430,2.570,99.510,0.490,69.02,384,1.000,bicubic
deit3_large_patch16_384.fb_in1k,97.420,2.580,99.620,0.380,304.76,384,1.000,bicubic
caformer_s18.sail_in22k_ft_in1k_384,97.420,2.580,99.570,0.430,26.34,384,1.000,bicubic
flexivit_large.1200ep_in1k,97.410,2.590,99.600,0.400,304.36,240,0.950,bicubic
efficientnet_b5.sw_in12k_ft_in1k,97.410,2.590,99.550,0.450,30.39,448,1.000,bicubic
convformer_m36.sail_in1k_384,97.410,2.590,99.470,0.530,57.05,384,1.000,bicubic
cait_m36_384.fb_dist_in1k,97.400,2.600,99.510,0.490,271.22,384,1.000,bicubic
caformer_s36.sail_in1k_384,97.390,2.610,99.540,0.460,39.30,384,1.000,bicubic
volo_d5_224.sail_in1k,97.380,2.620,99.570,0.430,295.46,224,0.960,bicubic
resnext101_32x32d.fb_wsl_ig1b_ft_in1k,97.370,2.630,99.680,0.320,468.53,224,0.875,bilinear
convnext_small.fb_in22k_ft_in1k,97.360,2.640,99.530,0.470,50.22,288,1.000,bicubic
vit_base_patch32_clip_384.laion2b_ft_in12k_in1k,97.360,2.640,99.520,0.480,88.30,384,1.000,bicubic
convnext_small.in12k_ft_in1k,97.350,2.650,99.580,0.420,50.22,288,1.000,bicubic
convnext_tiny.in12k_ft_in1k_384,97.340,2.660,99.600,0.400,28.59,384,1.000,bicubic
cait_s36_384.fb_dist_in1k,97.330,2.670,99.540,0.460,68.37,384,1.000,bicubic
volo_d2_384.sail_in1k,97.320,2.680,99.600,0.400,58.87,384,1.000,bicubic
maxvit_tiny_tf_384.in1k,97.310,2.690,99.500,0.500,30.98,384,1.000,bicubic
vit_base_patch32_clip_448.laion2b_ft_in12k_in1k,97.310,2.690,99.480,0.520,88.34,448,1.000,bicubic
flexivit_large.600ep_in1k,97.280,2.720,99.590,0.410,304.36,240,0.950,bicubic
swin_base_patch4_window7_224.ms_in22k_ft_in1k,97.280,2.720,99.540,0.460,87.77,224,0.900,bicubic
regnety_120.sw_in12k_ft_in1k,97.280,2.720,99.530,0.470,51.82,288,1.000,bicubic
volo_d4_224.sail_in1k,97.280,2.720,99.520,0.480,192.96,224,0.960,bicubic
xcit_medium_24_p8_384.fb_dist_in1k,97.280,2.720,99.510,0.490,84.32,384,1.000,bicubic
xcit_medium_24_p16_384.fb_dist_in1k,97.280,2.720,99.470,0.530,84.40,384,1.000,bicubic
convformer_s36.sail_in1k_384,97.280,2.720,99.430,0.570,40.01,384,1.000,bicubic
convformer_s18.sail_in22k_ft_in1k_384,97.270,2.730,99.550,0.450,26.77,384,1.000,bicubic
inception_next_base.sail_in1k_384,97.260,2.740,99.490,0.510,86.67,384,1.000,bicubic
flexivit_large.300ep_in1k,97.250,2.750,99.490,0.510,304.36,240,0.950,bicubic
xcit_small_24_p8_384.fb_dist_in1k,97.240,2.760,99.610,0.390,47.63,384,1.000,bicubic
convnext_base.clip_laion2b_augreg_ft_in1k,97.240,2.760,99.550,0.450,88.59,256,1.000,bicubic
convnextv2_tiny.fcmae_ft_in22k_in1k_384,97.240,2.760,99.520,0.480,28.64,384,1.000,bicubic
xcit_small_12_p8_384.fb_dist_in1k,97.230,2.770,99.480,0.520,26.21,384,1.000,bicubic
convnextv2_base.fcmae_ft_in1k,97.220,2.780,99.540,0.460,88.72,288,1.000,bicubic
regnety_2560.seer_ft_in1k,97.220,2.780,99.520,0.480,"1,282.60",384,1.000,bicubic
resnext101_32x8d.fb_swsl_ig1b_ft_in1k,97.210,2.790,99.570,0.430,88.79,224,0.875,bilinear
tf_efficientnetv2_m.in1k,97.210,2.790,99.530,0.470,54.14,480,1.000,bicubic
tf_efficientnet_b7.ap_in1k,97.200,2.800,99.540,0.460,66.35,600,0.949,bicubic
regnetz_e8.ra3_in1k,97.200,2.800,99.500,0.500,57.70,320,1.000,bicubic
tf_efficientnet_b8.ra_in1k,97.200,2.800,99.500,0.500,87.41,672,0.954,bicubic
tiny_vit_21m_224.dist_in22k_ft_in1k,97.200,2.800,99.490,0.510,21.20,224,0.950,bicubic
vit_base_r50_s16_384.orig_in21k_ft_in1k,97.190,2.810,99.560,0.440,98.95,384,1.000,bicubic
beitv2_base_patch16_224.in1k_ft_in1k,97.170,2.830,99.470,0.530,86.53,224,0.900,bicubic
regnety_320.swag_lc_in1k,97.160,2.840,99.670,0.330,145.05,224,0.965,bicubic
vit_base_patch16_224.augreg2_in21k_ft_in1k,97.150,2.850,99.540,0.460,86.57,224,0.900,bicubic
coat_lite_medium_384.in1k,97.150,2.850,99.450,0.550,44.57,384,1.000,bicubic
eva02_small_patch14_336.mim_in22k_ft_in1k,97.140,2.860,99.470,0.530,22.13,336,1.000,bicubic
deit3_small_patch16_384.fb_in22k_ft_in1k,97.130,2.870,99.510,0.490,22.21,384,1.000,bicubic
vit_base_patch16_clip_224.laion2b_ft_in1k,97.130,2.870,99.460,0.540,86.57,224,1.000,bicubic
xcit_small_24_p16_384.fb_dist_in1k,97.120,2.880,99.450,0.550,47.67,384,1.000,bicubic
tf_efficientnet_b8.ap_in1k,97.110,2.890,99.660,0.340,87.41,672,0.954,bicubic
dm_nfnet_f2.dm_in1k,97.110,2.890,99.510,0.490,193.78,352,0.920,bicubic
vit_base_patch32_clip_384.openai_ft_in12k_in1k,97.110,2.890,99.500,0.500,88.30,384,0.950,bicubic
convnext_large.fb_in1k,97.100,2.900,99.450,0.550,197.77,288,1.000,bicubic
ecaresnet269d.ra2_in1k,97.090,2.910,99.470,0.530,102.09,352,1.000,bicubic
volo_d3_224.sail_in1k,97.090,2.910,99.470,0.530,86.33,224,0.960,bicubic
beit_base_patch16_224.in22k_ft_in22k_in1k,97.080,2.920,99.610,0.390,86.53,224,0.900,bicubic
tf_efficientnet_b6.ap_in1k,97.080,2.920,99.610,0.390,43.04,528,0.942,bicubic
convformer_s36.sail_in22k_ft_in1k,97.080,2.920,99.560,0.440,40.01,224,1.000,bicubic
convnext_tiny.fb_in22k_ft_in1k_384,97.080,2.920,99.510,0.490,28.59,384,1.000,bicubic
eca_nfnet_l2.ra3_in1k,97.080,2.920,99.510,0.490,56.72,384,1.000,bicubic
vit_base_patch16_clip_224.openai_ft_in1k,97.080,2.920,99.490,0.510,86.57,224,0.900,bicubic
caformer_s18.sail_in1k_384,97.080,2.920,99.420,0.580,26.34,384,1.000,bicubic
cait_s24_384.fb_dist_in1k,97.070,2.930,99.430,0.570,47.06,384,1.000,bicubic
xcit_large_24_p8_224.fb_dist_in1k,97.070,2.930,99.420,0.580,188.93,224,1.000,bicubic
convnext_tiny.in12k_ft_in1k,97.060,2.940,99.550,0.450,28.59,288,1.000,bicubic
deit3_base_patch16_384.fb_in1k,97.040,2.960,99.390,0.610,86.88,384,1.000,bicubic
convformer_s18.sail_in1k_384,97.040,2.960,99.380,0.620,26.77,384,1.000,bicubic
hrnet_w48_ssld.paddle_in1k,97.030,2.970,99.640,0.360,77.47,288,1.000,bilinear
dm_nfnet_f1.dm_in1k,97.030,2.970,99.390,0.610,132.63,320,0.910,bicubic
resnetv2_152x2_bit.goog_in21k_ft_in1k,97.000,3.000,99.590,0.410,236.34,448,1.000,bilinear
tf_efficientnet_b7.ra_in1k,97.000,3.000,99.520,0.480,66.35,600,0.949,bicubic
volo_d2_224.sail_in1k,97.000,3.000,99.390,0.610,58.68,224,0.960,bicubic
efficientnetv2_rw_m.agc_in1k,96.980,3.020,99.530,0.470,53.24,416,1.000,bicubic
resnetv2_101x3_bit.goog_in21k_ft_in1k,96.980,3.020,99.490,0.510,387.93,448,1.000,bilinear
caformer_b36.sail_in1k,96.980,3.020,99.340,0.660,98.75,224,1.000,bicubic
deit3_medium_patch16_224.fb_in22k_ft_in1k,96.970,3.030,99.430,0.570,38.85,224,1.000,bicubic
deit_base_distilled_patch16_384.fb_in1k,96.960,3.040,99.480,0.520,87.63,384,1.000,bicubic
seresnextaa101d_32x8d.ah_in1k,96.960,3.040,99.390,0.610,93.59,288,1.000,bicubic
maxvit_large_tf_224.in1k,96.960,3.040,99.250,0.750,211.79,224,0.950,bicubic
tf_efficientnet_b4.ns_jft_in1k,96.950,3.050,99.580,0.420,19.34,380,0.922,bicubic
maxvit_base_tf_224.in1k,96.950,3.050,99.260,0.740,119.47,224,0.950,bicubic
deit3_large_patch16_224.fb_in1k,96.940,3.060,99.340,0.660,304.37,224,0.900,bicubic
davit_base.msft_in1k,96.940,3.060,99.260,0.740,87.95,224,0.950,bicubic
mvitv2_large.fb_in1k,96.930,3.070,99.400,0.600,217.99,224,0.900,bicubic
xcit_small_12_p16_384.fb_dist_in1k,96.920,3.080,99.400,0.600,26.25,384,1.000,bicubic
xcit_medium_24_p8_224.fb_dist_in1k,96.920,3.080,99.390,0.610,84.32,224,1.000,bicubic
volo_d1_384.sail_in1k,96.910,3.090,99.520,0.480,26.78,384,1.000,bicubic
resnetrs420.tf_in1k,96.910,3.090,99.460,0.540,191.89,416,1.000,bicubic
deit3_huge_patch14_224.fb_in1k,96.900,3.100,99.480,0.520,632.13,224,0.900,bicubic
convformer_b36.sail_in1k,96.900,3.100,99.220,0.780,99.88,224,1.000,bicubic
caformer_m36.sail_in1k,96.890,3.110,99.430,0.570,56.20,224,1.000,bicubic
vit_base_patch16_224.augreg_in21k_ft_in1k,96.880,3.120,99.530,0.470,86.57,224,0.900,bicubic
xcit_small_24_p8_224.fb_dist_in1k,96.870,3.130,99.480,0.520,47.63,224,1.000,bicubic
regnety_1280.seer_ft_in1k,96.860,3.140,99.390,0.610,644.81,384,1.000,bicubic
convnextv2_tiny.fcmae_ft_in22k_in1k,96.850,3.150,99.460,0.540,28.64,288,1.000,bicubic
regnety_640.seer_ft_in1k,96.850,3.150,99.420,0.580,281.38,384,1.000,bicubic
rexnetr_300.sw_in12k_ft_in1k,96.840,3.160,99.510,0.490,34.81,288,1.000,bicubic
resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384,96.830,3.170,99.450,0.550,236.34,384,1.000,bicubic
convnext_base.fb_in1k,96.830,3.170,99.410,0.590,88.59,288,1.000,bicubic
regnety_160.swag_lc_in1k,96.820,3.180,99.650,0.350,83.59,224,0.965,bicubic
resnext101_32x16d.fb_wsl_ig1b_ft_in1k,96.810,3.190,99.600,0.400,194.03,224,0.875,bilinear
maxxvit_rmlp_small_rw_256.sw_in1k,96.800,3.200,99.380,0.620,66.01,256,0.950,bicubic
xcit_large_24_p16_224.fb_dist_in1k,96.800,3.200,99.350,0.650,189.10,224,1.000,bicubic
vit_large_r50_s32_224.augreg_in21k_ft_in1k,96.790,3.210,99.340,0.660,328.99,224,0.900,bicubic
fastvit_ma36.apple_dist_in1k,96.780,3.220,99.330,0.670,44.07,256,0.950,bicubic
seresnet152d.ra2_in1k,96.770,3.230,99.440,0.560,66.84,320,1.000,bicubic
seresnext101_32x8d.ah_in1k,96.770,3.230,99.350,0.650,93.57,288,1.000,bicubic
mvitv2_base.fb_in1k,96.760,3.240,99.260,0.740,51.47,224,0.900,bicubic
resnetrs350.tf_in1k,96.750,3.250,99.370,0.630,163.96,384,1.000,bicubic
swinv2_base_window16_256.ms_in1k,96.750,3.250,99.350,0.650,87.92,256,0.900,bicubic
flexivit_base.1200ep_in1k,96.740,3.260,99.360,0.640,86.59,240,0.950,bicubic
edgenext_base.in21k_ft_in1k,96.730,3.270,99.420,0.580,18.51,320,1.000,bicubic
tf_efficientnetv2_s.in21k_ft_in1k,96.730,3.270,99.420,0.580,21.46,384,1.000,bicubic
resnet200d.ra2_in1k,96.730,3.270,99.330,0.670,64.69,320,1.000,bicubic
vit_base_patch16_384.orig_in21k_ft_in1k,96.720,3.280,99.500,0.500,86.86,384,1.000,bicubic
regnetz_040.ra3_in1k,96.720,3.280,99.480,0.520,27.12,320,1.000,bicubic
resnetv2_50x3_bit.goog_in21k_ft_in1k,96.710,3.290,99.550,0.450,217.32,448,1.000,bilinear
caformer_s18.sail_in22k_ft_in1k,96.710,3.290,99.490,0.510,26.34,224,1.000,bicubic
regnetz_040_h.ra3_in1k,96.700,3.300,99.500,0.500,28.94,320,1.000,bicubic
vit_small_patch16_384.augreg_in21k_ft_in1k,96.700,3.300,99.480,0.520,22.20,384,1.000,bicubic
edgenext_base.usi_in1k,96.700,3.300,99.430,0.570,18.51,320,1.000,bicubic
xcit_small_12_p8_224.fb_dist_in1k,96.700,3.300,99.380,0.620,26.21,224,1.000,bicubic
resnetrs200.tf_in1k,96.700,3.300,99.370,0.630,93.21,320,1.000,bicubic
resnetaa101d.sw_in12k_ft_in1k,96.700,3.300,99.360,0.640,44.57,288,1.000,bicubic
seresnext101d_32x8d.ah_in1k,96.700,3.300,99.360,0.640,93.59,288,1.000,bicubic
eca_nfnet_l1.ra2_in1k,96.700,3.300,99.290,0.710,41.41,320,1.000,bicubic
repvgg_d2se.rvgg_in1k,96.690,3.310,99.370,0.630,133.33,320,1.000,bilinear
caformer_s36.sail_in1k,96.690,3.310,99.360,0.640,39.30,224,1.000,bicubic
maxvit_small_tf_224.in1k,96.690,3.310,99.360,0.640,68.93,224,0.950,bicubic
resnetrs270.tf_in1k,96.690,3.310,99.350,0.650,129.86,352,1.000,bicubic
vit_small_r26_s32_384.augreg_in21k_ft_in1k,96.680,3.320,99.580,0.420,36.47,384,1.000,bicubic
tf_efficientnet_b5.ap_in1k,96.680,3.320,99.460,0.540,30.39,456,0.934,bicubic
convformer_m36.sail_in1k,96.680,3.320,99.080,0.920,57.05,224,1.000,bicubic
vit_medium_patch16_gap_256.sw_in12k_ft_in1k,96.670,3.330,99.490,0.510,38.86,256,0.950,bicubic
tf_efficientnet_b6.aa_in1k,96.670,3.330,99.370,0.630,43.04,528,0.942,bicubic
deit3_small_patch16_224.fb_in22k_ft_in1k,96.660,3.340,99.330,0.670,22.06,224,1.000,bicubic
flexivit_base.600ep_in1k,96.650,3.350,99.330,0.670,86.59,240,0.950,bicubic
davit_small.msft_in1k,96.630,3.370,99.350,0.650,49.75,224,0.950,bicubic
convformer_s18.sail_in22k_ft_in1k,96.630,3.370,99.340,0.660,26.77,224,1.000,bicubic
efficientvit_b3.r288_in1k,96.630,3.370,99.220,0.780,48.65,288,1.000,bicubic
coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k,96.630,3.370,99.160,0.840,41.72,224,0.950,bicubic
resmlp_big_24_224.fb_in22k_ft_in1k,96.620,3.380,99.510,0.490,129.14,224,0.875,bicubic
regnetz_d8.ra3_in1k,96.620,3.380,99.450,0.550,23.37,320,1.000,bicubic
resnest200e.in1k,96.610,3.390,99.350,0.650,70.20,320,0.909,bicubic
resnext101_32x16d.fb_swsl_ig1b_ft_in1k,96.600,3.400,99.530,0.470,194.03,224,0.875,bilinear
flexivit_base.300ep_in1k,96.600,3.400,99.270,0.730,86.59,240,0.950,bicubic
xcit_medium_24_p16_224.fb_dist_in1k,96.600,3.400,99.270,0.730,84.40,224,1.000,bicubic
regnetz_d32.ra3_in1k,96.590,3.410,99.380,0.620,27.58,320,0.950,bicubic
swin_base_patch4_window12_384.ms_in1k,96.580,3.420,99.250,0.750,87.90,384,1.000,bicubic
resnetrs152.tf_in1k,96.580,3.420,99.240,0.760,86.62,320,1.000,bicubic
convformer_s36.sail_in1k,96.580,3.420,99.170,0.830,40.01,224,1.000,bicubic
maxvit_rmlp_small_rw_224.sw_in1k,96.580,3.420,99.120,0.880,64.90,224,0.900,bicubic
regnetz_d8_evos.ch_in1k,96.570,3.430,99.460,0.540,23.46,320,1.000,bicubic
gcvit_base.in1k,96.560,3.440,99.230,0.770,90.32,224,0.875,bicubic
focalnet_base_srf.ms_in1k,96.560,3.440,99.150,0.850,88.15,224,0.900,bicubic
inception_next_base.sail_in1k,96.560,3.440,99.080,0.920,86.67,224,0.950,bicubic
cait_xs24_384.fb_dist_in1k,96.540,3.460,99.420,0.580,26.67,384,1.000,bicubic
efficientnetv2_rw_s.ra2_in1k,96.540,3.460,99.360,0.640,23.94,384,1.000,bicubic
tf_efficientnet_b7.aa_in1k,96.540,3.460,99.300,0.700,66.35,600,0.949,bicubic
crossvit_18_dagger_408.in1k,96.540,3.460,99.260,0.740,44.61,408,1.000,bicubic
coatnet_rmlp_2_rw_224.sw_in1k,96.540,3.460,99.100,0.900,73.88,224,0.950,bicubic
regnety_080.ra3_in1k,96.530,3.470,99.320,0.680,39.18,288,1.000,bicubic
xcit_tiny_24_p8_384.fb_dist_in1k,96.530,3.470,99.320,0.680,12.11,384,1.000,bicubic
swinv2_base_window8_256.ms_in1k,96.530,3.470,99.270,0.730,87.92,256,0.900,bicubic
convnext_small.fb_in1k,96.520,3.480,99.340,0.660,50.22,288,1.000,bicubic
resnest269e.in1k,96.510,3.490,99.350,0.650,110.93,416,0.928,bicubic
vit_base_patch32_384.augreg_in21k_ft_in1k,96.490,3.510,99.410,0.590,88.30,384,1.000,bicubic
swin_small_patch4_window7_224.ms_in22k_ft_in1k,96.480,3.520,99.390,0.610,49.61,224,0.900,bicubic
fastvit_ma36.apple_in1k,96.470,3.530,99.280,0.720,44.07,256,0.950,bicubic
tf_efficientnet_b5.aa_in1k,96.470,3.530,99.240,0.760,30.39,456,0.934,bicubic
swinv2_small_window16_256.ms_in1k,96.470,3.530,99.200,0.800,49.73,256,0.900,bicubic
coat_lite_medium.in1k,96.470,3.530,99.150,0.850,44.57,224,0.900,bicubic
cs3se_edgenet_x.c2ns_in1k,96.450,3.550,99.400,0.600,50.72,320,1.000,bicubic
resmlp_big_24_224.fb_distilled_in1k,96.450,3.550,99.310,0.690,129.14,224,0.875,bicubic
vit_base_patch16_224_miil.in21k_ft_in1k,96.450,3.550,99.300,0.700,86.54,224,0.875,bilinear
focalnet_base_lrf.ms_in1k,96.450,3.550,99.120,0.880,88.75,224,0.900,bicubic
resnext101_32x4d.fb_swsl_ig1b_ft_in1k,96.420,3.580,99.470,0.530,44.18,224,0.875,bilinear
maxvit_rmlp_tiny_rw_256.sw_in1k,96.420,3.580,99.380,0.620,29.15,256,0.950,bicubic
cait_s24_224.fb_dist_in1k,96.420,3.580,99.150,0.850,46.92,224,1.000,bicubic
regnetv_064.ra3_in1k,96.410,3.590,99.360,0.640,30.58,288,1.000,bicubic
xcit_small_24_p8_224.fb_in1k,96.410,3.590,99.150,0.850,47.63,224,1.000,bicubic
xcit_large_24_p8_224.fb_in1k,96.400,3.600,98.990,1.010,188.93,224,1.000,bicubic
resnet152d.ra2_in1k,96.390,3.610,99.390,0.610,60.21,320,1.000,bicubic
tf_efficientnet_b3.ns_jft_in1k,96.390,3.610,99.350,0.650,12.23,300,0.904,bicubic
crossvit_15_dagger_408.in1k,96.390,3.610,99.160,0.840,28.50,408,1.000,bicubic
convnextv2_nano.fcmae_ft_in22k_in1k_384,96.370,3.630,99.400,0.600,15.62,384,1.000,bicubic
mvitv2_small.fb_in1k,96.370,3.630,99.200,0.800,34.87,224,0.900,bicubic
xception65.ra3_in1k,96.360,3.640,99.240,0.760,39.92,299,0.940,bicubic
fastvit_sa36.apple_dist_in1k,96.360,3.640,99.230,0.770,31.53,256,0.900,bicubic
regnety_064.ra3_in1k,96.360,3.640,99.230,0.770,30.58,288,1.000,bicubic
pvt_v2_b5.in1k,96.360,3.640,99.170,0.830,81.96,224,0.900,bicubic
regnety_160.deit_in1k,96.350,3.650,99.330,0.670,83.59,288,1.000,bicubic
pvt_v2_b4.in1k,96.350,3.650,99.180,0.820,62.56,224,0.900,bicubic
regnety_320.seer_ft_in1k,96.340,3.660,99.350,0.650,145.05,384,1.000,bicubic
tf_efficientnet_b5.ra_in1k,96.340,3.660,99.310,0.690,30.39,456,0.934,bicubic
tf_efficientnetv2_s.in1k,96.340,3.660,99.200,0.800,21.46,384,1.000,bicubic
resnext101_32x8d.fb_wsl_ig1b_ft_in1k,96.330,3.670,99.430,0.570,88.79,224,0.875,bilinear
repvit_m2_3.dist_450e_in1k,96.330,3.670,99.400,0.600,23.69,224,0.950,bicubic
volo_d1_224.sail_in1k,96.320,3.680,99.310,0.690,26.63,224,0.960,bicubic
dm_nfnet_f0.dm_in1k,96.310,3.690,99.320,0.680,71.49,256,0.900,bicubic
deit3_base_patch16_224.fb_in1k,96.300,3.700,99.180,0.820,86.59,224,0.900,bicubic
resnet101d.ra2_in1k,96.290,3.710,99.230,0.770,44.57,320,1.000,bicubic
tiny_vit_11m_224.dist_in22k_ft_in1k,96.290,3.710,99.190,0.810,11.00,224,0.950,bicubic
efficientvit_b3.r256_in1k,96.290,3.710,99.120,0.880,48.65,256,1.000,bicubic
gcvit_small.in1k,96.280,3.720,99.140,0.860,51.09,224,0.875,bicubic
swinv2_small_window8_256.ms_in1k,96.270,3.730,99.210,0.790,49.73,256,0.900,bicubic
inception_next_small.sail_in1k,96.240,3.760,99.220,0.780,49.37,224,0.875,bicubic
nest_base_jx.goog_in1k,96.240,3.760,99.200,0.800,67.72,224,0.875,bicubic
fastvit_sa36.apple_in1k,96.240,3.760,99.190,0.810,31.53,256,0.900,bicubic
twins_svt_large.in1k,96.240,3.760,99.170,0.830,99.27,224,0.900,bicubic
swin_s3_base_224.ms_in1k,96.240,3.760,99.150,0.850,71.13,224,0.900,bicubic
maxvit_tiny_rw_224.sw_in1k,96.240,3.760,99.130,0.870,29.06,224,0.950,bicubic
pit_b_distilled_224.in1k,96.230,3.770,99.110,0.890,74.79,224,0.900,bicubic
swin_s3_small_224.ms_in1k,96.230,3.770,99.080,0.920,49.74,224,0.900,bicubic
ecaresnet101d.miil_in1k,96.220,3.780,99.310,0.690,44.57,288,0.950,bicubic
tf_efficientnetv2_b3.in21k_ft_in1k,96.220,3.780,99.230,0.770,14.36,300,0.900,bicubic
xcit_small_24_p16_224.fb_dist_in1k,96.220,3.780,99.210,0.790,47.67,224,1.000,bicubic
xception65p.ra3_in1k,96.210,3.790,99.180,0.820,39.82,299,0.940,bicubic
deit3_small_patch16_384.fb_in1k,96.200,3.800,99.290,0.710,22.21,384,1.000,bicubic
rexnetr_200.sw_in12k_ft_in1k,96.200,3.800,99.260,0.740,16.52,288,1.000,bicubic
resnet152.a1h_in1k,96.200,3.800,99.220,0.780,60.19,288,1.000,bicubic
regnetv_040.ra3_in1k,96.190,3.810,99.330,0.670,20.64,288,1.000,bicubic
convnextv2_tiny.fcmae_ft_in1k,96.190,3.810,99.250,0.750,28.64,288,1.000,bicubic
gcvit_tiny.in1k,96.180,3.820,99.230,0.770,28.22,224,0.875,bicubic
focalnet_small_lrf.ms_in1k,96.180,3.820,99.190,0.810,50.34,224,0.900,bicubic
swinv2_cr_small_ns_224.sw_in1k,96.180,3.820,99.140,0.860,49.70,224,0.900,bicubic
mobilevitv2_175.cvnets_in22k_ft_in1k_384,96.180,3.820,99.120,0.880,14.25,384,1.000,bicubic
tf_efficientnet_b4.ap_in1k,96.160,3.840,99.270,0.730,19.34,380,0.922,bicubic
tresnet_v2_l.miil_in21k_ft_in1k,96.160,3.840,99.240,0.760,46.17,224,0.875,bilinear
deit_base_patch16_384.fb_in1k,96.160,3.840,99.140,0.860,86.86,384,1.000,bicubic
twins_svt_base.in1k,96.160,3.840,99.050,0.950,56.07,224,0.900,bicubic
fastvit_sa24.apple_dist_in1k,96.150,3.850,99.210,0.790,21.55,256,0.900,bicubic
efficientnet_b4.ra2_in1k,96.150,3.850,99.190,0.810,19.34,384,1.000,bicubic
sequencer2d_l.in1k,96.150,3.850,99.160,0.840,54.30,224,0.875,bicubic
regnetz_c16_evos.ch_in1k,96.140,3.860,99.360,0.640,13.49,320,0.950,bicubic
twins_pcpvt_large.in1k,96.140,3.860,99.170,0.830,60.99,224,0.900,bicubic
caformer_s18.sail_in1k,96.140,3.860,99.000,1.000,26.34,224,1.000,bicubic
vit_base_patch32_clip_224.laion2b_ft_in12k_in1k,96.130,3.870,99.220,0.780,88.22,224,0.900,bicubic
tiny_vit_21m_224.in1k,96.130,3.870,99.160,0.840,21.20,224,0.950,bicubic
repvit_m2_3.dist_300e_in1k,96.120,3.880,99.340,0.660,23.69,224,0.950,bicubic
resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k,96.120,3.880,99.270,0.730,236.34,224,0.875,bicubic
nfnet_l0.ra2_in1k,96.120,3.880,99.240,0.760,35.07,288,1.000,bicubic
swin_base_patch4_window7_224.ms_in1k,96.120,3.880,99.060,0.940,87.77,224,0.900,bicubic
resnetv2_50x1_bit.goog_distilled_in1k,96.110,3.890,99.280,0.720,25.55,224,0.875,bicubic
efficientformer_l7.snap_dist_in1k,96.110,3.890,99.270,0.730,82.23,224,0.950,bicubic
xcit_small_12_p8_224.fb_in1k,96.110,3.890,99.160,0.840,26.21,224,1.000,bicubic
resnetv2_101x1_bit.goog_in21k_ft_in1k,96.100,3.900,99.280,0.720,44.54,448,1.000,bilinear
maxvit_tiny_tf_224.in1k,96.100,3.900,99.270,0.730,30.92,224,0.950,bicubic
deit_base_distilled_patch16_224.fb_in1k,96.090,3.910,99.190,0.810,87.34,224,0.900,bicubic
xcit_medium_24_p8_224.fb_in1k,96.090,3.910,98.890,1.110,84.32,224,1.000,bicubic
resnext101_64x4d.c1_in1k,96.080,3.920,99.240,0.760,83.46,288,1.000,bicubic
regnety_320.tv2_in1k,96.080,3.920,99.230,0.770,145.05,224,0.965,bicubic
deit3_medium_patch16_224.fb_in1k,96.080,3.920,99.200,0.800,38.85,224,0.900,bicubic
xcit_tiny_12_p8_384.fb_dist_in1k,96.080,3.920,99.140,0.860,6.71,384,1.000,bicubic
swinv2_cr_small_224.sw_in1k,96.080,3.920,98.860,1.140,49.70,224,0.900,bicubic
tf_efficientnet_b5.in1k,96.070,3.930,99.290,0.710,30.39,456,0.934,bicubic
efficientformerv2_l.snap_dist_in1k,96.070,3.930,99.190,0.810,26.32,224,0.950,bicubic
focalnet_small_srf.ms_in1k,96.070,3.930,99.120,0.880,49.89,224,0.900,bicubic
convnextv2_nano.fcmae_ft_in22k_in1k,96.060,3.940,99.220,0.780,15.62,288,1.000,bicubic
mobilevitv2_200.cvnets_in22k_ft_in1k_384,96.060,3.940,99.080,0.920,18.45,384,1.000,bicubic
resnetv2_101.a1h_in1k,96.050,3.950,99.170,0.830,44.54,288,1.000,bicubic
cs3edgenet_x.c2_in1k,96.050,3.950,99.140,0.860,47.82,288,1.000,bicubic
maxxvit_rmlp_nano_rw_256.sw_in1k,96.040,3.960,99.260,0.740,16.78,256,0.950,bicubic
resnext101_64x4d.tv_in1k,96.030,3.970,99.160,0.840,83.46,224,0.875,bilinear
xcit_small_12_p16_224.fb_dist_in1k,96.030,3.970,99.130,0.870,26.25,224,1.000,bicubic
coatnet_1_rw_224.sw_in1k,96.030,3.970,99.060,0.940,41.72,224,0.950,bicubic
efficientvit_b3.r224_in1k,96.030,3.970,98.990,1.010,48.65,224,0.950,bicubic
regnety_040.ra3_in1k,96.020,3.980,99.190,0.810,20.65,288,1.000,bicubic
resnet101.a1h_in1k,96.020,3.980,99.140,0.860,44.55,288,1.000,bicubic
cs3sedarknet_x.c2ns_in1k,96.020,3.980,99.110,0.890,35.40,288,1.000,bicubic
convnext_tiny_hnf.a2h_in1k,96.020,3.980,99.070,0.930,28.59,288,1.000,bicubic
hrnet_w18_ssld.paddle_in1k,95.990,4.010,99.320,0.680,21.30,288,1.000,bilinear
convnext_nano.in12k_ft_in1k,95.990,4.010,99.310,0.690,15.59,288,1.000,bicubic
pvt_v2_b3.in1k,95.990,4.010,99.190,0.810,45.24,224,0.900,bicubic
regnetx_320.tv2_in1k,95.990,4.010,99.100,0.900,107.81,224,0.965,bicubic
tresnet_xl.miil_in1k_448,95.980,4.020,99.130,0.870,78.44,448,0.875,bilinear
sequencer2d_s.in1k,95.980,4.020,99.050,0.950,27.65,224,0.875,bicubic
regnety_160.tv2_in1k,95.970,4.030,99.150,0.850,83.59,224,0.965,bicubic
nest_small_jx.goog_in1k,95.970,4.030,99.030,0.970,38.35,224,0.875,bicubic
maxvit_rmlp_nano_rw_256.sw_in1k,95.970,4.030,98.970,1.030,15.50,256,0.950,bicubic
efficientvit_b2.r288_in1k,95.960,4.040,99.190,0.810,24.33,288,1.000,bicubic
regnety_032.ra_in1k,95.960,4.040,99.190,0.810,19.44,288,1.000,bicubic
coatnet_rmlp_1_rw_224.sw_in1k,95.960,4.040,99.160,0.840,41.69,224,0.950,bicubic
resnext101_32x8d.tv2_in1k,95.950,4.050,99.080,0.920,88.79,224,0.965,bilinear
convformer_s18.sail_in1k,95.950,4.050,98.900,1.100,26.77,224,1.000,bicubic
xcit_tiny_24_p16_384.fb_dist_in1k,95.940,4.060,99.220,0.780,12.12,384,1.000,bicubic
eca_nfnet_l0.ra2_in1k,95.940,4.060,99.210,0.790,24.14,288,1.000,bicubic
regnetz_c16.ra3_in1k,95.940,4.060,99.110,0.890,13.46,320,1.000,bicubic
swinv2_tiny_window16_256.ms_in1k,95.930,4.070,99.150,0.850,28.35,256,0.900,bicubic
swin_small_patch4_window7_224.ms_in1k,95.930,4.070,99.020,0.980,49.61,224,0.900,bicubic
fastvit_sa24.apple_in1k,95.920,4.080,99.160,0.840,21.55,256,0.900,bicubic
maxvit_nano_rw_256.sw_in1k,95.920,4.080,99.010,0.990,15.45,256,0.950,bicubic
coat_small.in1k,95.910,4.090,99.150,0.850,21.69,224,0.900,bicubic
tf_efficientnet_b4.aa_in1k,95.900,4.100,99.170,0.830,19.34,380,0.922,bicubic
repvit_m1_5.dist_450e_in1k,95.900,4.100,99.120,0.880,14.64,224,0.950,bicubic
maxxvitv2_nano_rw_256.sw_in1k,95.900,4.100,99.050,0.950,23.70,256,0.950,bicubic
regnetx_160.tv2_in1k,95.880,4.120,99.090,0.910,54.28,224,0.965,bicubic
resnet51q.ra2_in1k,95.870,4.130,99.130,0.870,35.70,288,1.000,bilinear
resnext50_32x4d.fb_swsl_ig1b_ft_in1k,95.860,4.140,99.250,0.750,25.03,224,0.875,bilinear
resnest101e.in1k,95.860,4.140,99.200,0.800,48.28,256,0.875,bilinear
tresnet_l.miil_in1k_448,95.860,4.140,99.120,0.880,55.99,448,0.875,bilinear
regnety_080_tv.tv2_in1k,95.860,4.140,99.100,0.900,39.38,224,0.965,bicubic
mvitv2_tiny.fb_in1k,95.860,4.140,99.070,0.930,24.17,224,0.900,bicubic
cs3darknet_x.c2ns_in1k,95.850,4.150,99.170,0.830,35.05,288,1.000,bicubic
rexnet_300.nav_in1k,95.840,4.160,99.130,0.870,34.71,224,0.875,bicubic
resnetaa50d.sw_in12k_ft_in1k,95.830,4.170,99.170,0.830,25.58,288,1.000,bicubic
vit_large_patch32_384.orig_in21k_ft_in1k,95.830,4.170,99.150,0.850,306.63,384,1.000,bicubic
cait_xxs36_384.fb_dist_in1k,95.830,4.170,99.090,0.910,17.37,384,1.000,bicubic
tf_efficientnet_b4.in1k,95.820,4.180,99.050,0.950,19.34,380,0.922,bicubic
xcit_tiny_24_p8_224.fb_dist_in1k,95.810,4.190,99.210,0.790,12.11,224,1.000,bicubic
sequencer2d_m.in1k,95.810,4.190,99.110,0.890,38.31,224,0.875,bicubic
convnextv2_nano.fcmae_ft_in1k,95.800,4.200,99.090,0.910,15.62,288,1.000,bicubic
resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k,95.790,4.210,99.180,0.820,194.03,224,0.875,bilinear
convnext_tiny.fb_in1k,95.790,4.210,99.160,0.840,28.59,288,1.000,bicubic
twins_pcpvt_base.in1k,95.790,4.210,99.130,0.870,43.83,224,0.900,bicubic
resnet61q.ra2_in1k,95.780,4.220,98.990,1.010,36.85,288,1.000,bicubic
tf_efficientnet_b2.ns_jft_in1k,95.770,4.230,99.120,0.880,9.11,260,0.890,bicubic
vit_relpos_base_patch16_clsgap_224.sw_in1k,95.770,4.230,99.040,0.960,86.43,224,0.900,bicubic
poolformerv2_m48.sail_in1k,95.770,4.230,98.980,1.020,73.35,224,1.000,bicubic
ecaresnet101d_pruned.miil_in1k,95.760,4.240,99.180,0.820,24.88,288,0.950,bicubic
seresnext50_32x4d.racm_in1k,95.740,4.260,99.180,0.820,27.56,288,0.950,bicubic
gc_efficientnetv2_rw_t.agc_in1k,95.740,4.260,99.020,0.980,13.68,288,1.000,bicubic
efficientnet_b3.ra2_in1k,95.720,4.280,99.040,0.960,12.23,320,1.000,bicubic
tresnet_m.miil_in21k_ft_in1k,95.710,4.290,99.030,0.970,31.39,224,0.875,bilinear
pnasnet5large.tf_in1k,95.710,4.290,98.920,1.080,86.06,331,0.911,bicubic
coatnet_bn_0_rw_224.sw_in1k,95.700,4.300,99.050,0.950,27.44,224,0.950,bicubic
mobilevitv2_150.cvnets_in22k_ft_in1k_384,95.690,4.310,99.140,0.860,10.59,384,1.000,bicubic
nasnetalarge.tf_in1k,95.690,4.310,98.930,1.070,88.75,331,0.911,bicubic
crossvit_15_dagger_240.in1k,95.680,4.320,98.830,1.170,28.21,240,0.875,bicubic
flexivit_small.600ep_in1k,95.670,4.330,99.060,0.940,22.06,240,0.950,bicubic
xcit_tiny_24_p8_224.fb_in1k,95.660,4.340,99.050,0.950,12.11,224,1.000,bicubic
davit_tiny.msft_in1k,95.660,4.340,99.030,0.970,28.36,224,0.950,bicubic
efficientvit_b2.r256_in1k,95.650,4.350,99.060,0.940,24.33,256,1.000,bicubic
repvit_m1_5.dist_300e_in1k,95.640,4.360,98.990,1.010,14.64,224,0.950,bicubic
wide_resnet50_2.racm_in1k,95.630,4.370,99.240,0.760,68.88,288,0.950,bicubic
vit_small_r26_s32_224.augreg_in21k_ft_in1k,95.630,4.370,99.190,0.810,36.43,224,0.900,bicubic
resnetv2_50d_evos.ah_in1k,95.630,4.370,99.110,0.890,25.59,288,1.000,bicubic
poolformer_m48.sail_in1k,95.630,4.370,98.940,1.060,73.47,224,0.950,bicubic
pit_b_224.in1k,95.630,4.370,98.660,1.340,73.76,224,0.900,bicubic
efficientformer_l3.snap_dist_in1k,95.590,4.410,99.160,0.840,31.41,224,0.950,bicubic
efficientnetv2_rw_t.ra2_in1k,95.590,4.410,99.070,0.930,13.65,288,1.000,bicubic
gcvit_xtiny.in1k,95.590,4.410,99.040,0.960,19.98,224,0.875,bicubic
crossvit_18_dagger_240.in1k,95.570,4.430,99.060,0.940,44.27,240,0.875,bicubic
vit_relpos_base_patch16_224.sw_in1k,95.560,4.440,99.030,0.970,86.43,224,0.900,bicubic
pvt_v2_b2_li.in1k,95.560,4.440,98.990,1.010,22.55,224,0.900,bicubic
flexivit_small.1200ep_in1k,95.550,4.450,99.110,0.890,22.06,240,0.950,bicubic
convit_base.fb_in1k,95.550,4.450,98.880,1.120,86.54,224,0.875,bicubic
wide_resnet101_2.tv2_in1k,95.540,4.460,99.080,0.920,126.89,224,0.965,bilinear
coat_lite_small.in1k,95.540,4.460,98.860,1.140,19.84,224,0.900,bicubic
xcit_small_24_p16_224.fb_in1k,95.540,4.460,98.780,1.220,47.67,224,1.000,bicubic
xcit_medium_24_p16_224.fb_in1k,95.540,4.460,98.720,1.280,84.40,224,1.000,bicubic
levit_384.fb_dist_in1k,95.530,4.470,99.060,0.940,39.13,224,0.900,bicubic
levit_conv_384.fb_dist_in1k,95.530,4.470,99.050,0.950,39.13,224,0.900,bicubic
ecaresnet50t.ra2_in1k,95.520,4.480,99.110,0.890,25.57,320,0.950,bicubic
fbnetv3_g.ra2_in1k,95.520,4.480,98.990,1.010,16.62,288,0.950,bilinear
vit_base_patch32_clip_224.laion2b_ft_in1k,95.520,4.480,98.870,1.130,88.22,224,0.900,bicubic
resnet101.a1_in1k,95.520,4.480,98.850,1.150,44.55,288,1.000,bicubic
crossvit_base_240.in1k,95.520,4.480,98.810,1.190,105.03,240,0.875,bicubic
swin_tiny_patch4_window7_224.ms_in22k_ft_in1k,95.510,4.490,99.200,0.800,28.29,224,0.900,bicubic
resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k,95.510,4.490,99.120,0.880,88.79,224,0.875,bilinear
xception41p.ra3_in1k,95.510,4.490,98.910,1.090,26.91,299,0.940,bicubic
focalnet_tiny_srf.ms_in1k,95.500,4.500,99.130,0.870,28.43,224,0.900,bicubic
vit_relpos_medium_patch16_rpn_224.sw_in1k,95.500,4.500,99.080,0.920,38.73,224,0.900,bicubic
resnet152.tv2_in1k,95.500,4.500,98.960,1.040,60.19,224,0.965,bilinear
resnet152.a1_in1k,95.500,4.500,98.780,1.220,60.19,288,1.000,bicubic
swinv2_tiny_window8_256.ms_in1k,95.490,4.510,99.100,0.900,28.35,256,0.900,bicubic
tiny_vit_11m_224.in1k,95.490,4.510,98.990,1.010,11.00,224,0.950,bicubic
flexivit_small.300ep_in1k,95.490,4.510,98.960,1.040,22.06,240,0.950,bicubic
resnet152.a2_in1k,95.490,4.510,98.790,1.210,60.19,288,1.000,bicubic
pvt_v2_b2.in1k,95.480,4.520,99.000,1.000,25.36,224,0.900,bicubic
resnetv2_50d_gn.ah_in1k,95.480,4.520,98.950,1.050,25.57,288,1.000,bicubic
visformer_small.in1k,95.480,4.520,98.900,1.100,40.22,224,0.900,bicubic
vit_relpos_medium_patch16_cls_224.sw_in1k,95.470,4.530,98.950,1.050,38.76,224,0.900,bicubic
inception_next_tiny.sail_in1k,95.460,4.540,99.010,0.990,28.06,224,0.875,bicubic
vit_relpos_medium_patch16_224.sw_in1k,95.460,4.540,98.960,1.040,38.75,224,0.900,bicubic
focalnet_tiny_lrf.ms_in1k,95.460,4.540,98.910,1.090,28.65,224,0.900,bicubic
ecaresnet50d.miil_in1k,95.450,4.550,99.090,0.910,25.58,288,0.950,bicubic
deit_base_patch16_224.fb_in1k,95.450,4.550,98.840,1.160,86.57,224,0.900,bicubic
resnext50_32x4d.a1h_in1k,95.450,4.550,98.840,1.160,25.03,288,1.000,bicubic
resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k,95.440,4.560,99.120,0.880,44.18,224,0.875,bilinear
tresnet_xl.miil_in1k,95.440,4.560,99.050,0.950,78.44,224,0.875,bilinear
crossvit_18_240.in1k,95.440,4.560,98.790,1.210,43.27,240,0.875,bicubic
coatnet_0_rw_224.sw_in1k,95.440,4.560,98.720,1.280,27.44,224,0.950,bicubic
resnetrs101.tf_in1k,95.430,4.570,99.040,0.960,63.62,288,0.940,bicubic
coatnext_nano_rw_224.sw_in1k,95.430,4.570,99.010,0.990,14.70,224,0.900,bicubic
coatnet_rmlp_nano_rw_224.sw_in1k,95.430,4.570,98.990,1.010,15.15,224,0.900,bicubic
xcit_small_12_p16_224.fb_in1k,95.430,4.570,98.840,1.160,26.25,224,1.000,bicubic
xcit_large_24_p16_224.fb_in1k,95.430,4.570,98.630,1.370,189.10,224,1.000,bicubic
halo2botnet50ts_256.a1h_in1k,95.420,4.580,99.020,0.980,22.64,256,0.950,bicubic
ecaresnet50t.a1_in1k,95.410,4.590,99.010,0.990,25.57,288,1.000,bicubic
resnet101.a2_in1k,95.410,4.590,98.940,1.060,44.55,288,1.000,bicubic
resnet50.fb_swsl_ig1b_ft_in1k,95.400,4.600,99.300,0.700,25.56,224,0.875,bilinear
edgenext_small.usi_in1k,95.400,4.600,99.100,0.900,5.59,320,1.000,bicubic
poolformerv2_m36.sail_in1k,95.400,4.600,98.870,1.130,56.08,224,1.000,bicubic
vit_base_patch16_rpn_224.sw_in1k,95.390,4.610,98.940,1.060,86.54,224,0.900,bicubic
poolformer_m36.sail_in1k,95.390,4.610,98.860,1.140,56.17,224,0.950,bicubic
vit_small_patch16_224.augreg_in21k_ft_in1k,95.370,4.630,99.140,0.860,22.05,224,0.900,bicubic
swinv2_cr_tiny_ns_224.sw_in1k,95.370,4.630,98.940,1.060,28.33,224,0.900,bicubic
efficientformerv2_s2.snap_dist_in1k,95.360,4.640,98.930,1.070,12.71,224,0.950,bicubic
ecaresnet50t.a2_in1k,95.350,4.650,98.920,1.080,25.57,288,1.000,bicubic
convnext_nano.d1h_in1k,95.350,4.650,98.860,1.140,15.59,288,1.000,bicubic
vit_base_patch16_224.orig_in21k_ft_in1k,95.340,4.660,99.000,1.000,86.57,224,0.900,bicubic
seresnet50.ra2_in1k,95.330,4.670,99.010,0.990,28.09,288,0.950,bicubic
tf_efficientnet_b3.ap_in1k,95.320,4.680,98.900,1.100,12.23,300,0.904,bicubic
cs3sedarknet_l.c2ns_in1k,95.310,4.690,99.130,0.870,21.91,288,0.950,bicubic
poolformerv2_s36.sail_in1k,95.310,4.690,98.920,1.080,30.79,224,1.000,bicubic
regnety_032.tv2_in1k,95.310,4.690,98.910,1.090,19.44,224,0.965,bicubic
mixer_b16_224.miil_in21k_ft_in1k,95.300,4.700,98.880,1.120,59.88,224,0.875,bilinear
ecaresnetlight.miil_in1k,95.290,4.710,99.030,0.970,30.16,288,0.950,bicubic
vit_small_patch16_384.augreg_in1k,95.290,4.710,99.000,1.000,22.20,384,1.000,bicubic
tresnet_l.miil_in1k,95.280,4.720,99.010,0.990,55.99,224,0.875,bilinear
cait_xxs24_384.fb_dist_in1k,95.280,4.720,98.960,1.040,12.03,384,1.000,bicubic
resnet101.tv2_in1k,95.280,4.720,98.910,1.090,44.55,224,0.965,bilinear
resnet50_gn.a1h_in1k,95.250,4.750,99.000,1.000,25.56,288,0.950,bicubic
vit_srelpos_medium_patch16_224.sw_in1k,95.240,4.760,98.990,1.010,38.74,224,0.900,bicubic
nest_tiny_jx.goog_in1k,95.240,4.760,98.980,1.020,17.06,224,0.875,bicubic
gcresnet50t.ra2_in1k,95.240,4.760,98.910,1.090,25.90,288,1.000,bicubic
coatnet_nano_rw_224.sw_in1k,95.240,4.760,98.870,1.130,15.14,224,0.900,bicubic
convnextv2_pico.fcmae_ft_in1k,95.230,4.770,98.920,1.080,9.07,288,0.950,bicubic
mobilevitv2_175.cvnets_in22k_ft_in1k,95.220,4.780,98.800,1.200,14.25,256,0.888,bicubic
convit_small.fb_in1k,95.210,4.790,98.900,1.100,27.78,224,0.875,bicubic
twins_pcpvt_small.in1k,95.210,4.790,98.880,1.120,24.11,224,0.900,bicubic
repvit_m3.dist_in1k,95.200,4.800,99.090,0.910,10.68,224,0.950,bicubic
resnetaa50.a1h_in1k,95.200,4.800,98.920,1.080,25.56,288,1.000,bicubic
efficientvit_b2.r224_in1k,95.200,4.800,98.820,1.180,24.33,224,0.950,bicubic
twins_svt_small.in1k,95.190,4.810,98.880,1.120,24.06,224,0.900,bicubic
swin_s3_tiny_224.ms_in1k,95.180,4.820,98.950,1.050,28.33,224,0.900,bicubic
regnetz_b16.ra3_in1k,95.170,4.830,99.080,0.920,9.72,288,1.000,bicubic
tf_efficientnet_b1.ns_jft_in1k,95.160,4.840,99.100,0.900,7.79,240,0.882,bicubic
mobilevitv2_200.cvnets_in22k_ft_in1k,95.160,4.840,98.950,1.050,18.45,256,0.888,bicubic
tf_efficientnetv2_b3.in1k,95.160,4.840,98.820,1.180,14.36,300,0.904,bicubic
cs3darknet_focus_l.c2ns_in1k,95.150,4.850,98.960,1.040,21.15,288,0.950,bicubic
vit_relpos_small_patch16_224.sw_in1k,95.150,4.850,98.960,1.040,21.98,224,0.900,bicubic
crossvit_15_240.in1k,95.150,4.850,98.930,1.070,27.53,240,0.875,bicubic
lamhalobotnet50ts_256.a1h_in1k,95.150,4.850,98.880,1.120,22.57,256,0.950,bicubic
fastvit_sa12.apple_dist_in1k,95.150,4.850,98.810,1.190,11.58,256,0.900,bicubic
pit_s_distilled_224.in1k,95.140,4.860,98.890,1.110,24.04,224,0.900,bicubic
mobilevitv2_150.cvnets_in22k_ft_in1k,95.140,4.860,98.860,1.140,10.59,256,0.888,bicubic
swin_tiny_patch4_window7_224.ms_in1k,95.140,4.860,98.850,1.150,28.29,224,0.900,bicubic
halonet50ts.a1h_in1k,95.140,4.860,98.780,1.220,22.73,256,0.940,bicubic
convnext_nano_ols.d1h_in1k,95.140,4.860,98.730,1.270,15.65,288,1.000,bicubic
xcit_tiny_12_p16_384.fb_dist_in1k,95.130,4.870,99.020,0.980,6.72,384,1.000,bicubic
cs3darknet_l.c2ns_in1k,95.120,4.880,98.980,1.020,21.16,288,0.950,bicubic
efficientnet_el.ra_in1k,95.120,4.880,98.970,1.030,10.59,300,0.904,bicubic
vit_base_patch32_clip_224.openai_ft_in1k,95.110,4.890,98.980,1.020,88.22,224,0.900,bicubic
ecaresnet50d_pruned.miil_in1k,95.110,4.890,98.930,1.070,19.94,288,0.950,bicubic
gernet_l.idstcv_in1k,95.110,4.890,98.900,1.100,31.08,256,0.875,bilinear
regnetx_080.tv2_in1k,95.100,4.900,98.830,1.170,39.57,224,0.965,bicubic
xcit_tiny_12_p8_224.fb_dist_in1k,95.090,4.910,98.910,1.090,6.71,224,1.000,bicubic
convmixer_1536_20.in1k,95.080,4.920,99.030,0.970,51.63,224,0.960,bicubic
poolformer_s36.sail_in1k,95.080,4.920,98.910,1.090,30.86,224,0.900,bicubic
legacy_senet154.in1k,95.070,4.930,98.830,1.170,115.09,224,0.875,bilinear
tiny_vit_5m_224.dist_in22k_ft_in1k,95.050,4.950,98.970,1.030,5.39,224,0.950,bicubic
seresnet33ts.ra2_in1k,95.040,4.960,98.900,1.100,19.78,288,1.000,bicubic
tnt_s_patch16_224,95.040,4.960,98.830,1.170,23.76,224,0.900,bicubic
vit_srelpos_small_patch16_224.sw_in1k,95.030,4.970,98.950,1.050,21.97,224,0.900,bicubic
vit_small_patch32_384.augreg_in21k_ft_in1k,95.020,4.980,98.990,1.010,22.92,384,1.000,bicubic
resnet152s.gluon_in1k,95.020,4.980,98.930,1.070,60.32,224,0.875,bicubic
levit_256.fb_dist_in1k,95.020,4.980,98.880,1.120,18.89,224,0.900,bicubic
levit_conv_256.fb_dist_in1k,95.020,4.980,98.880,1.120,18.89,224,0.900,bicubic
resnetv2_50x1_bit.goog_in21k_ft_in1k,95.010,4.990,99.060,0.940,25.55,448,1.000,bilinear
vit_base_patch32_224.augreg_in21k_ft_in1k,95.010,4.990,99.030,0.970,88.22,224,0.900,bicubic
resnet50d.ra2_in1k,95.010,4.990,98.980,1.020,25.58,288,0.950,bicubic
tf_efficientnet_b3.aa_in1k,95.010,4.990,98.910,1.090,12.23,300,0.904,bicubic
tresnet_m.miil_in1k_448,94.990,5.010,98.980,1.020,31.39,448,0.875,bilinear
deit3_small_patch16_224.fb_in1k,94.990,5.010,98.460,1.540,22.06,224,0.900,bicubic
resnet50.d_in1k,94.980,5.020,98.840,1.160,25.56,288,1.000,bicubic
resnest50d_4s2x40d.in1k,94.970,5.030,99.080,0.920,30.42,224,0.875,bicubic
coat_mini.in1k,94.970,5.030,98.780,1.220,10.34,224,0.900,bicubic
rexnet_200.nav_in1k,94.940,5.060,99.010,0.990,16.37,224,0.875,bicubic
vit_base_patch16_384.augreg_in1k,94.940,5.060,98.890,1.110,86.86,384,1.000,bicubic
seresnext101_64x4d.gluon_in1k,94.930,5.070,98.830,1.170,88.23,224,0.875,bicubic
gcresnet33ts.ra2_in1k,94.920,5.080,98.810,1.190,19.88,288,1.000,bicubic
resnet50.c2_in1k,94.920,5.080,98.810,1.190,25.56,288,1.000,bicubic
senet154.gluon_in1k,94.920,5.080,98.760,1.240,115.09,224,0.875,bicubic
eva02_tiny_patch14_336.mim_in22k_ft_in1k,94.910,5.090,98.880,1.120,5.76,336,1.000,bicubic
repvit_m1_1.dist_450e_in1k,94.900,5.100,98.960,1.040,8.80,224,0.950,bicubic
resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k,94.890,5.110,98.870,1.130,25.03,224,0.875,bilinear
mobilevitv2_175.cvnets_in1k,94.890,5.110,98.860,1.140,14.25,256,0.888,bicubic
resmlp_36_224.fb_distilled_in1k,94.890,5.110,98.860,1.140,44.69,224,0.875,bicubic
seresnext101_32x4d.gluon_in1k,94.890,5.110,98.820,1.180,48.96,224,0.875,bicubic
tf_efficientnet_lite4.in1k,94.880,5.120,99.020,0.980,13.01,380,0.920,bilinear
fastvit_sa12.apple_in1k,94.880,5.120,98.890,1.110,11.58,256,0.900,bicubic
wide_resnet50_2.tv2_in1k,94.870,5.130,98.940,1.060,68.88,224,0.965,bilinear
ese_vovnet39b.ra_in1k,94.870,5.130,98.910,1.090,24.57,288,0.950,bicubic
gcresnext50ts.ch_in1k,94.860,5.140,98.860,1.140,15.67,288,1.000,bicubic
resnet50.b1k_in1k,94.860,5.140,98.810,1.190,25.56,288,1.000,bicubic
crossvit_small_240.in1k,94.850,5.150,99.020,0.980,26.86,240,0.875,bicubic
resnest50d.in1k,94.850,5.150,98.880,1.120,27.48,224,0.875,bilinear
resnetv2_50.a1h_in1k,94.850,5.150,98.870,1.130,25.55,288,1.000,bicubic
mobilevitv2_200.cvnets_in1k,94.840,5.160,98.710,1.290,18.45,256,0.888,bicubic
convnext_tiny.fb_in22k_ft_in1k,94.840,5.160,98.530,1.470,28.59,288,1.000,bicubic
fastvit_s12.apple_dist_in1k,94.830,5.170,98.810,1.190,9.47,256,0.900,bicubic
cspresnext50.ra_in1k,94.830,5.170,98.770,1.230,20.57,256,0.887,bilinear
resnext50_32x4d.a1_in1k,94.830,5.170,98.590,1.410,25.03,288,1.000,bicubic
res2net101d.in1k,94.820,5.180,98.770,1.230,45.23,224,0.875,bilinear
resnet50.a1h_in1k,94.770,5.230,98.690,1.310,25.56,224,1.000,bicubic
lambda_resnet50ts.a1h_in1k,94.770,5.230,98.470,1.530,21.54,256,0.950,bicubic
repvit_m1_1.dist_300e_in1k,94.760,5.240,98.930,1.070,8.80,224,0.950,bicubic
convnext_pico.d1_in1k,94.760,5.240,98.710,1.290,9.05,288,0.950,bicubic
sehalonet33ts.ra2_in1k,94.760,5.240,98.570,1.430,13.69,256,0.940,bicubic
resnet50.a1_in1k,94.740,5.260,98.710,1.290,25.56,288,1.000,bicubic
repvit_m2.dist_in1k,94.740,5.260,98.680,1.320,8.80,224,0.950,bicubic
resnest50d_1s4x24d.in1k,94.730,5.270,98.980,1.020,25.68,224,0.875,bicubic
resnet50.c1_in1k,94.730,5.270,98.930,1.070,25.56,288,1.000,bicubic
resnet50.b2k_in1k,94.730,5.270,98.820,1.180,25.56,288,1.000,bicubic
resnet152d.gluon_in1k,94.730,5.270,98.750,1.250,60.21,224,0.875,bicubic
resnet152.a3_in1k,94.730,5.270,98.680,1.320,60.19,224,0.950,bicubic
resnet50d.a1_in1k,94.730,5.270,98.490,1.510,25.58,288,1.000,bicubic
resnet101s.gluon_in1k,94.720,5.280,98.820,1.180,44.67,224,0.875,bicubic
deit_small_distilled_patch16_224.fb_in1k,94.710,5.290,99.030,0.970,22.44,224,0.900,bicubic
resnext50_32x4d.ra_in1k,94.700,5.300,98.760,1.240,25.03,288,0.950,bicubic
xcit_tiny_12_p8_224.fb_in1k,94.690,5.310,98.830,1.170,6.71,224,1.000,bicubic
haloregnetz_b.ra3_in1k,94.690,5.310,98.660,1.340,11.68,224,0.940,bicubic
resmlp_big_24_224.fb_in1k,94.680,5.320,98.500,1.500,129.14,224,0.875,bicubic
edgenext_small_rw.sw_in1k,94.670,5.330,98.780,1.220,7.83,320,1.000,bicubic
resnext101_64x4d.gluon_in1k,94.670,5.330,98.660,1.340,83.46,224,0.875,bicubic
regnetx_032.tv2_in1k,94.660,5.340,98.850,1.150,15.30,224,0.965,bicubic
cspdarknet53.ra_in1k,94.660,5.340,98.800,1.200,27.64,256,0.887,bilinear
seresnet50.a2_in1k,94.660,5.340,98.780,1.220,28.09,288,1.000,bicubic
seresnet50.a1_in1k,94.660,5.340,98.720,1.280,28.09,288,1.000,bicubic
poolformerv2_s24.sail_in1k,94.650,5.350,98.840,1.160,21.34,224,1.000,bicubic
maxvit_rmlp_pico_rw_256.sw_in1k,94.640,5.360,98.810,1.190,7.52,256,0.950,bicubic
resnet50.tv2_in1k,94.640,5.360,98.800,1.200,25.56,224,0.965,bilinear
efficientnet_b3_pruned.in1k,94.630,5.370,98.760,1.240,9.86,300,0.904,bicubic
resnet50.a2_in1k,94.630,5.370,98.660,1.340,25.56,288,1.000,bicubic
eca_resnet33ts.ra2_in1k,94.620,5.380,98.910,1.090,19.68,288,1.000,bicubic
darknet53.c2ns_in1k,94.620,5.380,98.900,1.100,41.61,288,1.000,bicubic
gernet_m.idstcv_in1k,94.620,5.380,98.860,1.140,21.14,224,0.875,bilinear
resnext50_32x4d.tv2_in1k,94.620,5.380,98.780,1.220,25.03,224,0.965,bilinear
convnext_pico_ols.d1_in1k,94.620,5.380,98.760,1.240,9.06,288,1.000,bicubic
efficientnet_b2.ra_in1k,94.620,5.380,98.710,1.290,9.11,288,1.000,bicubic
tresnet_m.miil_in1k,94.620,5.380,98.550,1.450,31.39,224,0.875,bilinear
sebotnet33ts_256.a1h_in1k,94.610,5.390,98.510,1.490,13.70,256,0.940,bicubic
fastvit_t12.apple_dist_in1k,94.590,5.410,98.790,1.210,7.55,256,0.900,bicubic
inception_resnet_v2.tf_in1k,94.580,5.420,98.790,1.210,55.84,299,0.897,bicubic
resnet50d.a2_in1k,94.580,5.420,98.690,1.310,25.58,288,1.000,bicubic
resnext50_32x4d.a2_in1k,94.580,5.420,98.650,1.350,25.03,288,1.000,bicubic
pit_s_224.in1k,94.570,5.430,98.700,1.300,23.46,224,0.900,bicubic
poolformer_s24.sail_in1k,94.560,5.440,98.900,1.100,21.39,224,0.900,bicubic
repvgg_b3.rvgg_in1k,94.550,5.450,98.780,1.220,123.09,224,0.875,bilinear
mobilevitv2_150.cvnets_in1k,94.550,5.450,98.710,1.290,10.59,256,0.888,bicubic
resnext50d_32x4d.bt_in1k,94.550,5.450,98.690,1.310,25.05,288,0.950,bicubic
regnety_320.pycls_in1k,94.540,5.460,98.850,1.150,145.05,224,0.875,bicubic
tf_efficientnet_b3.in1k,94.540,5.460,98.800,1.200,12.23,300,0.904,bicubic
nf_resnet50.ra2_in1k,94.540,5.460,98.790,1.210,25.56,288,0.940,bicubic
xcit_tiny_24_p16_224.fb_dist_in1k,94.540,5.460,98.780,1.220,12.12,224,1.000,bicubic
resnext101_32x4d.gluon_in1k,94.540,5.460,98.630,1.370,44.18,224,0.875,bicubic
repvit_m1_0.dist_450e_in1k,94.530,5.470,98.880,1.120,7.30,224,0.950,bicubic
regnety_016.tv2_in1k,94.520,5.480,98.820,1.180,11.20,224,0.965,bicubic
resnet50.ram_in1k,94.520,5.480,98.650,1.350,25.56,288,0.950,bicubic
repvgg_b3g4.rvgg_in1k,94.510,5.490,98.970,1.030,83.83,224,0.875,bilinear
tf_efficientnet_b2.ap_in1k,94.510,5.490,98.620,1.380,9.11,260,0.890,bicubic
convmixer_768_32.in1k,94.500,5.500,98.860,1.140,21.11,224,0.960,bicubic
efficientvit_b1.r288_in1k,94.490,5.510,98.520,1.480,9.10,288,1.000,bicubic
efficientformer_l1.snap_dist_in1k,94.480,5.520,98.830,1.170,12.29,224,0.950,bicubic
rexnet_150.nav_in1k,94.480,5.520,98.800,1.200,9.73,224,0.875,bicubic
regnety_120.pycls_in1k,94.470,5.530,98.820,1.180,51.82,224,0.875,bicubic
darknetaa53.c2ns_in1k,94.470,5.530,98.770,1.230,36.02,288,1.000,bilinear
resnetblur50.bt_in1k,94.460,5.540,98.840,1.160,25.56,288,0.950,bicubic
resnet50.fb_ssl_yfcc100m_ft_in1k,94.450,5.550,98.920,1.080,25.56,224,0.875,bilinear
resmlp_24_224.fb_distilled_in1k,94.450,5.550,98.770,1.230,30.02,224,0.875,bicubic
regnetx_320.pycls_in1k,94.450,5.550,98.740,1.260,107.81,224,0.875,bicubic
gcvit_xxtiny.in1k,94.420,5.580,98.890,1.110,12.00,224,0.875,bicubic
tf_efficientnetv2_b2.in1k,94.420,5.580,98.580,1.420,10.10,260,0.890,bicubic
tf_efficientnet_el.in1k,94.400,5.600,98.710,1.290,10.59,300,0.904,bicubic
efficientnet_el_pruned.in1k,94.390,5.610,98.740,1.260,10.59,300,0.904,bicubic
tf_efficientnet_b2.aa_in1k,94.380,5.620,98.610,1.390,9.11,260,0.890,bicubic
legacy_seresnext101_32x4d.in1k,94.370,5.630,98.630,1.370,48.96,224,0.875,bilinear
inception_v4.tf_in1k,94.370,5.630,98.580,1.420,42.68,299,0.875,bicubic
regnety_160.pycls_in1k,94.360,5.640,98.860,1.140,83.59,224,0.875,bicubic
deit_small_patch16_224.fb_in1k,94.350,5.650,98.690,1.310,22.05,224,0.900,bicubic
ecaresnet50t.a3_in1k,94.350,5.650,98.670,1.330,25.57,224,0.950,bicubic
dpn107.mx_in1k,94.340,5.660,98.500,1.500,86.92,224,0.875,bicubic
seresnext50_32x4d.gluon_in1k,94.330,5.670,98.610,1.390,27.56,224,0.875,bicubic
ecaresnet26t.ra2_in1k,94.320,5.680,98.720,1.280,16.01,320,0.950,bicubic
resnet50.bt_in1k,94.320,5.680,98.640,1.360,25.56,288,0.950,bicubic
resnetrs50.tf_in1k,94.320,5.680,98.640,1.360,35.69,224,0.910,bicubic
res2net50d.in1k,94.320,5.680,98.530,1.470,25.72,224,0.875,bilinear
repvit_m1_0.dist_300e_in1k,94.300,5.700,98.850,1.150,7.30,224,0.950,bicubic
xception71.tf_in1k,94.300,5.700,98.650,1.350,42.34,299,0.903,bicubic
dpn92.mx_in1k,94.290,5.710,98.750,1.250,37.67,224,0.875,bicubic
cait_xxs36_224.fb_dist_in1k,94.270,5.730,98.710,1.290,17.30,224,1.000,bicubic
tiny_vit_5m_224.in1k,94.240,5.760,98.690,1.310,5.39,224,0.950,bicubic
skresnext50_32x4d.ra_in1k,94.240,5.760,98.460,1.540,27.48,224,0.875,bicubic
regnetx_120.pycls_in1k,94.230,5.770,98.670,1.330,46.11,224,0.875,bicubic
resnet101d.gluon_in1k,94.230,5.770,98.540,1.460,44.57,224,0.875,bicubic
resnet50.ra_in1k,94.210,5.790,98.620,1.380,25.56,288,0.950,bicubic
tf_efficientnet_lite3.in1k,94.200,5.800,98.640,1.360,8.20,300,0.904,bilinear
efficientformerv2_s1.snap_dist_in1k,94.200,5.800,98.630,1.370,6.19,224,0.950,bicubic
resmlp_36_224.fb_in1k,94.190,5.810,98.660,1.340,44.69,224,0.875,bicubic
convnextv2_femto.fcmae_ft_in1k,94.190,5.810,98.610,1.390,5.23,288,0.950,bicubic
mixnet_xl.ra_in1k,94.180,5.820,98.320,1.680,11.90,224,0.875,bicubic
regnety_080.pycls_in1k,94.170,5.830,98.680,1.320,39.18,224,0.875,bicubic
inception_resnet_v2.tf_ens_adv_in1k,94.170,5.830,98.600,1.400,55.84,299,0.897,bicubic
levit_192.fb_dist_in1k,94.170,5.830,98.540,1.460,10.95,224,0.900,bicubic
levit_conv_192.fb_dist_in1k,94.170,5.830,98.540,1.460,10.95,224,0.900,bicubic
resnet152c.gluon_in1k,94.160,5.840,98.640,1.360,60.21,224,0.875,bicubic
dpn98.mx_in1k,94.160,5.840,98.590,1.410,61.57,224,0.875,bicubic
vit_base_patch16_224.sam_in1k,94.150,5.850,98.670,1.330,86.57,224,0.900,bicubic
gmlp_s16_224.ra3_in1k,94.150,5.850,98.500,1.500,19.42,224,0.875,bicubic
regnetx_160.pycls_in1k,94.140,5.860,98.750,1.250,54.28,224,0.875,bicubic
regnety_064.pycls_in1k,94.140,5.860,98.710,1.290,30.58,224,0.875,bicubic
efficientnet_b2_pruned.in1k,94.140,5.860,98.520,1.480,8.31,260,0.890,bicubic
regnetx_016.tv2_in1k,94.130,5.870,98.750,1.250,9.19,224,0.965,bicubic
fastvit_s12.apple_in1k,94.130,5.870,98.620,1.380,9.47,256,0.900,bicubic
nf_regnet_b1.ra2_in1k,94.110,5.890,98.620,1.380,10.22,288,0.900,bicubic
tf_efficientnet_b2.in1k,94.110,5.890,98.450,1.550,9.11,260,0.890,bicubic
resnet33ts.ra2_in1k,94.100,5.900,98.650,1.350,19.68,288,1.000,bicubic
efficientvit_b1.r256_in1k,94.090,5.910,98.360,1.640,9.10,256,1.000,bicubic
xcit_tiny_24_p16_224.fb_in1k,94.080,5.920,98.540,1.460,12.12,224,1.000,bicubic
resnet152.gluon_in1k,94.070,5.930,98.460,1.540,60.19,224,0.875,bicubic
dpn131.mx_in1k,94.050,5.950,98.710,1.290,79.25,224,0.875,bicubic
coat_lite_mini.in1k,94.040,5.960,98.550,1.450,11.01,224,0.900,bicubic
eca_halonext26ts.c1_in1k,94.040,5.960,98.490,1.510,10.76,256,0.940,bicubic
resnet101.a3_in1k,94.030,5.970,98.660,1.340,44.55,224,0.950,bicubic
hrnet_w64.ms_in1k,94.030,5.970,98.590,1.410,128.06,224,0.875,bilinear
resmlp_24_224.fb_in1k,94.030,5.970,98.330,1.670,30.02,224,0.875,bicubic
halonet26t.a1h_in1k,94.000,6.000,98.500,1.500,12.48,256,0.950,bicubic
dpn68b.ra_in1k,94.000,6.000,98.340,1.660,12.61,288,1.000,bicubic
fbnetv3_b.ra2_in1k,93.970,6.030,98.630,1.370,8.60,256,0.950,bilinear
resnet50.am_in1k,93.970,6.030,98.520,1.480,25.56,224,0.875,bicubic
dla102x2.in1k,93.970,6.030,98.490,1.510,41.28,224,0.875,bilinear
mobilevitv2_125.cvnets_in1k,93.960,6.040,98.550,1.450,7.48,256,0.888,bicubic
tf_efficientnetv2_b1.in1k,93.950,6.050,98.620,1.380,8.14,240,0.882,bicubic
convnext_femto.d1_in1k,93.930,6.070,98.520,1.480,5.22,288,0.950,bicubic
fbnetv3_d.ra2_in1k,93.920,6.080,98.740,1.260,10.31,256,0.950,bilinear
convnext_femto_ols.d1_in1k,93.920,6.080,98.620,1.380,5.23,288,0.950,bicubic
hrnet_w48.ms_in1k,93.920,6.080,98.610,1.390,77.47,224,0.875,bilinear
fastvit_t12.apple_in1k,93.920,6.080,98.600,1.400,7.55,256,0.900,bicubic
tf_efficientnet_cc_b1_8e.in1k,93.920,6.080,98.260,1.740,39.72,240,0.882,bicubic
regnetx_064.pycls_in1k,93.900,6.100,98.640,1.360,26.21,224,0.875,bicubic
rexnet_130.nav_in1k,93.900,6.100,98.400,1.600,7.56,224,0.875,bicubic
vit_small_patch16_224.augreg_in1k,93.890,6.110,98.440,1.560,22.05,224,0.900,bicubic
regnety_040.pycls_in1k,93.880,6.120,98.660,1.340,20.65,224,0.875,bicubic
repvgg_b2g4.rvgg_in1k,93.880,6.120,98.590,1.410,61.76,224,0.875,bilinear
regnetx_080.pycls_in1k,93.880,6.120,98.520,1.480,39.57,224,0.875,bicubic
efficientnet_em.ra2_in1k,93.830,6.170,98.820,1.180,6.90,240,0.882,bicubic
resnet32ts.ra2_in1k,93.830,6.170,98.650,1.350,17.96,288,1.000,bicubic
lambda_resnet26t.c1_in1k,93.830,6.170,98.640,1.360,10.96,256,0.940,bicubic
resnext101_32x8d.tv_in1k,93.820,6.180,98.580,1.420,88.79,224,0.875,bilinear
resnext50_32x4d.gluon_in1k,93.810,6.190,98.420,1.580,25.03,224,0.875,bicubic
pvt_v2_b1.in1k,93.800,6.200,98.660,1.340,14.01,224,0.900,bicubic
pit_xs_distilled_224.in1k,93.780,6.220,98.620,1.380,11.00,224,0.900,bicubic
eca_botnext26ts_256.c1_in1k,93.780,6.220,98.500,1.500,10.59,256,0.950,bicubic
xception65.tf_in1k,93.780,6.220,98.370,1.630,39.92,299,0.903,bicubic
legacy_seresnext50_32x4d.in1k,93.750,6.250,98.580,1.420,27.56,224,0.875,bilinear
resnet50d.gluon_in1k,93.750,6.250,98.390,1.610,25.58,224,0.875,bicubic
resnet101.gluon_in1k,93.750,6.250,98.380,1.620,44.55,224,0.875,bicubic
cspresnet50.ra_in1k,93.740,6.260,98.640,1.360,21.62,256,0.887,bilinear
wide_resnet101_2.tv_in1k,93.740,6.260,98.540,1.460,126.89,224,0.875,bilinear
mobileone_s4.apple_in1k,93.740,6.260,98.230,1.770,14.95,224,0.900,bilinear
vit_relpos_base_patch32_plus_rpn_256.sw_in1k,93.740,6.260,98.070,1.930,119.42,256,0.900,bicubic
res2net101_26w_4s.in1k,93.720,6.280,98.310,1.690,45.21,224,0.875,bilinear
lambda_resnet26rpt_256.c1_in1k,93.710,6.290,98.520,1.480,10.99,256,0.940,bicubic
regnety_008_tv.tv2_in1k,93.690,6.310,98.490,1.510,6.43,224,0.965,bicubic
tf_efficientnet_b1.ap_in1k,93.680,6.320,98.360,1.640,7.79,240,0.882,bicubic
resnet101c.gluon_in1k,93.670,6.330,98.420,1.580,44.57,224,0.875,bicubic
resnext50_32x4d.a3_in1k,93.660,6.340,98.520,1.480,25.03,224,0.950,bicubic
vit_tiny_patch16_384.augreg_in21k_ft_in1k,93.650,6.350,98.590,1.410,5.79,384,1.000,bicubic
resnet34d.ra2_in1k,93.640,6.360,98.540,1.460,21.82,288,0.950,bicubic
resnet50s.gluon_in1k,93.640,6.360,98.460,1.540,25.68,224,0.875,bicubic
vit_base_patch32_384.augreg_in1k,93.640,6.360,98.400,1.600,88.30,384,1.000,bicubic
vit_base_patch16_224.augreg_in1k,93.640,6.360,98.240,1.760,86.57,224,0.900,bicubic
tf_efficientnet_b0.ns_jft_in1k,93.620,6.380,98.640,1.360,5.29,224,0.875,bicubic
cait_xxs24_224.fb_dist_in1k,93.610,6.390,98.460,1.540,11.96,224,1.000,bicubic
repvit_m0_9.dist_450e_in1k,93.600,6.400,98.500,1.500,5.49,224,0.950,bicubic
coat_tiny.in1k,93.580,6.420,98.420,1.580,5.50,224,0.900,bicubic
regnetx_040.pycls_in1k,93.560,6.440,98.530,1.470,22.12,224,0.875,bicubic
visformer_tiny.in1k,93.560,6.440,98.490,1.510,10.32,224,0.900,bicubic
seresnext26t_32x4d.bt_in1k,93.560,6.440,98.390,1.610,16.81,288,0.950,bicubic
hrnet_w44.ms_in1k,93.550,6.450,98.700,1.300,67.06,224,0.875,bilinear
hrnet_w18.ms_aug_in1k,93.550,6.450,98.600,1.400,21.30,224,0.950,bilinear
hrnet_w32.ms_in1k,93.530,6.470,98.450,1.550,41.23,224,0.875,bilinear
xcit_nano_12_p8_384.fb_dist_in1k,93.520,6.480,98.530,1.470,3.05,384,1.000,bicubic
efficientvit_b1.r224_in1k,93.510,6.490,98.320,1.680,9.10,224,0.950,bicubic
botnet26t_256.c1_in1k,93.510,6.490,98.300,1.700,12.49,256,0.950,bicubic
repvgg_b2.rvgg_in1k,93.500,6.500,98.730,1.270,89.02,224,0.875,bilinear
hrnet_w40.ms_in1k,93.500,6.500,98.570,1.430,57.56,224,0.875,bilinear
repghostnet_200.in1k,93.500,6.500,98.540,1.460,9.80,224,0.875,bicubic
dla102x.in1k,93.490,6.510,98.500,1.500,26.31,224,0.875,bilinear
tf_efficientnet_b1.aa_in1k,93.490,6.510,98.360,1.640,7.79,240,0.882,bicubic
resnet50d.a3_in1k,93.480,6.520,98.450,1.550,25.58,224,0.950,bicubic
inception_v3.gluon_in1k,93.470,6.530,98.570,1.430,23.83,299,0.875,bicubic
legacy_xception.tf_in1k,93.460,6.540,98.530,1.470,22.86,299,0.897,bicubic
repvit_m0_9.dist_300e_in1k,93.440,6.560,98.710,1.290,5.49,224,0.950,bicubic
seresnext26d_32x4d.bt_in1k,93.440,6.560,98.330,1.670,16.81,288,0.950,bicubic
xception41.tf_in1k,93.430,6.570,98.430,1.570,26.97,299,0.903,bicubic
mixnet_l.ft_in1k,93.430,6.570,98.220,1.780,7.33,224,0.875,bicubic
regnety_032.pycls_in1k,93.410,6.590,98.640,1.360,19.44,224,0.875,bicubic
xcit_tiny_12_p16_224.fb_dist_in1k,93.410,6.590,98.510,1.490,6.72,224,1.000,bicubic
res2net50_26w_6s.in1k,93.400,6.600,98.280,1.720,37.05,224,0.875,bilinear
res2net50_26w_8s.in1k,93.390,6.610,98.170,1.830,48.40,224,0.875,bilinear
legacy_seresnet152.in1k,93.380,6.620,98.340,1.660,66.82,224,0.875,bilinear
dla169.in1k,93.350,6.650,98.610,1.390,53.39,224,0.875,bilinear
cs3darknet_m.c2ns_in1k,93.350,6.650,98.600,1.400,9.31,288,0.950,bicubic
levit_conv_128.fb_dist_in1k,93.350,6.650,98.370,1.630,9.21,224,0.900,bicubic
repvgg_b1.rvgg_in1k,93.330,6.670,98.520,1.480,57.42,224,0.875,bilinear
levit_128.fb_dist_in1k,93.330,6.670,98.370,1.630,9.21,224,0.900,bicubic
resnest26d.gluon_in1k,93.320,6.680,98.620,1.380,17.07,224,0.875,bilinear
resnet152.tv_in1k,93.320,6.680,98.380,1.620,60.19,224,0.875,bilinear
bat_resnext26ts.ch_in1k,93.320,6.680,98.360,1.640,10.73,256,0.900,bicubic
inception_v3.tf_in1k,93.320,6.680,98.040,1.960,23.83,299,0.875,bicubic
tf_mixnet_l.in1k,93.320,6.680,98.030,1.970,7.33,224,0.875,bicubic
legacy_seresnet101.in1k,93.310,6.690,98.520,1.480,49.33,224,0.875,bilinear
selecsls60b.in1k,93.310,6.690,98.290,1.710,32.77,224,0.875,bicubic
mobilevitv2_100.cvnets_in1k,93.300,6.700,98.280,1.720,4.90,256,0.888,bicubic
repvit_m1.dist_in1k,93.290,6.710,98.440,1.560,5.49,224,0.950,bicubic
efficientnet_b1.ft_in1k,93.250,6.750,98.290,1.710,7.79,256,1.000,bicubic
coat_lite_tiny.in1k,93.240,6.760,98.260,1.740,5.72,224,0.900,bicubic
resnet26t.ra2_in1k,93.200,6.800,98.490,1.510,16.01,320,1.000,bicubic
hrnet_w30.ms_in1k,93.200,6.800,98.410,1.590,37.71,224,0.875,bilinear
dla60_res2next.in1k,93.190,6.810,98.400,1.600,17.03,224,0.875,bilinear
dla60_res2net.in1k,93.170,6.830,98.430,1.570,20.85,224,0.875,bilinear
efficientnet_es.ra_in1k,93.170,6.830,98.410,1.590,5.44,224,0.875,bicubic
mobilevit_s.cvnets_in1k,93.160,6.840,98.440,1.560,5.58,256,0.900,bicubic
wide_resnet50_2.tv_in1k,93.160,6.840,98.370,1.630,68.88,224,0.875,bilinear
gcresnext26ts.ch_in1k,93.160,6.840,98.320,1.680,10.48,288,1.000,bicubic
ese_vovnet19b_dw.ra_in1k,93.150,6.850,98.250,1.750,6.54,288,0.950,bicubic
regnetx_032.pycls_in1k,93.110,6.890,98.390,1.610,15.30,224,0.875,bicubic
tf_efficientnetv2_b0.in1k,93.110,6.890,98.390,1.610,7.14,224,0.875,bicubic
resnet34.a1_in1k,93.100,6.900,98.330,1.670,21.80,288,1.000,bicubic
pit_xs_224.in1k,93.100,6.900,98.310,1.690,10.62,224,0.900,bicubic
tf_efficientnet_b1.in1k,93.100,6.900,98.300,1.700,7.79,240,0.882,bicubic
convnext_atto_ols.a2_in1k,93.090,6.910,98.470,1.530,3.70,288,0.950,bicubic
dla60x.in1k,93.080,6.920,98.500,1.500,17.35,224,0.875,bilinear
eca_resnext26ts.ch_in1k,93.060,6.940,98.400,1.600,10.30,288,1.000,bicubic
dla102.in1k,93.050,6.950,98.550,1.450,33.27,224,0.875,bilinear
regnety_016.pycls_in1k,93.040,6.960,98.370,1.630,11.20,224,0.875,bicubic
resnet50c.gluon_in1k,93.030,6.970,98.400,1.600,25.58,224,0.875,bicubic
rexnet_100.nav_in1k,93.020,6.980,98.190,1.810,4.80,224,0.875,bicubic
selecsls60.in1k,93.010,6.990,98.300,1.700,30.67,224,0.875,bicubic
repvgg_b1g4.rvgg_in1k,93.000,7.000,98.430,1.570,39.97,224,0.875,bilinear
ghostnetv2_160.in1k,92.990,7.010,98.230,1.770,12.39,224,0.875,bicubic
cs3darknet_focus_m.c2ns_in1k,92.970,7.030,98.390,1.610,9.30,288,0.950,bicubic
hardcorenas_f.miil_green_in1k,92.970,7.030,98.160,1.840,8.20,224,0.875,bilinear
convnextv2_atto.fcmae_ft_in1k,92.970,7.030,98.060,1.940,3.71,288,0.950,bicubic
seresnext26ts.ch_in1k,92.960,7.040,98.410,1.590,10.39,288,1.000,bicubic
poolformerv2_s12.sail_in1k,92.960,7.040,98.360,1.640,11.89,224,1.000,bicubic
legacy_seresnet50.in1k,92.960,7.040,98.180,1.820,28.09,224,0.875,bilinear
tf_efficientnet_em.in1k,92.940,7.060,98.190,1.810,6.90,240,0.882,bicubic
mobileone_s3.apple_in1k,92.940,7.060,98.180,1.820,10.17,224,0.900,bilinear
inception_v3.tf_adv_in1k,92.900,7.100,98.140,1.860,23.83,299,0.875,bicubic
crossvit_9_dagger_240.in1k,92.890,7.110,98.240,1.760,8.78,240,0.875,bicubic
res2next50.in1k,92.840,7.160,98.180,1.820,24.67,224,0.875,bilinear
tf_efficientnet_cc_b0_8e.in1k,92.840,7.160,98.180,1.820,24.01,224,0.875,bicubic
gmixer_24_224.ra3_in1k,92.830,7.170,97.880,2.120,24.72,224,0.875,bicubic
mobileone_s2.apple_in1k,92.820,7.180,98.270,1.730,7.88,224,0.900,bilinear
resmlp_12_224.fb_distilled_in1k,92.820,7.180,98.140,1.860,15.35,224,0.875,bicubic
resnet101.tv_in1k,92.810,7.190,98.250,1.750,44.55,224,0.875,bilinear
dpn68b.mx_in1k,92.790,7.210,98.150,1.850,12.61,224,0.875,bicubic
convnext_atto.d2_in1k,92.790,7.210,98.080,1.920,3.70,288,0.950,bicubic
efficientnet_b1_pruned.in1k,92.780,7.220,98.040,1.960,6.33,240,0.882,bicubic
hrnet_w18_small_v2.gluon_in1k,92.770,7.230,98.410,1.590,15.60,224,0.875,bicubic
res2net50_14w_8s.in1k,92.770,7.230,98.160,1.840,25.06,224,0.875,bilinear
resnext50_32x4d.tv_in1k,92.750,7.250,98.270,1.730,25.03,224,0.875,bilinear
densenet201.tv_in1k,92.740,7.260,98.230,1.770,20.01,224,0.875,bicubic
inception_v3.tv_in1k,92.730,7.270,97.970,2.030,23.83,299,0.875,bicubic
resnet50.a3_in1k,92.720,7.280,98.170,1.830,25.56,224,0.950,bicubic
resnet34.a2_in1k,92.720,7.280,98.010,1.990,21.80,288,1.000,bicubic
efficientnet_b0.ra_in1k,92.690,7.310,98.070,1.930,5.29,224,0.875,bicubic
tf_efficientnet_lite2.in1k,92.650,7.350,98.230,1.770,6.09,260,0.890,bicubic
legacy_seresnext26_32x4d.in1k,92.640,7.360,98.120,1.880,16.79,224,0.875,bicubic
tf_efficientnet_lite1.in1k,92.630,7.370,98.060,1.940,5.42,240,0.882,bicubic
densenetblur121d.ra_in1k,92.620,7.380,98.260,1.740,8.00,288,0.950,bicubic
poolformer_s12.sail_in1k,92.610,7.390,98.180,1.820,11.92,224,0.900,bicubic
tf_efficientnet_cc_b0_4e.in1k,92.600,7.400,98.080,1.920,13.31,224,0.875,bicubic
hardcorenas_e.miil_green_in1k,92.570,7.430,98.100,1.900,8.07,224,0.875,bilinear
regnetx_008.tv2_in1k,92.550,7.450,98.180,1.820,7.26,224,0.965,bicubic
res2net50_48w_2s.in1k,92.550,7.450,98.080,1.920,25.29,224,0.875,bilinear
resnet50.gluon_in1k,92.540,7.460,98.170,1.830,25.56,224,0.875,bicubic
fastvit_t8.apple_dist_in1k,92.540,7.460,98.040,1.960,4.03,256,0.900,bicubic
densenet121.ra_in1k,92.520,7.480,98.220,1.780,7.98,288,0.950,bicubic
resnet26d.bt_in1k,92.520,7.480,98.210,1.790,16.01,288,0.950,bicubic
xcit_tiny_12_p16_224.fb_in1k,92.510,7.490,98.240,1.760,6.72,224,1.000,bicubic
res2net50_26w_4s.in1k,92.490,7.510,98.030,1.970,25.70,224,0.875,bilinear
densenet161.tv_in1k,92.480,7.520,98.300,1.700,28.68,224,0.875,bicubic
efficientvit_m5.r224_in1k,92.460,7.540,97.990,2.010,12.47,224,0.875,bicubic
tinynet_a.in1k,92.440,7.560,98.080,1.920,6.19,192,0.875,bicubic
resnet34.bt_in1k,92.410,7.590,98.150,1.850,21.80,288,0.950,bicubic
mixnet_m.ft_in1k,92.410,7.590,97.870,2.130,5.01,224,0.875,bicubic
convmixer_1024_20_ks9_p14.in1k,92.400,7.600,98.270,1.730,24.38,224,0.960,bicubic
hardcorenas_d.miil_green_in1k,92.400,7.600,98.080,1.920,7.50,224,0.875,bilinear
mobilenetv2_120d.ra_in1k,92.400,7.600,98.060,1.940,5.83,224,0.875,bicubic
skresnet34.ra_in1k,92.380,7.620,98.150,1.850,22.28,224,0.875,bicubic
repghostnet_150.in1k,92.370,7.630,98.050,1.950,6.58,224,0.875,bicubic
hrnet_w18.ms_in1k,92.320,7.680,98.260,1.740,21.30,224,0.875,bilinear
ghostnetv2_130.in1k,92.320,7.680,98.070,1.930,8.96,224,0.875,bicubic
tf_mixnet_m.in1k,92.300,7.700,97.890,2.110,5.01,224,0.875,bicubic
selecsls42b.in1k,92.290,7.710,98.110,1.890,32.46,224,0.875,bicubic
mobilenetv3_large_100.miil_in21k_ft_in1k,92.260,7.740,97.620,2.380,5.48,224,0.875,bilinear
tf_efficientnet_b0.aa_in1k,92.240,7.760,98.000,2.000,5.29,224,0.875,bicubic
resmlp_12_224.fb_in1k,92.220,7.780,98.150,1.850,15.35,224,0.875,bicubic
dla60.in1k,92.200,7.800,98.100,1.900,22.04,224,0.875,bilinear
tf_efficientnet_b0.ap_in1k,92.200,7.800,98.020,1.980,5.29,224,0.875,bicubic
regnetx_016.pycls_in1k,92.180,7.820,98.200,1.800,9.19,224,0.875,bicubic
gernet_s.idstcv_in1k,92.130,7.870,98.190,1.810,8.17,224,0.875,bilinear
resnext26ts.ra2_in1k,92.130,7.870,98.030,1.970,10.30,288,1.000,bicubic
xcit_nano_12_p8_224.fb_dist_in1k,92.080,7.920,98.160,1.840,3.05,224,1.000,bicubic
tf_efficientnet_b0.in1k,92.080,7.920,97.910,2.090,5.29,224,0.875,bicubic
seresnet50.a3_in1k,92.070,7.930,98.040,1.960,28.09,224,0.950,bicubic
fastvit_t8.apple_in1k,92.060,7.940,97.930,2.070,4.03,256,0.900,bicubic
vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k,92.040,7.960,98.290,1.710,6.36,384,1.000,bicubic
vit_small_patch32_224.augreg_in21k_ft_in1k,92.040,7.960,98.230,1.770,22.88,224,0.900,bicubic
hardcorenas_c.miil_green_in1k,92.020,7.980,97.840,2.160,5.52,224,0.875,bilinear
resnet26.bt_in1k,91.990,8.010,98.220,1.780,16.00,288,0.950,bicubic
dpn68.mx_in1k,91.990,8.010,98.020,1.980,12.61,224,0.875,bicubic
tf_efficientnet_es.in1k,91.970,8.030,97.880,2.120,5.44,224,0.875,bicubic
levit_128s.fb_dist_in1k,91.960,8.040,98.060,1.940,7.78,224,0.900,bicubic
levit_conv_128s.fb_dist_in1k,91.960,8.040,98.060,1.940,7.78,224,0.900,bicubic
efficientformerv2_s0.snap_dist_in1k,91.960,8.040,97.890,2.110,3.60,224,0.950,bicubic
repvgg_a2.rvgg_in1k,91.940,8.060,98.140,1.860,28.21,224,0.875,bilinear
densenet169.tv_in1k,91.940,8.060,98.100,1.900,14.15,224,0.875,bicubic
repghostnet_130.in1k,91.890,8.110,97.930,2.070,5.48,224,0.875,bicubic
resnet50.tv_in1k,91.880,8.120,98.040,1.960,25.56,224,0.875,bilinear
mixer_b16_224.goog_in21k_ft_in1k,91.880,8.120,97.260,2.740,59.88,224,0.875,bicubic
mobilenetv2_140.ra_in1k,91.840,8.160,97.860,2.140,6.11,224,0.875,bicubic
xcit_nano_12_p16_384.fb_dist_in1k,91.830,8.170,98.020,1.980,3.05,384,1.000,bicubic
mixnet_s.ft_in1k,91.820,8.180,97.700,2.300,4.13,224,0.875,bicubic
vit_tiny_patch16_224.augreg_in21k_ft_in1k,91.770,8.230,98.040,1.960,5.72,224,0.900,bicubic
mobilevitv2_075.cvnets_in1k,91.760,8.240,97.860,2.140,2.87,256,0.888,bicubic
hardcorenas_b.miil_green_in1k,91.760,8.240,97.780,2.220,5.18,224,0.875,bilinear
regnety_008.pycls_in1k,91.730,8.270,98.180,1.820,6.26,224,0.875,bicubic
resnest14d.gluon_in1k,91.720,8.280,97.870,2.130,10.61,224,0.875,bilinear
edgenext_x_small.in1k,91.710,8.290,97.600,2.400,2.34,288,1.000,bicubic
regnety_004.tv2_in1k,91.580,8.420,97.890,2.110,4.34,224,0.965,bicubic
tf_mixnet_s.in1k,91.520,8.480,97.620,2.380,4.13,224,0.875,bicubic
repvgg_b0.rvgg_in1k,91.390,8.610,98.000,2.000,15.82,224,0.875,bilinear
regnety_006.pycls_in1k,91.390,8.610,97.700,2.300,6.06,224,0.875,bicubic
hardcorenas_a.miil_green_in1k,91.350,8.650,97.850,2.150,5.26,224,0.875,bilinear
mobilenetv3_large_100.ra_in1k,91.350,8.650,97.710,2.290,5.48,224,0.875,bicubic
semnasnet_100.rmsp_in1k,91.310,8.690,97.560,2.440,3.89,224,0.875,bicubic
mobileone_s1.apple_in1k,91.280,8.720,97.820,2.180,4.83,224,0.900,bilinear
tf_mobilenetv3_large_100.in1k,91.230,8.770,97.660,2.340,5.48,224,0.875,bilinear
mobilenetv3_rw.rmsp_in1k,91.210,8.790,97.660,2.340,5.48,224,0.875,bicubic
hrnet_w18_small_v2.ms_in1k,91.200,8.800,97.900,2.100,15.60,224,0.875,bilinear
vit_base_patch32_224.augreg_in1k,91.190,8.810,97.390,2.610,88.22,224,0.900,bicubic
efficientnet_es_pruned.in1k,91.170,8.830,97.740,2.260,5.44,224,0.875,bicubic
efficientnet_lite0.ra_in1k,91.120,8.880,97.640,2.360,4.65,224,0.875,bicubic
regnetx_008.pycls_in1k,91.050,8.950,97.720,2.280,7.26,224,0.875,bicubic
tf_efficientnet_lite0.in1k,91.050,8.950,97.580,2.420,4.65,224,0.875,bicubic
xcit_nano_12_p8_224.fb_in1k,91.010,8.990,97.770,2.230,3.05,224,1.000,bicubic
resnet34.gluon_in1k,90.980,9.020,97.630,2.370,21.80,224,0.875,bicubic
mobilenetv2_110d.ra_in1k,90.960,9.040,97.540,2.460,4.52,224,0.875,bicubic
tinynet_b.in1k,90.920,9.080,97.660,2.340,3.73,188,0.875,bicubic
ghostnetv2_100.in1k,90.900,9.100,97.700,2.300,6.16,224,0.875,bicubic
legacy_seresnet34.in1k,90.900,9.100,97.580,2.420,21.96,224,0.875,bilinear
densenet121.tv_in1k,90.890,9.110,97.710,2.290,7.98,224,0.875,bicubic
mobilevit_xs.cvnets_in1k,90.820,9.180,97.930,2.070,2.32,256,0.900,bicubic
pit_ti_distilled_224.in1k,90.770,9.230,97.610,2.390,5.10,224,0.900,bicubic
dla34.in1k,90.760,9.240,97.650,2.350,15.74,224,0.875,bilinear
fbnetc_100.rmsp_in1k,90.730,9.270,97.210,2.790,5.57,224,0.875,bilinear
deit_tiny_distilled_patch16_224.fb_in1k,90.710,9.290,97.560,2.440,5.91,224,0.900,bicubic
repghostnet_111.in1k,90.710,9.290,97.470,2.530,4.54,224,0.875,bicubic
resnet18.fb_swsl_ig1b_ft_in1k,90.700,9.300,97.700,2.300,11.69,224,0.875,bilinear
convit_tiny.fb_in1k,90.660,9.340,97.730,2.270,5.71,224,0.875,bicubic
regnetx_004_tv.tv2_in1k,90.640,9.360,97.600,2.400,5.50,224,0.965,bicubic
crossvit_9_240.in1k,90.630,9.370,97.730,2.270,8.55,240,0.875,bicubic
repvgg_a1.rvgg_in1k,90.600,9.400,97.650,2.350,14.09,224,0.875,bilinear
efficientvit_m4.r224_in1k,90.580,9.420,97.530,2.470,8.80,224,0.875,bicubic
mnasnet_100.rmsp_in1k,90.500,9.500,97.470,2.530,4.38,224,0.875,bicubic
regnety_004.pycls_in1k,90.490,9.510,97.530,2.470,4.34,224,0.875,bicubic
regnetx_006.pycls_in1k,90.350,9.650,97.430,2.570,6.20,224,0.875,bicubic
spnasnet_100.rmsp_in1k,90.330,9.670,97.190,2.810,4.42,224,0.875,bilinear
repghostnet_100.in1k,90.290,9.710,97.480,2.520,4.07,224,0.875,bicubic
resnet18d.ra2_in1k,90.280,9.720,97.560,2.440,11.71,288,0.950,bicubic
crossvit_tiny_240.in1k,90.230,9.770,97.590,2.410,7.01,240,0.875,bicubic
resnet18.fb_ssl_yfcc100m_ft_in1k,90.210,9.790,97.550,2.450,11.69,224,0.875,bilinear
ghostnet_100.in1k,90.180,9.820,97.290,2.710,5.18,224,0.875,bicubic
vgg16_bn.tv_in1k,90.090,9.910,97.370,2.630,138.37,224,0.875,bilinear
vgg19_bn.tv_in1k,90.080,9.920,97.580,2.420,143.68,224,0.875,bilinear
semnasnet_075.rmsp_in1k,90.070,9.930,97.440,2.560,2.91,224,0.875,bicubic
resnet34.tv_in1k,89.950,10.050,97.340,2.660,21.80,224,0.875,bilinear
pit_ti_224.in1k,89.940,10.060,97.450,2.550,4.85,224,0.900,bicubic
resnet34.a3_in1k,89.940,10.060,97.180,2.820,21.80,224,0.950,bicubic
efficientvit_m3.r224_in1k,89.860,10.140,97.540,2.460,6.90,224,0.875,bicubic
vit_base_patch32_224.sam_in1k,89.740,10.260,97.000,3.000,88.22,224,0.900,bicubic
xcit_nano_12_p16_224.fb_dist_in1k,89.700,10.300,97.100,2.900,3.05,224,1.000,bicubic
resnet18.a1_in1k,89.680,10.320,97.100,2.900,11.69,288,1.000,bicubic
deit_tiny_patch16_224.fb_in1k,89.660,10.340,97.450,2.550,5.72,224,0.900,bicubic
skresnet18.ra_in1k,89.660,10.340,97.230,2.770,11.96,224,0.875,bicubic
tf_mobilenetv3_large_075.in1k,89.640,10.360,97.190,2.810,3.99,224,0.875,bilinear
mobilenetv2_100.ra_in1k,89.600,10.400,97.150,2.850,3.50,224,0.875,bicubic
resnet18.a2_in1k,89.570,10.430,96.960,3.040,11.69,288,1.000,bicubic
hrnet_w18_small.gluon_in1k,89.470,10.530,97.060,2.940,13.19,224,0.875,bicubic
repvgg_a0.rvgg_in1k,89.280,10.720,96.890,3.110,9.11,224,0.875,bilinear
vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k,89.180,10.820,97.220,2.780,6.34,224,0.900,bicubic
hrnet_w18_small.ms_in1k,89.050,10.950,97.120,2.880,13.19,224,0.875,bilinear
vgg19.tv_in1k,89.050,10.950,96.870,3.130,143.67,224,0.875,bilinear
resnet14t.c3_in1k,88.990,11.010,96.730,3.270,10.08,224,0.950,bicubic
tf_mobilenetv3_large_minimal_100.in1k,88.950,11.050,96.860,3.140,3.92,224,0.875,bilinear
regnetx_004.pycls_in1k,88.930,11.070,97.120,2.880,5.16,224,0.875,bicubic
legacy_seresnet18.in1k,88.890,11.110,96.980,3.020,11.78,224,0.875,bicubic
edgenext_xx_small.in1k,88.890,11.110,96.700,3.300,1.33,288,1.000,bicubic
repghostnet_080.in1k,88.840,11.160,96.700,3.300,3.28,224,0.875,bicubic
pvt_v2_b0.in1k,88.780,11.220,96.860,3.140,3.67,224,0.900,bicubic
vgg13_bn.tv_in1k,88.750,11.250,96.980,3.020,133.05,224,0.875,bilinear
lcnet_100.ra2_in1k,88.750,11.250,96.720,3.280,2.95,224,0.875,bicubic
xcit_nano_12_p16_224.fb_in1k,88.620,11.380,96.790,3.210,3.05,224,1.000,bicubic
vgg16.tv_in1k,88.560,11.440,96.800,3.200,138.36,224,0.875,bilinear
efficientvit_m2.r224_in1k,88.470,11.530,96.900,3.100,4.19,224,0.875,bicubic
resnet18.gluon_in1k,88.370,11.630,96.670,3.330,11.69,224,0.875,bicubic
mobileone_s0.apple_in1k,88.230,11.770,96.400,3.600,5.29,224,0.875,bilinear
mobilevitv2_050.cvnets_in1k,88.180,11.820,96.990,3.010,1.37,256,0.888,bicubic
efficientvit_b0.r224_in1k,87.940,12.060,96.130,3.870,3.41,224,0.950,bicubic
tinynet_c.in1k,87.780,12.220,96.370,3.630,2.46,184,0.875,bicubic
vgg11_bn.tv_in1k,87.500,12.500,96.820,3.180,132.87,224,0.875,bilinear
resnet18.tv_in1k,87.380,12.620,96.290,3.710,11.69,224,0.875,bilinear
regnety_002.pycls_in1k,87.370,12.630,96.610,3.390,3.16,224,0.875,bicubic
mobilevit_xxs.cvnets_in1k,87.160,12.840,96.100,3.900,1.27,256,0.900,bicubic
mixer_l16_224.goog_in21k_ft_in1k,87.150,12.850,93.520,6.480,208.20,224,0.875,bicubic
vgg13.tv_in1k,87.040,12.960,96.330,3.670,133.05,224,0.875,bilinear
efficientvit_m1.r224_in1k,86.790,13.210,96.030,3.970,2.98,224,0.875,bicubic
vgg11.tv_in1k,86.580,13.420,96.280,3.720,132.86,224,0.875,bilinear
repghostnet_058.in1k,86.540,13.460,95.900,4.100,2.55,224,0.875,bicubic
resnet18.a3_in1k,86.450,13.550,95.880,4.120,11.69,224,0.950,bicubic
dla60x_c.in1k,86.290,13.710,96.160,3.840,1.32,224,0.875,bilinear
resnet10t.c3_in1k,86.220,13.780,95.740,4.260,5.44,224,0.950,bicubic
regnetx_002.pycls_in1k,86.140,13.860,95.970,4.030,2.68,224,0.875,bicubic
lcnet_075.ra2_in1k,85.970,14.030,95.680,4.320,2.36,224,0.875,bicubic
mobilenetv3_small_100.lamb_in1k,85.220,14.780,95.650,4.350,2.54,224,0.875,bicubic
tf_mobilenetv3_small_100.in1k,85.190,14.810,95.770,4.230,2.54,224,0.875,bilinear
repghostnet_050.in1k,85.060,14.940,95.200,4.800,2.31,224,0.875,bicubic
tinynet_d.in1k,84.720,15.280,95.170,4.830,2.34,152,0.875,bicubic
mnasnet_small.lamb_in1k,84.420,15.580,95.190,4.810,2.03,224,0.875,bicubic
dla46x_c.in1k,84.230,15.770,95.270,4.730,1.07,224,0.875,bilinear
mobilenetv2_050.lamb_in1k,83.910,16.090,94.720,5.280,1.97,224,0.875,bicubic
dla46_c.in1k,83.610,16.390,94.950,5.050,1.30,224,0.875,bilinear
tf_mobilenetv3_small_075.in1k,83.500,16.500,94.840,5.160,2.04,224,0.875,bilinear
mobilenetv3_small_075.lamb_in1k,83.030,16.970,94.100,5.900,2.04,224,0.875,bicubic
efficientvit_m0.r224_in1k,82.350,17.650,94.430,5.570,2.35,224,0.875,bicubic
lcnet_050.ra2_in1k,81.800,18.200,93.710,6.290,1.88,224,0.875,bicubic
tf_mobilenetv3_small_minimal_100.in1k,81.400,18.600,93.680,6.320,2.04,224,0.875,bilinear
tinynet_e.in1k,78.920,21.080,92.540,7.460,2.04,106,0.875,bicubic
mobilenetv3_small_050.lamb_in1k,77.020,22.980,91.300,8.700,1.59,224,0.875,bicubic
| 0 |
hf_public_repos/pytorch-image-models | hf_public_repos/pytorch-image-models/results/benchmark-infer-amp-nhwc-pt113-cu117-rtx3090.csv | model,infer_samples_per_sec,infer_step_time,infer_batch_size,infer_img_size,infer_gmacs,infer_macts,param_count
tinynet_e,72737.62,14.068,1024,106,0.03,0.69,2.04
mobilenetv3_small_050,54822.3,18.668,1024,224,0.03,0.92,1.59
lcnet_035,53629.35,19.084,1024,224,0.03,1.04,1.64
lcnet_050,45492.41,22.499,1024,224,0.05,1.26,1.88
mobilenetv3_small_075,39215.51,26.102,1024,224,0.05,1.3,2.04
tinynet_d,37346.61,27.409,1024,152,0.05,1.42,2.34
mobilenetv3_small_100,36280.34,28.214,1024,224,0.06,1.42,2.54
tf_mobilenetv3_small_minimal_100,31726.33,32.265,1024,224,0.06,1.41,2.04
tf_mobilenetv3_small_075,31503.43,32.494,1024,224,0.05,1.3,2.04
lcnet_075,29817.69,34.332,1024,224,0.1,1.99,2.36
tf_mobilenetv3_small_100,29444.91,34.767,1024,224,0.06,1.42,2.54
mnasnet_small,25354.86,40.376,1024,224,0.07,2.16,2.03
lcnet_100,24134.76,42.417,1024,224,0.16,2.52,2.95
regnetx_002,23983.4,42.686,1024,224,0.2,2.16,2.68
levit_128s,22675.73,45.148,1024,224,0.31,1.88,7.78
regnety_002,21709.37,47.158,1024,224,0.2,2.17,3.16
mobilenetv2_035,21673.44,47.236,1024,224,0.07,2.86,1.68
mnasnet_050,20010.27,51.163,1024,224,0.11,3.07,2.22
ghostnet_050,18932.82,54.075,1024,224,0.05,1.77,2.59
tinynet_c,18428.42,55.556,1024,184,0.11,2.87,2.46
semnasnet_050,17215.18,59.471,1024,224,0.11,3.44,2.08
mobilenetv2_050,17194.94,59.542,1024,224,0.1,3.64,1.97
cs3darknet_focus_s,16189.76,63.24,1024,256,0.69,2.7,3.27
lcnet_150,15557.15,65.811,1024,224,0.34,3.79,4.5
cs3darknet_s,15369.47,66.615,1024,256,0.72,2.97,3.28
levit_128,15337.67,66.754,1024,224,0.41,2.71,9.21
gernet_s,15288.68,66.966,1024,224,0.75,2.65,8.17
mobilenetv3_large_075,14216.3,72.019,1024,224,0.16,4.0,3.99
mixer_s32_224,14182.92,72.188,1024,224,1.0,2.28,19.1
vit_tiny_r_s16_p8_224,14125.39,72.482,1024,224,0.44,2.06,6.34
resnet10t,14112.07,72.551,1024,224,1.1,2.43,5.44
vit_small_patch32_224,13799.47,74.195,1024,224,1.15,2.5,22.88
regnetx_004,13610.2,75.225,1024,224,0.4,3.14,5.16
levit_192,13524.14,75.706,1024,224,0.66,3.2,10.95
mobilenetv3_rw,12956.58,79.021,1024,224,0.23,4.41,5.48
hardcorenas_a,12803.61,79.966,1024,224,0.23,4.38,5.26
mobilenetv3_large_100,12749.93,80.304,1024,224,0.23,4.41,5.48
mnasnet_075,12532.36,81.697,1024,224,0.23,4.77,3.17
tf_mobilenetv3_large_075,12186.51,84.017,1024,224,0.16,4.0,3.99
tinynet_b,12083.18,84.735,1024,188,0.21,4.44,3.73
regnety_004,11918.36,85.906,1024,224,0.41,3.89,4.34
tf_mobilenetv3_large_minimal_100,11715.94,87.392,1024,224,0.22,4.4,3.92
hardcorenas_c,11548.05,88.662,1024,224,0.28,5.01,5.52
hardcorenas_b,11510.71,88.949,1024,224,0.26,5.09,5.18
ese_vovnet19b_slim_dw,11501.95,89.018,1024,224,0.4,5.28,1.9
ghostnet_100,11332.61,90.348,1024,224,0.15,3.55,5.18
mnasnet_100,11138.43,91.923,1024,224,0.33,5.46,4.38
gluon_resnet18_v1b,11098.78,92.252,1024,224,1.82,2.48,11.69
resnet18,11083.1,92.383,1024,224,1.82,2.48,11.69
swsl_resnet18,11062.48,92.555,1024,224,1.82,2.48,11.69
ssl_resnet18,11061.11,92.565,1024,224,1.82,2.48,11.69
tf_mobilenetv3_large_100,11018.56,92.922,1024,224,0.23,4.41,5.48
mnasnet_b1,10993.58,93.135,1024,224,0.33,5.46,4.38
hardcorenas_d,10910.47,93.843,1024,224,0.3,4.93,7.5
semnasnet_075,10898.09,93.951,1024,224,0.23,5.54,2.91
mobilenetv2_075,10893.76,93.988,1024,224,0.22,5.86,2.64
seresnet18,10385.56,98.588,1024,224,1.82,2.49,11.78
legacy_seresnet18,10064.41,101.734,1024,224,1.82,2.49,11.78
spnasnet_100,10009.21,102.296,1024,224,0.35,6.03,4.42
tf_efficientnetv2_b0,9930.95,103.1,1024,224,0.73,4.77,7.14
levit_256,9858.1,103.863,1024,224,1.13,4.23,18.89
tinynet_a,9720.11,105.337,1024,192,0.35,5.41,6.19
hardcorenas_f,9714.91,105.393,1024,224,0.35,5.57,8.2
semnasnet_100,9623.78,106.393,1024,224,0.32,6.23,3.89
mnasnet_a1,9623.77,106.393,1024,224,0.32,6.23,3.89
mobilenetv2_100,9598.91,106.667,1024,224,0.31,6.68,3.5
hardcorenas_e,9571.87,106.966,1024,224,0.35,5.65,8.07
dla46_c,9568.4,107.007,1024,224,0.58,4.5,1.3
efficientnet_lite0,9361.14,109.377,1024,224,0.4,6.74,4.65
fbnetc_100,9352.03,109.484,1024,224,0.4,6.51,5.57
resnet18d,9334.83,109.687,1024,224,2.06,3.29,11.71
ese_vovnet19b_slim,9109.47,112.4,1024,224,1.69,3.52,3.17
regnety_006,9097.63,112.542,1024,224,0.61,4.33,6.06
regnetz_005,8607.49,118.955,1024,224,0.52,5.86,7.12
xcit_nano_12_p16_224_dist,8577.2,119.375,1024,224,0.56,4.17,3.05
xcit_nano_12_p16_224,8554.61,119.689,1024,224,0.56,4.17,3.05
levit_256d,8382.88,122.143,1024,224,1.4,4.93,26.21
regnetx_006,8379.52,122.192,1024,224,0.61,3.98,6.2
ghostnet_130,8278.59,123.681,1024,224,0.24,4.6,7.36
tf_efficientnet_lite0,8080.51,126.714,1024,224,0.4,6.74,4.65
efficientnet_b0,7965.17,128.548,1024,224,0.4,6.75,5.29
mnasnet_140,7779.42,131.618,1024,224,0.6,7.71,7.12
deit_tiny_distilled_patch16_224,7467.68,137.113,1024,224,1.27,6.01,5.91
rexnetr_100,7464.12,137.179,1024,224,0.43,7.72,4.88
deit_tiny_patch16_224,7430.15,137.806,1024,224,1.26,5.97,5.72
resnet14t,7429.68,137.815,1024,224,1.69,5.8,10.08
vit_tiny_patch16_224,7424.93,137.902,1024,224,1.26,5.97,5.72
regnetx_008,7394.88,138.463,1024,224,0.81,5.15,7.26
mobilenetv2_110d,7247.12,141.287,1024,224,0.45,8.71,4.52
hrnet_w18_small,7232.93,141.561,1024,224,1.61,5.72,13.19
tf_efficientnet_b0,7016.18,145.938,1024,224,0.4,6.75,5.29
regnety_008,6938.46,147.571,1024,224,0.81,5.25,6.26
mobilevitv2_050,6848.87,149.503,1024,256,0.48,8.04,1.37
pit_ti_distilled_224,6811.68,150.317,1024,224,0.71,6.23,5.1
pit_ti_224,6784.24,150.927,1024,224,0.7,6.19,4.85
gernet_m,6679.85,153.286,1024,224,3.02,5.24,21.14
efficientnet_b1_pruned,6642.37,154.15,1024,240,0.4,6.21,6.33
resnet34,6496.42,157.614,1024,224,3.67,3.74,21.8
gluon_resnet34_v1b,6494.61,157.658,1024,224,3.67,3.74,21.8
tv_resnet34,6481.01,157.989,1024,224,3.67,3.74,21.8
tf_efficientnetv2_b1,6476.52,158.098,1024,240,1.21,7.34,8.14
semnasnet_140,6454.5,158.637,1024,224,0.6,8.87,6.11
nf_regnet_b0,6452.24,158.693,1024,256,0.64,5.58,8.76
ese_vovnet19b_dw,6335.13,161.627,1024,224,1.34,8.25,6.54
mobilenetv2_140,6271.56,163.266,1024,224,0.6,9.57,6.11
rexnet_100,6226.48,164.447,1024,224,0.41,7.44,4.8
efficientnet_lite1,6187.91,165.472,1024,240,0.62,10.14,5.42
efficientnet_es_pruned,6115.4,167.434,1024,224,1.81,8.73,5.44
efficientnet_es,6115.12,167.443,1024,224,1.81,8.73,5.44
visformer_tiny,6103.09,167.772,1024,224,1.27,5.72,10.32
seresnet34,6058.13,169.019,1024,224,3.67,3.74,21.96
fbnetv3_b,6018.76,170.124,1024,256,0.55,9.1,8.6
selecsls42,5953.76,171.98,1024,224,2.94,4.62,30.35
selecsls42b,5921.2,172.924,1024,224,2.98,4.62,32.46
resnet26,5895.21,173.69,1024,224,2.36,7.35,16.0
edgenext_xx_small,5893.72,173.732,1024,288,0.33,4.21,1.33
levit_384,5880.4,174.126,1024,224,2.36,6.26,39.13
resnet34d,5865.98,174.555,1024,224,3.91,4.54,21.82
legacy_seresnet34,5850.24,175.025,1024,224,3.67,3.74,21.96
dla34,5827.3,175.712,1024,224,3.07,5.02,15.74
tf_efficientnet_es,5781.29,177.112,1024,224,1.81,8.73,5.44
cs3darknet_focus_m,5721.39,178.967,1024,288,2.51,6.19,9.3
resnetblur18,5636.65,181.657,1024,224,2.34,3.39,11.69
rexnetr_130,5590.0,183.173,1024,224,0.68,9.81,7.61
mobilevit_xxs,5524.87,185.333,1024,256,0.42,8.34,1.27
tf_efficientnet_lite1,5524.68,185.339,1024,240,0.62,10.14,5.42
cs3darknet_m,5478.07,186.916,1024,288,2.63,6.69,9.31
convnext_atto,5460.54,187.516,1024,288,0.91,6.3,3.7
xcit_tiny_12_p16_224_dist,5457.72,187.611,1024,224,1.24,6.29,6.72
xcit_tiny_12_p16_224,5456.63,187.649,1024,224,1.24,6.29,6.72
skresnet18,5413.1,189.159,1024,224,1.82,3.24,11.96
darknet17,5401.37,189.571,1024,256,3.26,7.18,14.3
mixnet_s,5392.58,189.878,1024,224,0.25,6.25,4.13
resmlp_12_224,5366.15,190.814,1024,224,3.01,5.5,15.35
resmlp_12_distilled_224,5364.91,190.857,1024,224,3.01,5.5,15.35
convnext_atto_ols,5288.94,193.6,1024,288,0.96,6.8,3.7
vit_base_patch32_clip_224,5280.68,193.903,1024,224,4.41,5.01,88.22
vit_base_patch32_224,5280.52,193.908,1024,224,4.41,5.01,88.22
pit_xs_distilled_224,5272.13,194.218,1024,224,1.41,7.76,11.0
pit_xs_224,5271.0,194.259,1024,224,1.4,7.71,10.62
repvgg_b0,5252.66,194.939,1024,224,3.41,6.15,15.82
mixer_b32_224,5221.71,196.094,1024,224,3.24,6.29,60.29
pvt_v2_b0,5210.31,196.521,1024,224,0.57,7.99,3.67
resnetaa34d,5171.78,197.986,1024,224,4.43,5.07,21.82
selecsls60,5160.83,198.407,1024,224,3.59,5.52,30.67
selecsls60b,5119.51,200.008,1024,224,3.63,5.52,32.77
mobilenetv2_120d,5111.95,200.304,1024,224,0.69,11.97,5.83
resnet26d,5108.26,200.449,1024,224,2.6,8.15,16.01
gmixer_12_224,5064.97,202.162,1024,224,2.67,7.26,12.7
gmlp_ti16_224,5007.93,204.464,1024,224,1.34,7.55,5.87
mixer_s16_224,4998.69,204.842,1024,224,3.79,5.97,18.53
tf_mixnet_s,4989.18,205.231,1024,224,0.25,6.25,4.13
efficientnet_b0_g16_evos,4930.67,207.667,1024,224,1.01,7.42,8.11
rexnetr_150,4900.22,208.959,1024,224,0.89,11.13,9.78
fbnetv3_d,4881.14,209.776,1024,256,0.68,11.1,10.31
darknet21,4850.41,211.105,1024,256,3.93,7.47,20.86
nf_resnet26,4816.48,212.591,1024,224,2.41,7.35,16.0
efficientnet_lite2,4781.65,214.14,1024,260,0.89,12.9,6.09
convnext_femto,4749.12,215.607,1024,288,1.3,7.56,5.22
tf_efficientnetv2_b2,4718.26,217.018,1024,260,1.72,9.84,10.1
sedarknet21,4656.51,219.895,1024,256,3.93,7.47,20.95
dla46x_c,4636.77,220.831,1024,224,0.54,5.66,1.07
convnext_femto_ols,4618.33,221.714,1024,288,1.35,8.06,5.23
resnext26ts,4603.25,222.441,1024,256,2.43,10.52,10.3
efficientformer_l1,4566.14,224.248,1024,224,1.3,5.53,12.29
dpn48b,4506.78,227.201,1024,224,1.69,8.92,9.13
crossvit_tiny_240,4481.69,228.473,1024,240,1.57,9.08,7.01
dla60x_c,4459.27,229.622,1024,224,0.59,6.01,1.32
eca_resnext26ts,4456.63,229.759,1024,256,2.43,10.52,10.3
seresnext26ts,4453.99,229.896,1024,256,2.43,10.52,10.39
legacy_seresnext26_32x4d,4441.15,230.558,1024,224,2.49,9.39,16.79
gernet_l,4396.56,232.898,1024,256,4.57,8.0,31.08
mobilevitv2_075,4393.87,233.041,1024,256,1.05,12.06,2.87
gcresnext26ts,4384.92,233.516,1024,256,2.43,10.53,10.48
tf_efficientnet_b1,4370.6,234.282,1024,240,0.71,10.88,7.79
tf_efficientnet_lite2,4293.9,238.467,1024,260,0.89,12.9,6.09
rexnet_130,4262.16,240.243,1024,224,0.68,9.71,7.56
efficientnet_b1,4239.44,241.53,1024,256,0.77,12.22,7.79
vit_small_patch32_384,4239.1,241.55,1024,384,3.45,8.25,22.92
crossvit_9_240,4212.37,243.082,1024,240,1.85,9.52,8.55
crossvit_9_dagger_240,4095.03,250.049,1024,240,1.99,9.97,8.78
nf_ecaresnet26,4091.86,250.24,1024,224,2.41,7.36,16.0
nf_seresnet26,4088.47,250.449,1024,224,2.41,7.36,17.4
efficientnet_cc_b0_8e,4076.51,251.183,1024,224,0.42,9.42,24.01
efficientnet_cc_b0_4e,4073.3,251.382,1024,224,0.41,9.42,13.31
ecaresnet50d_pruned,4055.39,252.492,1024,224,2.53,6.43,19.94
efficientnet_b2_pruned,4030.92,254.025,1024,260,0.73,9.13,8.31
ecaresnext50t_32x4d,4018.73,254.796,1024,224,2.7,10.09,15.41
ecaresnext26t_32x4d,4017.09,254.9,1024,224,2.7,10.09,15.41
seresnext26t_32x4d,4014.43,255.069,1024,224,2.7,10.09,16.81
seresnext26tn_32x4d,4014.36,255.074,1024,224,2.7,10.09,16.81
repvgg_a2,3987.84,256.77,1024,224,5.7,6.26,28.21
poolformer_s12,3982.67,257.103,1024,224,1.82,5.53,11.92
seresnext26d_32x4d,3979.57,257.303,1024,224,2.73,10.19,16.81
vit_tiny_r_s16_p8_384,3963.05,258.374,1024,384,1.34,6.49,6.36
resnet26t,3939.46,259.923,1024,256,3.35,10.52,16.01
nf_regnet_b1,3911.64,261.772,1024,288,1.02,9.2,10.22
rexnet_150,3881.93,263.775,1024,224,0.9,11.21,9.73
nf_regnet_b2,3879.78,263.921,1024,272,1.22,9.27,14.31
resnetv2_50,3865.49,264.896,1024,224,4.11,11.11,25.55
regnetx_016,3852.41,265.794,1024,224,1.62,7.93,9.19
tf_efficientnet_cc_b0_4e,3812.08,268.608,1024,224,0.41,9.42,13.31
tf_efficientnet_cc_b0_8e,3803.67,269.202,1024,224,0.42,9.42,24.01
convnext_pico,3747.49,273.239,1024,288,2.27,10.08,9.05
ecaresnetlight,3744.45,273.459,1024,224,4.11,8.42,30.16
dpn68,3724.59,274.917,1024,224,2.35,10.47,12.61
edgenext_x_small,3714.71,275.646,1024,288,0.68,7.5,2.34
gluon_resnet50_v1b,3672.76,278.798,1024,224,4.11,11.11,25.56
ssl_resnet50,3671.85,278.866,1024,224,4.11,11.11,25.56
efficientnet_em,3671.25,278.913,1024,240,3.04,14.34,6.9
resnet50,3668.58,279.116,1024,224,4.11,11.11,25.56
swsl_resnet50,3668.32,279.136,1024,224,4.11,11.11,25.56
tv_resnet50,3667.14,279.225,1024,224,4.11,11.11,25.56
dpn68b,3667.07,279.229,1024,224,2.35,10.47,12.61
rexnetr_200,3659.45,279.811,1024,224,1.59,15.11,16.52
convnext_pico_ols,3651.34,280.434,1024,288,2.37,10.74,9.06
botnet26t_256,3594.28,284.883,1024,256,3.32,11.98,12.49
bat_resnext26ts,3569.91,286.828,1024,256,2.53,12.51,10.73
resnetv2_50t,3547.32,288.657,1024,224,4.32,11.82,25.57
mixnet_m,3537.26,289.477,1024,224,0.36,8.19,5.01
regnety_016,3531.88,289.919,1024,224,1.63,8.04,11.2
tf_efficientnet_em,3529.62,290.106,1024,240,3.04,14.34,6.9
resnetv2_50d,3525.02,290.482,1024,224,4.35,11.92,25.57
halonet26t,3515.15,291.299,1024,256,3.19,11.69,12.48
resnet32ts,3492.62,293.179,1024,256,4.63,11.58,17.96
hrnet_w18_small_v2,3482.81,294.001,1024,224,2.62,9.65,15.6
gluon_resnet50_v1c,3481.59,294.107,1024,224,4.35,11.92,25.58
dla60,3466.91,295.351,1024,224,4.26,10.16,22.04
resnet33ts,3460.78,295.875,1024,256,4.76,11.66,19.68
tf_efficientnet_b2,3402.3,300.962,1024,260,1.02,13.83,9.11
convit_tiny,3399.61,301.199,1024,224,1.26,7.94,5.71
resnet50t,3373.72,303.51,1024,224,4.32,11.82,25.57
tf_mixnet_m,3366.38,304.167,1024,224,0.36,8.19,5.01
efficientnet_b3_pruned,3360.1,304.74,1024,300,1.04,11.86,9.86
seresnet33ts,3354.27,305.27,1024,256,4.76,11.66,19.78
resnet50d,3351.47,305.527,1024,224,4.35,11.92,25.58
eca_resnet33ts,3350.95,305.574,1024,256,4.76,11.66,19.68
vit_small_resnet26d_224,3346.77,305.954,1024,224,5.07,11.12,63.61
cs3darknet_focus_l,3335.18,307.018,1024,288,5.9,10.16,21.15
gluon_resnet50_v1d,3334.65,307.068,1024,224,4.35,11.92,25.58
mobilevitv2_100,3324.63,307.994,1024,256,1.84,16.08,4.9
vovnet39a,3320.12,308.408,1024,224,7.09,6.73,22.6
legacy_seresnet50,3312.33,309.135,1024,224,3.88,10.6,28.09
efficientnet_b0_gn,3307.86,309.554,1024,224,0.42,6.75,5.29
gcresnet33ts,3307.01,309.633,1024,256,4.76,11.68,19.88
pit_s_distilled_224,3301.25,310.173,1024,224,2.9,11.64,24.04
pit_s_224,3299.97,310.295,1024,224,2.88,11.56,23.46
mobilevit_xs,3252.28,314.844,1024,256,1.05,16.33,2.32
deit_small_distilled_patch16_224,3233.6,316.663,1024,224,4.63,12.02,22.44
efficientnet_b2a,3223.97,317.608,1024,288,1.12,16.2,9.11
efficientnet_b2,3223.9,317.615,1024,288,1.12,16.2,9.11
deit_small_patch16_224,3218.99,318.1,1024,224,4.61,11.95,22.05
vit_small_patch16_224,3218.38,318.16,1024,224,4.61,11.95,22.05
cs3darknet_l,3210.26,318.965,1024,288,6.16,10.83,21.16
ese_vovnet39b,3206.21,319.369,1024,224,7.09,6.74,24.57
eca_vovnet39b,3203.77,319.612,1024,224,7.09,6.74,22.6
convnextv2_atto,3196.73,320.315,1024,288,0.91,6.3,3.71
coatnet_pico_rw_224,3189.82,321.008,1024,224,2.05,14.62,10.85
seresnet50,3181.57,321.841,1024,224,4.11,11.13,28.09
pvt_v2_b1,3147.37,325.339,1024,224,2.12,15.39,14.01
coat_lite_tiny,3146.41,325.439,1024,224,1.6,11.65,5.72
res2net50_48w_2s,3127.52,327.404,1024,224,4.18,11.72,25.29
eca_botnext26ts_256,3112.32,329.003,1024,256,2.46,11.6,10.59
ecaresnet101d_pruned,3103.16,329.973,1024,224,3.48,7.69,24.88
efficientnet_b0_g8_gn,3073.2,333.192,1024,224,0.66,6.75,6.56
ssl_resnext50_32x4d,3071.68,333.356,1024,224,4.26,14.4,25.03
dla60x,3071.64,333.359,1024,224,3.54,13.8,17.35
swsl_resnext50_32x4d,3070.7,333.464,1024,224,4.26,14.4,25.03
tv_resnext50_32x4d,3069.81,333.56,1024,224,4.26,14.4,25.03
resnext50_32x4d,3069.72,333.57,1024,224,4.26,14.4,25.03
gluon_resnext50_32x4d,3068.47,333.704,1024,224,4.26,14.4,25.03
vit_small_r26_s32_224,3061.92,334.417,1024,224,3.56,9.85,36.43
skresnet34,3055.95,335.073,1024,224,3.67,5.13,22.28
deit3_small_patch16_224_in21ft1k,3048.82,335.855,1024,224,4.61,11.95,22.06
deit3_small_patch16_224,3047.23,336.031,1024,224,4.61,11.95,22.06
eca_halonext26ts,3035.71,337.305,1024,256,2.44,11.46,10.76
haloregnetz_b,3032.47,337.665,1024,224,1.97,11.94,11.68
vit_relpos_base_patch32_plus_rpn_256,3026.45,338.338,1024,256,7.68,8.01,119.42
vit_relpos_small_patch16_rpn_224,3019.95,339.067,1024,224,4.59,13.05,21.97
vit_relpos_small_patch16_224,3008.26,340.383,1024,224,4.59,13.05,21.98
vit_srelpos_small_patch16_224,3000.96,341.213,1024,224,4.59,12.16,21.97
xcit_nano_12_p16_384_dist,3000.48,341.266,1024,384,1.64,12.15,3.05
cs3sedarknet_l,2995.41,341.845,1024,288,6.16,10.83,21.91
resnetaa50d,2993.03,342.116,1024,224,5.39,12.44,25.58
vgg11,2983.47,85.796,256,224,7.61,7.44,132.86
selecsls84,2973.16,344.402,1024,224,5.9,7.57,50.95
resnetrs50,2963.42,345.535,1024,224,4.48,12.14,35.69
seresnet50t,2957.12,346.271,1024,224,4.32,11.83,28.1
resnest14d,2954.69,346.556,1024,224,2.76,7.33,10.61
gluon_resnet50_v1s,2953.65,346.677,1024,224,5.47,13.52,25.68
coat_lite_mini,2952.61,346.799,1024,224,2.0,12.25,11.01
ecaresnet50d,2945.96,347.583,1024,224,4.35,11.93,25.58
densenet121,2933.45,349.064,1024,224,2.87,6.9,7.98
tv_densenet121,2929.69,349.514,1024,224,2.87,6.9,7.98
vit_base_patch32_plus_256,2929.65,349.519,1024,256,7.79,7.76,119.48
rexnet_200,2927.94,349.723,1024,224,1.56,14.91,16.37
xcit_tiny_24_p16_224_dist,2927.0,349.834,1024,224,2.34,11.82,12.12
xcit_tiny_24_p16_224,2921.97,350.436,1024,224,2.34,11.82,12.12
coatnet_nano_cc_224,2867.38,357.108,1024,224,2.24,15.02,13.76
gcresnext50ts,2857.34,358.363,1024,256,3.75,15.46,15.67
lambda_resnet26rpt_256,2853.55,358.839,1024,256,3.16,11.87,10.99
resnext50d_32x4d,2845.08,359.908,1024,224,4.5,15.2,25.05
mixnet_l,2828.6,361.996,1024,224,0.58,10.84,7.33
densenet121d,2824.08,362.584,1024,224,3.11,7.7,8.0
efficientnet_lite3,2821.84,362.87,1024,300,1.65,21.85,8.2
cspresnet50,2793.65,366.534,1024,256,4.54,11.5,21.62
coatnet_nano_rw_224,2781.93,368.077,1024,224,2.41,15.41,15.14
vgg11_bn,2760.38,370.949,1024,224,7.62,7.44,132.87
vovnet57a,2755.77,371.572,1024,224,8.95,7.52,36.64
resmlp_24_224,2750.33,372.306,1024,224,5.96,10.91,30.02
resmlp_24_distilled_224,2740.33,373.665,1024,224,5.96,10.91,30.02
convnextv2_femto,2735.91,374.269,1024,288,1.3,7.56,5.23
flexivit_small,2735.78,374.287,1024,240,5.35,14.18,22.06
gcresnet50t,2732.04,374.8,1024,256,5.42,14.67,25.9
legacy_seresnext50_32x4d,2722.84,376.065,1024,224,4.26,14.42,27.56
seresnext50_32x4d,2721.47,376.256,1024,224,4.26,14.42,27.56
gluon_seresnext50_32x4d,2720.58,376.379,1024,224,4.26,14.42,27.56
visformer_small,2719.93,376.468,1024,224,4.88,11.43,40.22
twins_svt_small,2713.39,377.374,1024,224,2.94,13.75,24.06
resnetv2_50x1_bit_distilled,2708.81,378.014,1024,224,4.23,11.11,25.55
res2net50_14w_8s,2692.9,380.248,1024,224,4.21,13.28,25.06
resnetblur50,2685.97,381.228,1024,224,5.16,12.02,25.56
vit_base_resnet26d_224,2684.6,381.421,1024,224,6.97,13.16,101.4
tf_mixnet_l,2680.8,381.958,1024,224,0.58,10.84,7.33
seresnetaa50d,2658.93,385.106,1024,224,5.4,12.46,28.11
dla60_res2net,2656.16,385.506,1024,224,4.15,12.34,20.85
cspresnet50d,2655.05,385.668,1024,256,4.86,12.55,21.64
coatnext_nano_rw_224,2655.0,385.674,1024,224,2.47,12.8,14.7
ese_vovnet57b,2654.33,385.773,1024,224,8.95,7.52,38.61
tf_efficientnetv2_b3,2654.14,385.8,1024,300,3.04,15.74,14.36
cspresnet50w,2641.68,387.621,1024,256,5.04,12.19,28.12
res2net50_26w_4s,2629.64,389.395,1024,224,4.28,12.61,25.7
regnetz_b16,2626.71,389.828,1024,288,2.39,16.43,9.72
convnext_nano,2611.78,392.059,1024,288,4.06,13.84,15.59
efficientnetv2_rw_t,2601.49,393.609,1024,288,3.19,16.42,13.65
fbnetv3_g,2595.29,394.549,1024,288,1.77,21.09,16.62
gmixer_24_224,2595.15,394.571,1024,224,5.28,14.45,24.72
mobilevit_s,2586.09,395.952,1024,256,2.03,19.94,5.58
coatnet_rmlp_nano_rw_224,2569.7,398.478,1024,224,2.62,20.34,15.15
gcvit_xxtiny,2561.41,399.768,1024,224,2.14,15.36,12.0
tf_efficientnet_lite3,2530.94,404.582,1024,300,1.65,21.85,8.2
efficientnet_cc_b1_8e,2530.65,404.628,1024,240,0.75,15.44,39.72
densenetblur121d,2522.66,405.908,1024,224,3.11,7.9,8.0
resnetblur50d,2509.45,408.045,1024,224,5.4,12.82,25.58
nf_ecaresnet50,2490.39,411.168,1024,224,4.21,11.13,25.56
inception_v3,2485.21,412.025,1024,299,5.73,8.97,23.83
nf_seresnet50,2482.66,412.449,1024,224,4.21,11.13,28.09
tf_inception_v3,2481.38,412.658,1024,299,5.73,8.97,23.83
gc_efficientnetv2_rw_t,2480.59,412.793,1024,288,3.2,16.45,13.68
adv_inception_v3,2479.41,412.983,1024,299,5.73,8.97,23.83
repvgg_b1g4,2473.34,414.003,1024,224,8.15,10.64,39.97
mobilevitv2_125,2472.28,414.18,1024,256,2.86,20.1,7.48
gluon_inception_v3,2468.42,414.827,1024,299,5.73,8.97,23.83
nf_regnet_b3,2461.52,415.991,1024,320,2.05,14.61,18.59
xcit_small_12_p16_224_dist,2446.89,418.478,1024,224,4.82,12.58,26.25
xcit_small_12_p16_224,2446.42,418.558,1024,224,4.82,12.58,26.25
cspresnext50,2438.96,419.836,1024,256,4.05,15.86,20.57
convnext_nano_ols,2435.0,420.521,1024,288,4.38,15.5,15.65
regnetx_032,2429.42,421.489,1024,224,3.2,11.37,15.3
densenet169,2426.29,422.031,1024,224,3.4,7.3,14.15
sehalonet33ts,2419.4,423.234,1024,256,3.55,14.7,13.69
tf_efficientnet_cc_b1_8e,2406.19,425.557,1024,240,0.75,15.44,39.72
semobilevit_s,2402.02,426.294,1024,256,2.03,19.95,5.74
resnetv2_101,2330.6,439.36,1024,224,7.83,16.23,44.54
twins_pcpvt_small,2312.72,442.754,1024,224,3.83,18.08,24.11
xcit_nano_12_p8_224_dist,2295.5,446.077,1024,224,2.16,15.71,3.05
xcit_nano_12_p8_224,2292.87,446.587,1024,224,2.16,15.71,3.05
gmlp_s16_224,2290.73,447.007,1024,224,4.42,15.1,19.42
cs3darknet_focus_x,2287.2,447.697,1024,256,8.03,10.69,35.02
vit_base_r26_s32_224,2275.25,450.047,1024,224,6.81,12.36,101.38
gluon_resnet101_v1b,2260.37,453.01,1024,224,7.83,16.23,44.55
tv_resnet101,2258.59,453.368,1024,224,7.83,16.23,44.55
resnet101,2258.28,453.43,1024,224,7.83,16.23,44.55
skresnet50,2234.62,458.23,1024,224,4.11,12.5,25.8
ecaresnet26t,2232.29,458.709,1024,320,5.24,16.44,16.01
edgenext_small,2226.69,459.86,1024,320,1.97,14.16,5.59
dla102,2219.96,461.255,1024,224,7.19,14.18,33.27
res2next50,2214.71,462.347,1024,224,4.2,13.71,24.67
dla60_res2next,2210.67,463.194,1024,224,3.49,13.17,17.03
resnetv2_101d,2203.82,464.633,1024,224,8.07,17.04,44.56
gluon_resnet101_v1c,2194.65,466.578,1024,224,8.08,17.04,44.57
resnest26d,2170.04,471.869,1024,224,3.64,9.97,17.07
vgg13,2149.71,476.331,1024,224,11.31,12.25,133.05
gluon_resnet101_v1d,2137.49,479.053,1024,224,8.08,17.04,44.57
skresnet50d,2115.22,484.098,1024,224,4.36,13.31,25.82
convnextv2_pico,2108.5,485.64,1024,288,2.27,10.08,9.07
vit_base_resnet50d_224,2101.17,487.333,1024,224,8.73,16.92,110.97
coatnet_0_rw_224,2082.49,491.706,1024,224,4.43,18.73,27.44
crossvit_small_240,2081.5,491.94,1024,240,5.63,18.17,26.86
deit3_medium_patch16_224_in21ft1k,2076.53,493.118,1024,224,8.0,15.93,38.85
deit3_medium_patch16_224,2072.34,494.116,1024,224,8.0,15.93,38.85
mobilevitv2_150,2071.36,494.349,1024,256,4.09,24.11,10.59
mobilevitv2_150_in22ft1k,2070.3,494.603,1024,256,4.09,24.11,10.59
sebotnet33ts_256,2067.91,247.581,512,256,3.89,17.46,13.7
wide_resnet50_2,2057.08,497.78,1024,224,11.43,14.4,68.88
vit_relpos_medium_patch16_rpn_224,2044.85,500.757,1024,224,7.97,17.02,38.73
efficientformer_l3,2041.79,501.507,1024,224,3.93,12.01,31.41
poolformer_s24,2040.35,501.863,1024,224,3.41,10.68,21.39
vit_relpos_medium_patch16_224,2037.47,502.572,1024,224,7.97,17.02,38.75
cspdarknet53,2035.94,502.949,1024,256,6.57,16.81,27.64
resnet51q,2034.41,503.329,1024,288,8.07,20.94,35.7
vit_srelpos_medium_patch16_224,2033.15,503.638,1024,224,7.96,16.21,38.74
maxvit_rmlp_pico_rw_256,2008.78,509.748,1024,256,1.85,24.86,7.52
vit_relpos_medium_patch16_cls_224,2007.24,510.141,1024,224,8.03,18.24,38.76
dla102x,2006.55,510.315,1024,224,5.89,19.42,26.31
legacy_seresnet101,2003.12,511.188,1024,224,7.61,15.74,49.33
swin_tiny_patch4_window7_224,1995.14,513.235,1024,224,4.51,17.06,28.29
repvgg_b1,1985.42,515.747,1024,224,13.16,10.64,57.42
resnetaa101d,1982.98,516.381,1024,224,9.12,17.56,44.57
coatnet_rmlp_0_rw_224,1981.75,516.703,1024,224,4.72,24.89,27.45
tf_efficientnet_b3,1975.92,518.226,1024,300,1.87,23.83,12.23
gcvit_xtiny,1969.68,519.869,1024,224,2.93,20.26,19.98
hrnet_w18,1967.17,520.531,1024,224,4.32,16.31,21.3
gluon_resnet101_v1s,1965.68,520.926,1024,224,9.19,18.64,44.67
maxvit_pico_rw_256,1965.38,521.006,1024,256,1.83,22.3,7.46
resnetaa50,1958.15,522.93,1024,288,8.52,19.24,25.56
seresnet101,1954.63,523.871,1024,224,7.84,16.27,49.33
efficientnet_b3,1949.54,525.239,1024,320,2.01,26.52,12.23
efficientnet_b3a,1949.11,525.356,1024,320,2.01,26.52,12.23
edgenext_small_rw,1932.68,529.816,1024,320,2.46,14.85,7.83
regnetx_040,1932.62,529.839,1024,224,3.99,12.2,22.12
cs3sedarknet_xdw,1925.4,531.825,1024,256,5.97,17.18,21.6
coatnet_bn_0_rw_224,1920.71,533.123,1024,224,4.67,22.04,27.44
xcit_tiny_12_p16_384_dist,1911.65,535.652,1024,384,3.64,18.26,6.72
ssl_resnext101_32x4d,1910.73,535.909,1024,224,8.01,21.23,44.18
swsl_resnext101_32x4d,1910.43,535.993,1024,224,8.01,21.23,44.18
resnext101_32x4d,1909.99,536.115,1024,224,8.01,21.23,44.18
gluon_resnext101_32x4d,1909.34,536.298,1024,224,8.01,21.23,44.18
darknet53,1903.77,537.866,1024,288,11.78,15.68,41.61
darknetaa53,1898.12,539.468,1024,288,10.08,15.68,36.02
crossvit_15_240,1892.46,541.083,1024,240,5.81,19.77,27.53
halonet50ts,1881.53,544.226,1024,256,5.3,19.2,22.73
vgg13_bn,1879.72,544.749,1024,224,11.33,12.25,133.05
mixnet_xl,1872.46,546.86,1024,224,0.93,14.57,11.9
res2net50_26w_6s,1870.88,547.321,1024,224,6.33,15.28,37.05
ecaresnet101d,1869.88,547.616,1024,224,8.08,17.07,44.57
densenet201,1869.57,547.706,1024,224,4.34,7.85,20.01
nf_resnet101,1858.48,550.976,1024,224,8.01,16.23,44.55
coatnet_0_224,1857.28,275.661,512,224,4.58,24.01,25.04
pvt_v2_b2,1854.85,552.053,1024,224,4.05,27.53,25.36
crossvit_15_dagger_240,1850.69,553.295,1024,240,6.13,20.43,28.21
resmlp_36_224,1846.41,554.574,1024,224,8.91,16.33,44.69
resmlp_36_distilled_224,1845.04,554.99,1024,224,8.91,16.33,44.69
resnet61q,1841.84,555.954,1024,288,9.87,21.52,36.85
swin_s3_tiny_224,1817.5,563.398,1024,224,4.64,19.13,28.33
cait_xxs24_224,1796.55,569.968,1024,224,2.53,20.29,11.96
cs3darknet_x,1789.33,572.268,1024,288,10.6,14.36,35.05
vit_medium_patch16_gap_240,1785.54,573.481,1024,240,9.22,18.81,44.4
nf_resnet50,1784.84,573.708,1024,288,6.88,18.37,25.56
resnet50_gn,1764.31,580.385,1024,224,4.14,11.11,25.56
mixer_b16_224_miil,1761.45,581.327,1024,224,12.62,14.53,59.88
mixer_b16_224,1759.76,581.885,1024,224,12.62,14.53,59.88
resnetblur101d,1757.96,582.482,1024,224,9.12,17.94,44.57
eca_nfnet_l0,1726.58,593.068,1024,288,7.12,17.29,24.14
nfnet_l0,1721.83,594.705,1024,288,7.13,17.29,35.07
vit_large_patch32_224,1717.59,596.169,1024,224,15.41,13.32,327.9
vgg16,1717.44,596.224,1024,224,15.47,13.56,138.36
regnetz_c16,1710.89,598.505,1024,320,3.92,25.88,13.46
pvt_v2_b2_li,1709.89,598.855,1024,224,3.91,27.6,22.55
resnest50d_1s4x24d,1705.52,600.391,1024,224,4.43,13.57,25.68
coat_lite_small,1704.55,600.733,1024,224,3.96,22.09,19.84
resnetv2_50d_frn,1697.1,603.368,1024,224,4.33,11.92,25.59
cs3sedarknet_x,1689.8,605.975,1024,288,10.6,14.37,35.4
seresnext101_32x4d,1687.65,606.747,1024,224,8.02,21.26,48.96
gluon_seresnext101_32x4d,1687.1,606.945,1024,224,8.02,21.26,48.96
legacy_seresnext101_32x4d,1684.69,607.813,1024,224,8.02,21.26,48.96
regnetv_040,1682.92,608.454,1024,288,6.6,20.3,20.64
mobilevitv2_175,1677.66,457.769,768,256,5.54,28.13,14.25
regnety_040,1677.03,610.59,1024,288,6.61,20.3,20.65
mobilevitv2_175_in22ft1k,1677.0,457.949,768,256,5.54,28.13,14.25
convnext_tiny_hnf,1676.16,610.908,1024,288,7.39,22.21,28.59
res2net101_26w_4s,1675.37,611.195,1024,224,8.1,18.45,45.21
vit_tiny_patch16_384,1665.76,614.72,1024,384,4.7,25.39,5.79
sequencer2d_s,1661.32,616.362,1024,224,4.96,11.31,27.65
ese_vovnet39b_evos,1661.21,616.404,1024,224,7.07,6.74,24.58
vit_base_patch32_384,1649.27,620.868,1024,384,13.06,16.5,88.3
vit_base_patch32_clip_384,1648.64,621.105,1024,384,13.06,16.5,88.3
mixer_l32_224,1645.23,622.393,1024,224,11.27,19.86,206.94
convnext_tiny,1642.14,623.562,1024,288,7.39,22.21,28.59
botnet50ts_256,1639.64,312.25,512,256,5.54,22.23,22.74
swinv2_cr_tiny_224,1630.02,628.199,1024,224,4.66,28.45,28.33
resnetv2_50d_evob,1627.44,629.196,1024,224,4.33,11.92,25.59
twins_pcpvt_base,1615.12,633.996,1024,224,6.68,25.25,43.83
resnetv2_152,1614.43,634.268,1024,224,11.55,22.56,60.19
hrnet_w32,1605.06,637.96,1024,224,8.97,22.02,41.23
swinv2_cr_tiny_ns_224,1600.43,639.811,1024,224,4.66,28.45,28.33
xception41p,1598.79,480.351,768,299,9.25,39.86,26.91
tv_resnet152,1582.54,647.049,1024,224,11.56,22.56,60.19
gluon_resnet152_v1b,1581.57,647.444,1024,224,11.56,22.56,60.19
resnet152,1581.02,647.671,1024,224,11.56,22.56,60.19
xception,1579.88,648.138,1024,299,8.4,35.83,22.86
halo2botnet50ts_256,1572.75,651.076,1024,256,5.02,21.78,22.64
res2net50_26w_8s,1568.85,652.695,1024,224,8.37,17.95,48.4
vit_medium_patch16_gap_256,1564.22,654.626,1024,256,10.59,22.15,38.86
resnetv2_152d,1557.03,657.648,1024,224,11.8,23.36,60.2
efficientnet_el_pruned,1555.14,658.449,1024,300,8.0,30.7,10.59
maxvit_rmlp_nano_rw_256,1551.85,659.845,1024,256,4.47,31.92,15.5
regnetx_064,1550.52,660.413,1024,224,6.49,16.37,26.21
efficientnet_el,1549.97,660.646,1024,300,8.0,30.7,10.59
gluon_resnet152_v1c,1548.96,661.078,1024,224,11.8,23.36,60.21
nf_ecaresnet101,1546.58,662.091,1024,224,8.01,16.27,44.55
nf_seresnet101,1539.38,665.191,1024,224,8.02,16.27,49.33
mvitv2_tiny,1537.54,665.985,1024,224,4.7,21.16,24.17
nfnet_f0,1525.01,671.456,1024,256,12.62,18.05,71.49
vgg16_bn,1523.86,671.963,1024,224,15.5,13.56,138.37
cs3edgenet_x,1521.21,673.136,1024,288,14.59,16.36,47.82
gluon_resnet152_v1d,1520.11,673.621,1024,224,11.8,23.36,60.21
maxvit_nano_rw_256,1517.43,674.812,1024,256,4.46,30.28,15.45
tf_efficientnet_el,1506.16,679.862,1024,300,8.0,30.7,10.59
convnextv2_nano,1500.71,511.746,768,288,4.06,13.84,15.62
resnest50d,1492.63,686.022,1024,224,5.4,14.36,27.48
ese_vovnet99b,1489.17,687.617,1024,224,16.51,11.27,63.2
dla169,1471.11,696.059,1024,224,11.6,20.2,53.39
regnety_032,1467.85,697.604,1024,288,5.29,18.61,19.44
skresnext50_32x4d,1463.28,699.785,1024,224,4.5,17.18,27.48
xcit_tiny_12_p8_224_dist,1458.7,701.981,1024,224,4.81,23.6,6.71
xcit_tiny_12_p8_224,1458.23,702.211,1024,224,4.81,23.6,6.71
convit_small,1457.54,702.541,1024,224,5.76,17.87,27.78
mobilevitv2_200_in22ft1k,1456.59,527.247,768,256,7.22,32.15,18.45
mobilevitv2_200,1456.02,527.451,768,256,7.22,32.15,18.45
ecaresnet50t,1438.32,711.929,1024,320,8.82,24.13,25.57
gluon_resnet152_v1s,1432.22,714.961,1024,224,12.92,24.96,60.32
nest_tiny,1415.33,542.618,768,224,5.83,25.48,17.06
regnety_040s_gn,1412.65,724.867,1024,224,4.03,12.29,20.65
vgg19,1393.71,183.67,256,224,19.63,14.86,143.67
jx_nest_tiny,1389.62,552.657,768,224,5.83,25.48,17.06
legacy_seresnet152,1383.83,739.96,1024,224,11.33,22.08,66.82
densenet161,1376.52,743.891,1024,224,7.79,11.06,28.68
poolformer_s36,1370.67,747.069,1024,224,5.0,15.82,30.86
vit_small_resnet50d_s16_224,1367.59,748.748,1024,224,13.48,24.82,57.53
twins_svt_base,1362.65,751.463,1024,224,8.59,26.33,56.07
seresnet152,1361.7,751.99,1024,224,11.57,22.61,66.82
xception41,1356.44,566.173,768,299,9.28,39.86,26.97
maxvit_tiny_rw_224,1350.45,758.254,1024,224,5.11,33.11,29.06
crossvit_18_240,1348.85,759.154,1024,240,9.05,26.26,43.27
maxxvit_rmlp_nano_rw_256,1347.73,759.767,1024,256,4.37,26.05,16.78
efficientnet_lite4,1343.74,571.528,768,380,4.04,45.66,13.01
gcvit_tiny,1339.65,764.364,1024,224,4.79,29.82,28.22
pvt_v2_b3,1325.92,772.282,1024,224,6.92,37.7,45.24
crossvit_18_dagger_240,1313.78,779.419,1024,240,9.5,27.03,44.27
volo_d1_224,1312.37,780.255,1024,224,6.94,24.43,26.63
xcit_small_24_p16_224_dist,1307.3,783.278,1024,224,9.1,23.64,47.67
tresnet_m,1305.71,784.234,1024,224,5.74,7.31,31.39
inception_v4,1305.41,784.412,1024,299,12.28,15.09,42.68
repvgg_b2,1305.22,784.529,1024,224,20.45,12.9,89.02
xcit_small_24_p16_224,1303.71,785.433,1024,224,9.1,23.64,47.67
sequencer2d_m,1295.72,790.281,1024,224,6.55,14.26,38.31
edgenext_base,1283.77,797.633,1024,320,6.01,24.32,18.51
hrnet_w30,1280.53,799.653,1024,224,8.15,21.21,37.71
dm_nfnet_f0,1275.46,802.834,1024,256,12.62,18.05,71.49
coatnet_rmlp_1_rw_224,1268.37,807.322,1024,224,7.85,35.47,41.69
maxxvitv2_nano_rw_256,1259.7,812.877,1024,256,6.26,23.05,23.7
efficientnetv2_s,1254.49,816.255,1024,384,8.44,35.77,21.46
vgg19_bn,1246.52,205.36,256,224,19.66,14.86,143.68
nf_regnet_b4,1235.79,828.604,1024,384,4.7,28.61,30.21
swin_small_patch4_window7_224,1235.74,828.641,1024,224,8.77,27.47,49.61
tf_efficientnet_lite4,1232.22,623.25,768,380,4.04,45.66,13.01
regnetz_d32,1223.51,836.919,1024,320,9.33,37.08,27.58
mixnet_xxl,1219.27,629.871,768,224,2.04,23.43,23.96
tf_efficientnetv2_s,1219.16,839.906,1024,384,8.44,35.77,21.46
deit_base_patch16_224,1213.08,844.121,1024,224,17.58,23.9,86.57
deit_base_distilled_patch16_224,1212.98,844.19,1024,224,17.68,24.05,87.34
vit_base_patch16_clip_224,1211.82,844.996,1024,224,17.58,23.9,86.57
vit_base_patch16_224_miil,1211.26,845.389,1024,224,17.59,23.91,94.4
dpn92,1210.45,845.948,1024,224,6.54,18.21,37.67
vit_base_patch16_224,1210.28,846.074,1024,224,17.58,23.9,86.57
coatnet_rmlp_1_rw2_224,1208.65,847.215,1024,224,8.11,40.13,41.72
cait_xxs36_224,1205.51,849.419,1024,224,3.77,30.34,17.3
maxvit_tiny_tf_224,1200.3,639.828,768,224,5.6,35.78,30.92
swinv2_tiny_window8_256,1200.06,853.274,1024,256,5.96,24.57,28.35
efficientnetv2_rw_s,1199.87,853.413,1024,384,8.72,38.03,23.94
dla102x2,1198.52,854.374,1024,224,9.34,29.91,41.28
regnetx_160,1195.08,856.833,1024,224,15.99,25.52,54.28
dpn98,1183.92,864.908,1024,224,11.73,25.2,61.57
vit_base_patch16_rpn_224,1180.39,867.498,1024,224,17.49,23.75,86.54
twins_pcpvt_large,1168.64,876.22,1024,224,9.84,35.82,60.99
deit3_base_patch16_224,1164.77,879.134,1024,224,17.58,23.9,86.59
deit3_base_patch16_224_in21ft1k,1164.5,879.334,1024,224,17.58,23.9,86.59
regnetz_d8,1163.64,879.982,1024,320,6.19,37.08,23.37
swsl_resnext101_32x8d,1158.15,884.156,1024,224,16.48,31.21,88.79
resnext101_32x8d,1158.05,884.232,1024,224,16.48,31.21,88.79
ssl_resnext101_32x8d,1158.02,884.255,1024,224,16.48,31.21,88.79
wide_resnet101_2,1157.66,884.531,1024,224,22.8,21.23,126.89
ig_resnext101_32x8d,1157.3,884.8,1024,224,16.48,31.21,88.79
coatnet_1_rw_224,1155.72,886.014,1024,224,8.04,34.6,41.72
vit_base_patch16_gap_224,1154.73,886.777,1024,224,17.49,25.59,86.57
vit_base_patch32_clip_448,1154.21,887.173,1024,448,17.93,23.9,88.34
resnet200,1149.71,890.646,1024,224,15.07,32.19,64.67
mvitv2_small,1146.92,892.812,1024,224,7.0,28.08,34.87
xception65p,1145.07,670.686,768,299,13.91,52.48,39.82
cs3se_edgenet_x,1143.17,895.738,1024,320,18.01,20.21,50.72
vit_relpos_base_patch16_rpn_224,1143.15,895.76,1024,224,17.51,24.97,86.41
vit_relpos_base_patch16_224,1141.31,897.204,1024,224,17.51,24.97,86.43
tnt_s_patch16_224,1135.32,901.935,1024,224,5.24,24.37,23.76
resnetrs101,1134.67,902.454,1024,288,13.56,28.53,63.62
vit_relpos_base_patch16_clsgap_224,1128.94,907.03,1024,224,17.6,25.12,86.43
vit_relpos_base_patch16_cls_224,1126.78,908.771,1024,224,17.6,25.12,86.43
inception_resnet_v2,1126.73,908.809,1024,299,13.18,25.06,55.84
ens_adv_inception_resnet_v2,1125.41,909.877,1024,299,13.18,25.06,55.84
beit_base_patch16_224,1112.26,920.631,1024,224,17.58,23.9,86.53
coat_tiny,1108.72,923.572,1024,224,4.35,27.2,5.5
beitv2_base_patch16_224,1108.55,923.711,1024,224,17.58,23.9,86.53
mvitv2_small_cls,1101.66,929.491,1024,224,7.04,28.17,34.87
resnetv2_50d_gn,1092.35,937.413,1024,288,7.24,19.7,25.57
pit_b_distilled_224,1078.48,474.731,512,224,12.5,33.07,74.79
pit_b_224,1075.34,476.117,512,224,12.42,32.94,73.76
hrnet_w40,1059.78,966.217,1024,224,12.75,25.29,57.56
coatnet_1_224,1045.17,489.859,512,224,8.7,39.0,42.23
resnet101d,1039.88,984.712,1024,320,16.48,34.77,44.57
flexivit_base,1037.21,987.248,1024,240,20.29,28.36,86.59
gluon_resnext101_64x4d,1034.86,989.491,1024,224,15.52,31.21,83.46
vit_small_patch16_36x1_224,1033.13,991.146,1024,224,13.71,35.69,64.67
vit_large_r50_s32_224,1030.67,993.517,1024,224,19.58,24.41,328.99
maxvit_rmlp_tiny_rw_256,1029.25,746.162,768,256,6.77,46.92,29.15
xcit_tiny_24_p16_384_dist,1027.64,996.444,1024,384,6.87,34.29,12.12
efficientnet_b4,1014.08,504.879,512,384,4.51,50.04,19.34
maxvit_tiny_rw_256,1008.0,1015.861,1024,256,6.74,44.35,29.07
vit_small_patch16_18x2_224,1006.7,1017.169,1024,224,13.71,35.69,64.67
swinv2_cr_small_224,1005.28,1018.603,1024,224,9.07,50.27,49.7
regnetx_080,1004.51,1019.384,1024,224,8.02,14.06,39.57
repvgg_b3,994.23,1029.925,1024,224,29.16,15.1,123.09
swinv2_cr_small_ns_224,993.75,1030.424,1024,224,9.08,50.27,49.7
repvgg_b2g4,988.97,1035.405,1024,224,12.63,12.9,61.76
convnext_small,988.3,1036.113,1024,288,14.39,35.65,50.22
gluon_xception65,987.82,777.458,768,299,13.96,52.48,39.92
vit_small_r26_s32_384,982.68,1042.031,1024,384,10.43,29.85,36.47
xception65,978.83,784.597,768,299,13.96,52.48,39.92
regnetz_040,975.77,787.056,768,320,6.35,37.78,27.12
regnetz_040h,971.51,790.512,768,320,6.43,37.94,28.94
gluon_seresnext101_64x4d,965.3,1060.794,1024,224,15.53,31.25,88.23
maxvit_tiny_pm_256,964.03,1062.189,1024,256,6.61,47.9,30.09
efficientformer_l7,962.55,1063.825,1024,224,10.17,24.45,82.23
twins_svt_large,962.19,1064.229,1024,224,15.15,35.1,99.27
tf_efficientnet_b4,957.62,534.646,512,380,4.49,49.49,19.34
pvt_v2_b4,957.38,1069.569,1024,224,10.14,53.74,62.56
poolformer_m36,954.91,1072.334,1024,224,8.8,22.02,56.17
cait_s24_224,954.44,1072.866,1024,224,9.35,40.58,46.92
regnetz_b16_evos,950.47,808.013,768,288,2.36,16.43,9.74
resnest50d_4s2x40d,938.07,1091.586,1024,224,4.4,17.94,30.42
hrnet_w48,936.07,1093.917,1024,224,17.34,28.56,77.47
gmlp_b16_224,930.95,1099.935,1024,224,15.78,30.21,73.08
convnextv2_tiny,930.82,550.041,512,288,7.39,22.21,28.64
convnextv2_small,928.68,1102.629,1024,224,8.71,21.56,50.32
maxxvit_rmlp_tiny_rw_256,918.72,1114.583,1024,256,6.66,39.76,29.64
mobilevitv2_150_384_in22ft1k,915.49,419.435,384,384,9.2,54.25,10.59
pvt_v2_b5,909.79,1125.516,1024,224,11.76,50.92,81.96
nest_small,903.21,850.284,768,224,10.35,40.04,38.35
swin_s3_small_224,899.98,853.339,768,224,9.43,37.84,49.74
xcit_medium_24_p16_224_dist,898.61,1139.525,1024,224,16.13,31.71,84.4
xcit_medium_24_p16_224,898.6,1139.542,1024,224,16.13,31.71,84.4
jx_nest_small,892.03,860.939,768,224,10.35,40.04,38.35
coat_mini,880.8,1162.569,1024,224,6.82,33.68,10.34
swin_base_patch4_window7_224,875.38,1169.764,1024,224,15.47,36.63,87.77
dpn131,865.2,1183.527,1024,224,16.09,32.97,79.25
resnetv2_50d_evos,854.82,1197.895,1024,288,7.15,19.7,25.59
xcit_small_12_p16_384_dist,853.54,1199.694,1024,384,14.14,36.51,26.25
sequencer2d_l,839.78,1219.347,1024,224,9.74,22.12,54.3
crossvit_base_240,839.43,914.892,768,240,21.22,36.33,105.03
hrnet_w44,821.37,1246.671,1024,224,14.94,26.92,67.06
eca_nfnet_l1,818.87,1250.489,1024,320,14.92,34.42,41.41
vit_base_r50_s16_224,817.55,1252.502,1024,224,21.67,35.31,114.69
maxvit_rmlp_small_rw_224,816.34,1254.368,1024,224,10.75,49.3,64.9
gcvit_small,815.24,1256.055,1024,224,8.57,41.61,51.09
regnety_080,811.28,1262.191,1024,288,13.22,29.69,39.18
densenet264,804.85,1272.268,1024,224,12.95,12.8,72.69
mvitv2_base,804.14,1273.395,1024,224,10.16,40.5,51.47
repvgg_b3g4,802.85,1275.443,1024,224,17.89,15.1,83.83
vit_base_patch16_plus_240,782.25,1309.022,1024,240,27.41,33.08,117.56
swinv2_tiny_window16_256,781.61,655.045,512,256,6.68,39.02,28.35
maxvit_small_tf_224,777.04,658.899,512,224,11.66,53.17,68.93
xcit_tiny_24_p8_224,771.1,1327.958,1024,224,9.21,45.39,12.11
xcit_tiny_24_p8_224_dist,770.21,1329.496,1024,224,9.21,45.39,12.11
coatnet_2_rw_224,763.52,670.562,512,224,15.09,49.22,73.87
vit_relpos_base_patch16_plus_240,763.4,1341.361,1024,240,27.3,34.33,117.38
efficientnet_b3_gn,763.0,671.023,512,320,2.14,28.83,11.73
coatnet_rmlp_2_rw_224,759.73,673.906,512,224,15.18,54.78,73.88
vit_small_patch16_384,753.82,1018.79,768,384,15.52,50.78,22.2
hrnet_w64,750.36,1364.663,1024,224,28.97,35.09,128.06
xception71,749.7,1024.396,768,299,18.09,69.92,42.34
resnet152d,742.37,1379.356,1024,320,24.08,47.67,60.21
swinv2_small_window8_256,741.95,1380.134,1024,256,11.58,40.14,49.73
mobilevitv2_175_384_in22ft1k,739.09,519.544,384,384,12.47,63.29,14.25
ecaresnet200d,736.17,1390.959,1024,256,20.0,43.15,64.69
seresnet200d,733.28,1396.444,1024,256,20.01,43.15,71.86
swin_s3_base_224,733.27,1396.459,1024,224,13.69,48.26,71.13
convit_base,731.09,1400.636,1024,224,17.52,31.77,86.54
resnest101e,726.65,1409.184,1024,256,13.38,28.66,48.28
deit3_small_patch16_384,726.49,1057.125,768,384,15.52,50.78,22.21
deit3_small_patch16_384_in21ft1k,726.32,1057.368,768,384,15.52,50.78,22.21
volo_d2_224,722.61,1417.079,1024,224,14.34,41.34,58.68
tnt_b_patch16_224,721.24,1419.762,1024,224,14.09,39.01,65.41
xcit_nano_12_p8_384_dist,720.41,1421.4,1024,384,6.34,46.08,3.05
swinv2_cr_base_224,719.23,1423.721,1024,224,15.86,59.66,87.88
poolformer_m48,719.07,1424.046,1024,224,11.59,29.17,73.47
coatnet_2_224,715.36,715.711,512,224,16.5,52.67,74.68
swinv2_cr_base_ns_224,712.96,1436.239,1024,224,15.86,59.66,87.88
dpn107,691.0,1481.897,1024,224,18.38,33.46,86.92
convnext_base,687.14,1490.219,1024,288,25.43,47.53,88.59
resnetv2_50x1_bitm,684.31,374.087,256,448,16.62,44.46,25.55
efficientnet_b3_g8_gn,664.63,770.341,512,320,3.2,28.83,14.25
regnety_064,657.71,1556.911,1024,288,10.56,27.11,30.58
regnetv_064,652.6,1569.096,1024,288,10.55,27.11,30.58
xcit_small_12_p8_224,651.3,1572.214,1024,224,18.69,47.21,26.21
xcit_small_12_p8_224_dist,651.08,1572.755,1024,224,18.69,47.21,26.21
resnetrs152,649.95,1575.501,1024,320,24.34,48.14,86.62
mobilevitv2_200_384_in22ft1k,647.42,395.4,256,384,16.24,72.34,18.45
seresnet152d,645.69,1585.88,1024,320,24.09,47.72,66.84
tresnet_l,644.38,1589.105,1024,224,10.88,11.9,55.99
tresnet_v2_l,642.3,1594.246,1024,224,8.81,16.34,46.17
nest_base,640.98,798.76,512,224,17.96,53.39,67.72
regnetx_120,640.37,1599.07,1024,224,12.13,21.37,46.11
seresnext101_32x8d,639.53,1601.159,1024,288,27.24,51.63,93.57
regnetz_e8,639.43,1601.423,1024,320,15.46,63.94,57.7
ese_vovnet99b_iabn,636.1,1609.798,1024,224,16.49,11.27,63.2
jx_nest_base,634.61,806.787,512,224,17.96,53.39,67.72
regnety_120,625.75,1636.422,1024,224,12.14,21.38,51.82
efficientnetv2_m,624.53,1639.618,1024,416,18.6,67.5,54.14
seresnext101d_32x8d,621.55,1647.466,1024,288,27.64,52.95,93.59
resnext101_64x4d,619.77,1652.21,1024,288,25.66,51.59,83.46
swsl_resnext101_32x16d,612.21,1672.624,1024,224,36.27,51.18,194.03
ig_resnext101_32x16d,611.98,1673.243,1024,224,36.27,51.18,194.03
maxvit_rmlp_small_rw_256,611.67,1255.571,768,256,14.15,66.09,64.9
ssl_resnext101_32x16d,611.31,1675.063,1024,224,36.27,51.18,194.03
regnety_320,605.31,1691.684,1024,224,32.34,30.26,145.05
gcvit_base,602.42,1699.782,1024,224,14.87,55.48,90.32
regnetz_c16_evos,596.93,857.706,512,320,3.86,25.88,13.49
maxxvit_rmlp_small_rw_256,590.18,1735.046,1024,256,14.67,58.38,66.01
legacy_senet154,585.86,1747.854,1024,224,20.77,38.69,115.09
senet154,585.53,1748.836,1024,224,20.77,38.69,115.09
seresnextaa101d_32x8d,585.08,1750.175,1024,288,28.51,56.44,93.59
gluon_senet154,584.86,1750.843,1024,224,20.77,38.69,115.09
convmixer_768_32,581.95,1759.577,1024,224,19.55,25.95,21.11
seresnet269d,574.5,1782.4,1024,256,26.59,53.6,113.67
nf_regnet_b5,565.36,905.602,512,456,11.7,61.95,49.74
mixer_l16_224,553.66,1849.49,1024,224,44.6,41.69,208.2
resnet200d,545.14,1878.401,1024,320,31.25,67.33,64.69
nfnet_f1,544.28,1881.353,1024,320,35.97,46.77,132.63
vit_large_patch32_384,543.45,1884.237,1024,384,45.31,43.86,306.63
efficientnetv2_rw_m,543.37,1884.512,1024,416,21.49,79.62,53.24
vit_medium_patch16_gap_384,539.24,949.475,512,384,26.08,67.54,39.03
efficientnet_b5,533.21,960.212,512,448,9.59,93.56,30.39
swinv2_base_window8_256,531.81,1925.495,1024,256,20.37,52.59,87.92
maxxvitv2_rmlp_base_rw_224,525.72,1947.791,1024,224,24.2,62.77,116.09
xcit_large_24_p16_224_dist,509.19,2011.039,1024,224,35.86,47.27,189.1
xcit_large_24_p16_224,509.15,2011.169,1024,224,35.86,47.27,189.1
swin_large_patch4_window7_224,504.4,1522.593,768,224,34.53,54.94,196.53
halonet_h1,503.39,508.543,256,256,3.0,51.17,8.1
volo_d3_224,502.58,2037.467,1024,224,20.78,60.09,86.33
swinv2_small_window16_256,488.97,1047.084,512,256,12.82,66.29,49.73
tresnet_xl,481.58,2126.301,1024,224,15.17,15.34,78.44
vit_small_patch8_224,479.11,1068.641,512,224,22.44,80.84,21.67
tf_efficientnet_b5,476.47,805.919,384,456,10.46,98.86,30.39
maxvit_rmlp_base_rw_224,472.06,2169.196,1024,224,23.15,92.64,116.14
resnetrs200,471.68,2170.964,1024,320,31.51,67.81,93.21
xcit_tiny_12_p8_384_dist,471.45,2172.002,1024,384,14.13,69.14,6.71
dm_nfnet_f1,461.24,2220.087,1024,320,35.97,46.77,132.63
tf_efficientnetv2_m,458.93,1673.426,768,480,24.76,89.84,54.14
xcit_small_24_p16_384_dist,457.16,2239.891,1024,384,26.72,68.58,47.67
coatnet_rmlp_3_rw_224,439.5,582.463,256,224,33.56,79.47,165.15
maxvit_base_tf_224,430.05,1190.542,512,224,24.04,95.01,119.47
swinv2_cr_large_224,423.86,1811.887,768,224,35.1,78.42,196.68
resnetv2_152x2_bit_teacher,423.36,2418.743,1024,224,46.95,45.11,236.34
swinv2_cr_tiny_384,423.1,907.565,384,384,15.34,161.01,28.33
coatnet_3_rw_224,421.95,606.701,256,224,33.44,73.83,181.81
resnetv2_101x1_bitm,419.35,610.453,256,448,31.65,64.93,44.54
coatnet_3_224,405.07,631.982,256,224,36.56,79.01,166.97
convnextv2_base,403.59,1268.593,512,288,25.43,47.53,88.72
eca_nfnet_l2,401.73,2548.946,1024,384,30.05,68.28,56.72
regnetz_d8_evos,394.39,1947.294,768,320,7.03,38.92,23.46
convmixer_1024_20_ks9_p14,393.5,2602.254,1024,224,5.55,5.51,24.38
eva_large_patch14_196,392.3,2610.234,1024,196,61.57,63.52,304.14
crossvit_15_dagger_408,390.72,655.182,256,408,21.45,95.05,28.5
vit_large_patch16_224,390.66,2621.182,1024,224,61.6,63.52,304.33
vit_base_patch16_18x2_224,384.38,2663.987,1024,224,52.51,71.38,256.73
deit3_large_patch16_224_in21ft1k,377.58,2711.976,1024,224,61.6,63.52,304.37
deit3_large_patch16_224,377.53,2712.348,1024,224,61.6,63.52,304.37
convnext_large,373.02,2058.836,768,288,56.87,71.29,197.77
beit_large_patch16_224,360.62,2839.572,1024,224,61.6,63.52,304.43
beitv2_large_patch16_224,360.58,2839.86,1024,224,61.6,63.52,304.43
swinv2_base_window12to16_192to256_22kft1k,360.56,1065.006,384,256,22.02,84.71,87.92
swinv2_base_window16_256,360.23,1065.959,384,256,22.02,84.71,87.92
regnety_160,353.5,2172.566,768,288,26.37,38.07,83.59
nasnetalarge,345.63,1111.004,384,331,23.89,90.56,88.75
maxvit_tiny_tf_384,344.01,744.157,256,384,17.53,123.42,30.98
xcit_small_24_p8_224,342.37,2990.915,1024,224,35.81,90.78,47.63
xcit_small_24_p8_224_dist,342.26,2991.817,1024,224,35.81,90.78,47.63
flexivit_large,335.35,3053.52,1024,240,70.99,75.39,304.36
maxxvitv2_rmlp_large_rw_224,332.33,3081.271,1024,224,44.14,87.15,215.42
vit_large_r50_s32_384,329.8,3104.921,1024,384,57.43,76.52,329.09
pnasnet5large,328.89,1167.534,384,331,25.04,92.89,86.06
tresnet_m_448,325.8,3143.01,1024,448,22.94,29.21,31.39
volo_d1_384,323.04,1584.906,512,384,22.75,108.55,26.78
volo_d4_224,318.96,3210.439,1024,224,44.34,80.22,192.96
xcit_medium_24_p16_384_dist,312.74,3274.268,1024,384,47.39,91.64,84.4
nfnet_f2,310.6,3296.869,1024,352,63.22,79.06,193.78
vit_base_patch16_384,307.09,1250.42,384,384,55.54,101.56,86.86
deit_base_patch16_384,306.8,1251.599,384,384,55.54,101.56,86.86
vit_base_patch16_clip_384,306.29,1253.685,384,384,55.54,101.56,86.86
deit_base_distilled_patch16_384,305.48,1257.017,384,384,55.65,101.82,87.63
ecaresnet269d,305.06,3356.684,1024,352,50.25,101.25,102.09
maxvit_large_tf_224,301.43,1273.908,384,224,43.68,127.35,211.79
deit3_base_patch16_384_in21ft1k,298.01,1288.526,384,384,55.54,101.56,86.88
deit3_base_patch16_384,297.88,1289.093,384,384,55.54,101.56,86.88
resnetrs270,296.97,3448.186,1024,352,51.13,105.48,129.86
regnetx_320,289.44,2653.413,768,224,31.81,36.3,107.81
efficientnet_b6,287.31,890.997,256,528,19.4,167.39,43.04
vit_large_patch14_224,286.23,3577.501,1024,224,81.08,88.79,304.2
vit_large_patch14_clip_224,285.99,3580.5,1024,224,81.08,88.79,304.2
crossvit_18_dagger_408,285.18,673.248,192,408,32.47,124.87,44.61
cait_xxs24_384,281.48,3637.936,1024,384,9.63,122.66,12.03
ig_resnext101_32x32d,275.12,1860.956,512,224,87.29,91.12,468.53
tf_efficientnet_b6,274.07,700.545,192,528,19.4,167.39,43.04
dm_nfnet_f2,264.79,2900.408,768,352,63.22,79.06,193.78
beit_base_patch16_384,261.27,1469.733,384,384,55.54,101.56,86.74
efficientnetv2_l,260.33,1966.694,512,480,56.4,157.99,118.52
swinv2_cr_small_384,259.75,985.56,256,384,29.7,298.03,49.7
tf_efficientnetv2_l,257.29,1989.923,512,480,56.4,157.99,118.52
resnest200e,254.36,1006.453,256,320,35.69,82.78,70.2
mvitv2_large,249.99,2048.061,512,224,43.87,112.02,217.99
xcit_tiny_24_p8_384_dist,248.25,4124.916,1024,384,27.05,132.95,12.11
convnext_xlarge,242.63,2110.182,512,288,100.8,95.05,350.2
resmlp_big_24_224_in22ft1k,241.9,4233.056,1024,224,100.23,87.31,129.14
resmlp_big_24_224,241.74,4235.988,1024,224,100.23,87.31,129.14
resmlp_big_24_distilled_224,241.44,4241.249,1024,224,100.23,87.31,129.14
convnextv2_large,239.52,1068.782,256,288,56.87,71.29,197.96
coatnet_4_224,238.62,1072.827,256,224,62.48,129.26,275.43
swin_base_patch4_window12_384,236.12,813.144,192,384,47.19,134.78,87.9
xcit_medium_24_p8_224_dist,233.5,3289.007,768,224,63.53,121.23,84.32
xcit_medium_24_p8_224,233.5,3289.104,768,224,63.53,121.23,84.32
eca_nfnet_l3,229.87,2227.284,512,448,52.55,118.4,72.04
vit_base_r50_s16_384,226.32,1696.687,384,384,67.43,135.03,98.95
maxvit_small_tf_384,224.01,857.105,192,384,35.87,183.65,69.02
xcit_small_12_p8_384_dist,221.54,1733.28,384,384,54.92,138.29,26.21
swinv2_large_window12to16_192to256_22kft1k,220.1,1163.101,256,256,47.81,121.53,196.74
volo_d5_224,210.88,4855.76,1024,224,72.4,118.11,295.46
vit_base_patch8_224,199.67,1282.079,256,224,78.22,161.69,86.58
cait_xs24_384,197.64,3885.811,768,384,19.28,183.98,26.67
resnetrs350,196.19,5219.377,1024,384,77.59,154.74,163.96
cait_xxs36_384,188.27,5439.03,1024,384,14.35,183.7,17.37
swinv2_cr_base_384,185.68,1378.725,256,384,50.57,333.68,87.88
coatnet_rmlp_2_rw_384,184.84,1038.746,192,384,47.69,209.43,73.88
swinv2_cr_huge_224,184.09,2085.934,384,224,115.97,121.08,657.83
convnext_xxlarge,183.68,2787.486,512,224,151.66,95.29,846.47
volo_d2_384,180.56,2126.753,384,384,46.17,184.51,58.87
xcit_large_24_p16_384_dist,176.39,5805.281,1024,384,105.35,137.17,189.1
regnety_640,174.81,4393.396,768,224,64.16,42.5,281.38
maxvit_xlarge_tf_224,171.63,1491.6,256,224,97.49,191.02,474.95
nfnet_f3,170.11,4514.791,768,416,115.58,141.78,254.92
densenet264d_iabn,167.13,6126.84,1024,224,13.47,14.0,72.74
efficientnet_b7,166.38,1153.975,192,600,38.33,289.94,66.35
maxvit_tiny_tf_512,163.72,781.809,128,512,33.49,257.59,31.05
efficientnetv2_xl,162.7,3146.865,512,512,93.85,247.32,208.12
tf_efficientnetv2_xl,161.32,3173.821,512,512,93.85,247.32,208.12
tf_efficientnet_b7,160.43,1196.798,192,600,38.33,289.94,66.35
resnetv2_152x2_bit_teacher_384,159.54,1604.579,256,384,136.16,132.56,236.34
tresnet_l_448,154.66,6620.743,1024,448,43.5,47.56,55.99
vit_huge_patch14_224,154.27,6637.58,1024,224,167.43,139.43,658.75
vit_huge_patch14_clip_224,154.17,6642.017,1024,224,167.4,139.41,632.05
maxxvitv2_rmlp_base_rw_384,153.9,1663.429,256,384,72.98,213.74,116.09
cait_s24_384,152.41,3359.254,512,384,32.17,245.31,47.06
deit3_huge_patch14_224_in21ft1k,150.05,6824.53,1024,224,167.4,139.41,632.13
deit3_huge_patch14_224,149.59,6845.356,1024,224,167.4,139.41,632.13
dm_nfnet_f3,145.48,3519.403,512,416,115.58,141.78,254.92
resnetrs420,142.37,5394.528,768,416,108.45,213.79,191.89
swin_large_patch4_window12_384,138.37,925.016,128,384,104.08,202.16,196.74
resnetv2_50x3_bitm,133.5,1438.189,192,448,145.7,133.37,217.32
maxvit_rmlp_base_rw_384,131.6,1945.285,256,384,70.97,318.95,116.14
xcit_large_24_p8_224_dist,131.32,3898.808,512,224,141.23,181.56,188.93
xcit_large_24_p8_224,131.27,3900.391,512,224,141.23,181.56,188.93
coatnet_5_224,130.48,1471.508,192,224,145.49,194.24,687.47
maxvit_base_tf_384,122.48,1567.652,192,384,73.8,332.9,119.65
resnest269e,119.17,2148.198,256,416,77.69,171.98,110.93
resnetv2_152x2_bitm,117.29,2182.534,256,448,184.99,180.43,236.34
xcit_small_24_p8_384_dist,116.59,3293.649,384,384,105.24,265.91,47.63
tresnet_xl_448,115.63,8855.938,1024,448,60.65,61.31,78.44
swinv2_cr_large_384,113.43,1128.479,128,384,108.95,404.96,196.68
maxvit_small_tf_512,106.82,1198.298,128,512,67.26,383.77,69.13
efficientnet_b8,106.21,1205.18,128,672,63.48,442.89,87.41
tf_efficientnet_b8,102.86,1244.358,128,672,63.48,442.89,87.41
eva_large_patch14_336,102.71,2492.371,256,336,191.1,270.24,304.53
vit_large_patch14_clip_336,102.52,2496.99,256,336,191.11,270.24,304.53
vit_large_patch16_384,102.5,2497.593,256,384,191.21,270.24,304.72
cait_s36_384,101.88,5025.316,512,384,47.99,367.4,68.37
eva_giant_patch14_224,101.84,10055.112,1024,224,267.18,192.64,1012.56
vit_giant_patch14_224,100.71,7625.752,768,224,267.18,192.64,1012.61
vit_giant_patch14_clip_224,100.43,7646.856,768,224,267.18,192.64,1012.65
deit3_large_patch16_384_in21ft1k,99.81,2564.809,256,384,191.21,270.24,304.76
deit3_large_patch16_384,99.8,2564.994,256,384,191.21,270.24,304.76
swinv2_base_window12to24_192to384_22kft1k,96.12,665.832,64,384,55.25,280.36,87.92
nfnet_f4,89.33,5731.574,512,512,216.26,262.26,316.07
beit_large_patch16_384,88.56,2890.58,256,384,191.21,270.24,305.0
maxvit_large_tf_384,86.44,1480.84,128,384,132.55,445.84,212.03
regnety_1280,82.49,4654.845,384,224,127.66,71.58,644.81
xcit_medium_24_p8_384_dist,79.96,3201.705,256,384,186.67,354.73,84.32
resnetv2_101x3_bitm,79.41,2417.67,192,448,280.33,194.78,387.93
volo_d3_448,77.64,2473.021,192,448,96.33,446.83,86.63
dm_nfnet_f4,77.54,4952.036,384,512,216.26,262.26,316.07
nfnet_f5,67.46,5691.915,384,544,290.97,349.71,377.21
tf_efficientnet_l2,63.66,1507.989,96,475,172.11,609.89,480.31
swinv2_large_window12to24_192to384_22kft1k,60.94,787.651,48,384,116.15,407.83,196.74
vit_gigantic_patch14_224,60.18,8507.121,512,224,483.95,275.37,1844.44
vit_gigantic_patch14_clip_224,60.11,8517.85,512,224,483.96,275.37,1844.91
volo_d4_448,57.87,3317.675,192,448,197.13,527.35,193.41
maxvit_base_tf_512,57.86,2212.256,128,512,138.02,703.99,119.88
dm_nfnet_f5,57.78,6645.368,384,544,290.97,349.71,377.21
vit_huge_patch14_clip_336,57.4,4460.085,256,336,390.97,407.54,632.46
ig_resnext101_32x48d,56.43,6804.709,384,224,153.57,131.06,828.41
convnextv2_huge,56.31,1704.92,96,384,337.96,232.35,660.29
convmixer_1536_20,55.47,18461.426,1024,224,48.68,33.03,51.63
swinv2_cr_giant_224,52.39,3665.046,192,224,483.85,309.15,2598.76
nfnet_f6,51.81,7411.574,384,576,378.69,452.2,438.36
maxvit_xlarge_tf_384,50.76,1891.335,96,384,292.78,668.76,475.32
swinv2_cr_huge_384,49.01,1305.73,64,384,352.04,583.18,657.94
regnety_2560,47.69,8051.463,384,224,257.07,87.48,826.14
xcit_large_24_p8_384_dist,44.91,4275.004,192,384,415.0,531.82,188.93
dm_nfnet_f6,44.62,5737.462,256,576,378.69,452.2,438.36
nfnet_f7,41.13,6224.782,256,608,480.39,570.85,499.5
maxvit_large_tf_512,41.04,1559.597,64,512,244.75,942.15,212.33
eva_giant_patch14_336,39.89,6418.269,256,336,620.64,550.67,1013.01
volo_d5_448,39.88,3209.812,128,448,315.06,737.92,295.91
beit_large_patch16_512,35.33,2716.953,96,512,362.24,656.39,305.67
cait_m36_384,32.89,7783.487,256,384,173.11,734.81,271.22
resnetv2_152x4_bitm,30.46,3151.929,96,480,844.84,414.26,936.53
volo_d5_512,27.89,4590.0,128,512,425.09,1105.37,296.09
maxvit_xlarge_tf_512,24.38,1968.424,48,512,534.14,1413.22,475.77
efficientnet_l2,23.13,1383.428,32,800,479.12,1707.39,480.31
swinv2_cr_giant_384,15.06,2124.735,32,384,1450.71,1394.86,2598.76
cait_m48_448,13.86,9235.876,128,448,329.41,1708.23,356.46
eva_giant_patch14_560,10.52,3043.009,32,560,1906.76,2577.17,1014.45
# Validation and Benchmark Results
This folder contains validation and benchmark results for the models in this collection. Validation scores are currently only run for models with pretrained weights and ImageNet-1k heads; benchmark numbers are run for all models.
## Datasets
There are currently results for the ImageNet validation set and 5 additional test / label sets.
The test set results include rank and top-1/top-5 differences from clean validation. For the "Real Labels", ImageNetV2, and Sketch test sets, the differences were calculated against the full 1000-class ImageNet-1k validation set. For the Adversarial and Rendition sets, the differences were calculated against 'clean' runs on the ImageNet-1k validation set restricted to the same 200 classes used in each respective test set.
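To see how a given test-set score relates to its clean baseline, the deltas can also be recomputed directly from the CSVs. Below is a minimal pandas sketch, assuming each results file shares a `model` column plus `top1`/`top5` score columns; the column names are an assumption, so check the actual CSV headers first.

```python
import pandas as pd

# Column names ('model', 'top1', 'top5') are assumptions -- verify against the CSV headers.
clean = pd.read_csv("results-imagenet.csv").set_index("model")
sketch = pd.read_csv("results-sketch.csv").set_index("model")

# Top-1 / top-5 deltas vs. the clean ImageNet-1k validation run,
# computed only for models present in both files.
common = clean.index.intersection(sketch.index)
diff = pd.DataFrame({
    "top1_diff": sketch.loc[common, "top1"] - clean.loc[common, "top1"],
    "top5_diff": sketch.loc[common, "top5"] - clean.loc[common, "top5"],
})
print(diff.sort_values("top1_diff", ascending=False).head())
```

The same pattern applies to the 200-class sets by swapping in the matching `*-clean.csv` file as the baseline.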
### ImageNet Validation - [`results-imagenet.csv`](results-imagenet.csv)
The standard 50,000-image ImageNet-1k validation set. Model selection during training utilizes this validation set, so it is not a true test set. Question: Does anyone have the official ImageNet-1k test set classification labels now that challenges are done?
* Source: http://image-net.org/challenges/LSVRC/2012/index
* Paper: "ImageNet Large Scale Visual Recognition Challenge" - https://arxiv.org/abs/1409.0575
### ImageNet-"Real Labels" - [`results-imagenet-real.csv`](results-imagenet-real.csv)
The usual ImageNet-1k validation set with a fresh new set of labels intended to improve on mistakes in the original annotation process.
* Source: https://github.com/google-research/reassessed-imagenet
* Paper: "Are we done with ImageNet?" - https://arxiv.org/abs/2006.07159
### ImageNetV2 Matched Frequency - [`results-imagenetv2-matched-frequency.csv`](results-imagenetv2-matched-frequency.csv)
An ImageNet test set of 10,000 images sampled from new images roughly 10 years after the original. Care was taken to replicate the original ImageNet curation/sampling process.
* Source: https://github.com/modestyachts/ImageNetV2
* Paper: "Do ImageNet Classifiers Generalize to ImageNet?" - https://arxiv.org/abs/1902.10811
### ImageNet-Sketch - [`results-sketch.csv`](results-sketch.csv)
50,000 non-photographic images (or photos of such), mostly monochromatic sketches and doodles, covering all 1000 ImageNet classes.
* Source: https://github.com/HaohanWang/ImageNet-Sketch
* Paper: "Learning Robust Global Representations by Penalizing Local Predictive Power" - https://arxiv.org/abs/1905.13549
### ImageNet-Adversarial - [`results-imagenet-a.csv`](results-imagenet-a.csv)
A collection of 7500 images covering 200 of the 1000 ImageNet classes. Images are naturally occurring adversarial examples that confuse typical ImageNet classifiers. This is a challenging dataset; a typical ResNet-50 will score 0% top-1.
For a clean validation run on the same 200 classes, see [`results-imagenet-a-clean.csv`](results-imagenet-a-clean.csv)
* Source: https://github.com/hendrycks/natural-adv-examples
* Paper: "Natural Adversarial Examples" - https://arxiv.org/abs/1907.07174
### ImageNet-Rendition - [`results-imagenet-r.csv`](results-imagenet-r.csv)
Renditions of 200 ImageNet classes resulting in 30,000 images for testing robustness.
For a clean validation run on the same 200 classes, see [`results-imagenet-r-clean.csv`](results-imagenet-r-clean.csv)
* Source: https://github.com/hendrycks/imagenet-r
* Paper: "The Many Faces of Robustness" - https://arxiv.org/abs/2006.16241
### TODO
* Explore adding a reduced version of ImageNet-C (Corruptions) and ImageNet-P (Perturbations) from https://github.com/hendrycks/robustness. The originals are huge and image-size specific.
## Benchmark
CSV files with a `model_benchmark` prefix include benchmark numbers for models on various accelerators with different precisions. They are currently only run on an RTX 3090 w/ AMP for inference; I intend to add more configurations in the future.
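The benchmark CSVs can be explored the same way. This is a sketch rather than official tooling: the file name is hypothetical, and the column names (`infer_samples_per_sec`, `infer_img_size`, `param_count`) are assumptions about the benchmark output format.

```python
import pandas as pd

# Hypothetical file name and assumed column names -- check the actual benchmark CSV header.
bench = pd.read_csv("model_benchmark_rtx3090_amp.csv")

# Throughput per million parameters as a crude speed/size trade-off measure.
bench["samples_per_sec_per_mparam"] = bench["infer_samples_per_sec"] / bench["param_count"]

cols = ["model", "infer_img_size", "infer_samples_per_sec", "param_count"]
print(bench.sort_values("samples_per_sec_per_mparam", ascending=False)[cols].head(10))
```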
## Metadata
CSV files with a `model_metadata` prefix contain extra information about the source training, currently the pretraining dataset and technique (i.e. distillation, SSL, WSL, etc.). Eventually I'd like to have metadata about augmentation, regularization, etc., but that will be a challenge to source consistently.
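If the metadata and results files share a model identifier, the two can be joined for analysis (e.g. comparing accuracy across pretraining datasets or techniques). A minimal sketch, where the metadata file name is hypothetical and the `model` join key is an assumption:

```python
import pandas as pd

# Hypothetical metadata file name; joining on a shared 'model' column is an assumption.
results = pd.read_csv("results-imagenet.csv")
metadata = pd.read_csv("model_metadata-in1k.csv")

# Left join so models without metadata entries are kept, with NaN metadata fields.
merged = results.merge(metadata, on="model", how="left")
print(merged.head())
```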